From 2f0d49e70add4224c295df1ec222856ddabf1574 Mon Sep 17 00:00:00 2001 From: Filippo Valsorda Date: Wed, 2 Jan 2013 22:47:06 +0100 Subject: [PATCH] release 2013.01.02 --- about.html | 2 +- documentation.html | 2 +- download.html | 31 +- download.html.in | 2 +- faq.html | 2 +- index.html | 2 +- test_coverage/coverage_html.js | 376 + test_coverage/index.html | 144 + test_coverage/jquery-1.4.3.min.js | 166 + test_coverage/jquery.hotkeys.js | 99 + test_coverage/jquery.isonscreen.js | 53 + test_coverage/jquery.tablesorter.min.js | 2 + test_coverage/keybd_closed.png | Bin 0 -> 264 bytes test_coverage/keybd_open.png | Bin 0 -> 267 bytes test_coverage/status.dat | 271 + test_coverage/style.css | 300 + test_coverage/youtube_dl.html | 1066 +++ test_coverage/youtube_dl_FileDownloader.html | 1542 ++++ test_coverage/youtube_dl_InfoExtractors.html | 7646 ++++++++++++++++++ test_coverage/youtube_dl_PostProcessor.html | 490 ++ test_coverage/youtube_dl_update.html | 402 + test_coverage/youtube_dl_utils.html | 1160 +++ test_coverage/youtube_dl_version.html | 86 + update/LATEST_VERSION | 2 +- update/versions.json | 18 +- 25 files changed, 13849 insertions(+), 15 deletions(-) create mode 100644 test_coverage/coverage_html.js create mode 100644 test_coverage/index.html create mode 100644 test_coverage/jquery-1.4.3.min.js create mode 100644 test_coverage/jquery.hotkeys.js create mode 100644 test_coverage/jquery.isonscreen.js create mode 100644 test_coverage/jquery.tablesorter.min.js create mode 100644 test_coverage/keybd_closed.png create mode 100644 test_coverage/keybd_open.png create mode 100644 test_coverage/status.dat create mode 100644 test_coverage/style.css create mode 100644 test_coverage/youtube_dl.html create mode 100644 test_coverage/youtube_dl_FileDownloader.html create mode 100644 test_coverage/youtube_dl_InfoExtractors.html create mode 100644 test_coverage/youtube_dl_PostProcessor.html create mode 100644 test_coverage/youtube_dl_update.html create mode 100644 test_coverage/youtube_dl_utils.html create mode 100644 test_coverage/youtube_dl_version.html diff --git a/about.html b/about.html index 74ecc87fd..685bef9e5 100644 --- a/about.html +++ b/about.html @@ -36,6 +36,6 @@ Creative Commons License
-Copyright © 2006-2012 Ricardo Garcia Gonzalez +Copyright © 2006-2013 Ricardo Garcia Gonzalez diff --git a/documentation.html b/documentation.html index b2c63ce47..bef2e9530 100644 --- a/documentation.html +++ b/documentation.html @@ -132,6 +132,6 @@ You can configure youtube-dl by placing default arguments (such as --extra Creative Commons License
-Copyright © 2006-2012 Ricardo Garcia Gonzalez +Copyright © 2006-2013 Ricardo Garcia Gonzalez diff --git a/download.html b/download.html index a9b50d08f..f12e46a82 100644 --- a/download.html +++ b/download.html @@ -16,23 +16,40 @@

Remember that youtube-dl requires Python version 2.6, 2.7, or 3.3+ to work.

-2012.12.11
+2013.01.02 (sig)

-To install it right away for all users, type:
-sudo wget https://github.com/downloads/rg3/youtube-dl/youtube-dl -O /usr/local/bin/youtube-dl

+Windows exe (sig - SHA256: 75fa89d2ce297d102ff27675aa9d92545bbc91013f52ec52868c069f4f9f0422)
+Full source + docs + binary tarball (sig - SHA256: 6a66d022ac8e1c13da284036288a133ec8dba003b7bd3a5179d0c0daca8c8196)
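To double-check a download against the SHA256 sum listed next to it, one quick way (a sketch assuming GNU coreutils; on OS X, shasum -a 256 does the same job) is:
sha256sum youtube-dl.exe
and compare the printed hash with the one above; the tarball can be checked the same way.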

To install it right away for all UNIX users (Linux, OS X, etc.), type:
+sudo wget http://youtube-dl.org/downloads/2013.01.02/youtube-dl -O /usr/local/bin/youtube-dl
sudo chmod a+x /usr/local/bin/youtube-dl

To check the signature, type:
+sudo wget http://youtube-dl.org/downloads/2013.01.02/youtube-dl.sig -O youtube-dl.sig
+gpg --verify youtube-dl.sig /usr/local/bin/youtube-dl
+rm youtube-dl.sig
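Note that gpg --verify only succeeds once the matching public key is in your keyring (on success it prints a "Good signature" line). One way to fetch the keys, assuming your gpg is configured with a reachable keyserver, is:
gpg --recv-keys FAFB085C D977155C
using the short IDs of the two keys listed below.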

The following GPG keys will be used to sign the binaries and the git tags:

-• MD5: a73e1d6f59e34d1231cf5788b31fa177
-• SHA1: aa45f8e32c637a2342674f32d1ca9e8f70bf695c
-• SHA256: e2109ac82b05f910828a12b2de0d8e5d3b29b0ed9e2daeb68badb1fce8c1ec7a
+• 1024D/FAFB085C Philipp Hagemeister Key fingerprint = 0600 E1DB 6FB5 3A5D 95D8 FC0D F5EA B582 FAFB 085C
+• 4096R/D977155C Filippo Valsorda Key fingerprint = 9524 4D9F EE39 0B71 25A3 4708 3CD8 8EE0 D977 155C
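After importing the keys it is worth confirming you got the right ones; a minimal check is:
gpg --fingerprint FAFB085C D977155C
whose output should match the two fingerprints above character for character.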
Creative Commons License
-Copyright © 2006-2012 Ricardo Garcia Gonzalez
+Copyright © 2006-2013 Ricardo Garcia Gonzalez diff --git a/download.html.in b/download.html.in index 05d7e4f5f..a0d4fab30 100644 --- a/download.html.in +++ b/download.html.in @@ -50,6 +50,6 @@ rm youtube-dl.sig
Creative Commons License
-Copyright © 2006-2012 Ricardo Garcia Gonzalez +Copyright © 2006-2013 Ricardo Garcia Gonzalez diff --git a/faq.html b/faq.html index 59490460d..6d70ee3c4 100644 --- a/faq.html +++ b/faq.html @@ -85,6 +85,6 @@ means you're using an outdated version of Python. Please update to Python 2.6 or Creative Commons License
-Copyright © 2006-2012 Ricardo Garcia Gonzalez +Copyright © 2006-2013 Ricardo Garcia Gonzalez diff --git a/index.html b/index.html index 55ac5a382..904b4d4d4 100644 --- a/index.html +++ b/index.html @@ -31,6 +31,6 @@ You can also contact us on the irc channel Creative Commons License
-Copyright © 2006-2012 Ricardo Garcia Gonzalez +Copyright © 2006-2013 Ricardo Garcia Gonzalez diff --git a/test_coverage/coverage_html.js b/test_coverage/coverage_html.js new file mode 100644 index 000000000..b24006d25 --- /dev/null +++ b/test_coverage/coverage_html.js @@ -0,0 +1,376 @@ +// Coverage.py HTML report browser code. +/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */ +/*global coverage: true, document, window, $ */ + +coverage = {}; + +// Find all the elements with shortkey_* class, and use them to assign a shotrtcut key. +coverage.assign_shortkeys = function () { + $("*[class*='shortkey_']").each(function (i, e) { + $.each($(e).attr("class").split(" "), function (i, c) { + if (/^shortkey_/.test(c)) { + $(document).bind('keydown', c.substr(9), function () { + $(e).click(); + }); + } + }); + }); +}; + +// Create the events for the help panel. +coverage.wire_up_help_panel = function () { + $("#keyboard_icon").click(function () { + // Show the help panel, and position it so the keyboard icon in the + // panel is in the same place as the keyboard icon in the header. + $(".help_panel").show(); + var koff = $("#keyboard_icon").offset(); + var poff = $("#panel_icon").position(); + $(".help_panel").offset({ + top: koff.top-poff.top, + left: koff.left-poff.left + }); + }); + $("#panel_icon").click(function () { + $(".help_panel").hide(); + }); +}; + +// Loaded on index.html +coverage.index_ready = function ($) { + // Look for a cookie containing previous sort settings: + var sort_list = []; + var cookie_name = "COVERAGE_INDEX_SORT"; + var i; + + // This almost makes it worth installing the jQuery cookie plugin: + if (document.cookie.indexOf(cookie_name) > -1) { + var cookies = document.cookie.split(";"); + for (i = 0; i < cookies.length; i++) { + var parts = cookies[i].split("="); + + if ($.trim(parts[0]) === cookie_name && parts[1]) { + sort_list = eval("[[" + parts[1] + "]]"); + break; + } + } + } + + // Create a new widget which exists only to save and restore + // the sort order: + $.tablesorter.addWidget({ + id: "persistentSort", + + // Format is called by the widget before displaying: + format: function (table) { + if (table.config.sortList.length === 0 && sort_list.length > 0) { + // This table hasn't been sorted before - we'll use + // our stored settings: + $(table).trigger('sorton', [sort_list]); + } + else { + // This is not the first load - something has + // already defined sorting so we'll just update + // our stored value to match: + sort_list = table.config.sortList; + } + } + }); + + // Configure our tablesorter to handle the variable number of + // columns produced depending on report options: + var headers = []; + var col_count = $("table.index > thead > tr > th").length; + + headers[0] = { sorter: 'text' }; + for (i = 1; i < col_count-1; i++) { + headers[i] = { sorter: 'digit' }; + } + headers[col_count-1] = { sorter: 'percent' }; + + // Enable the table sorter: + $("table.index").tablesorter({ + widgets: ['persistentSort'], + headers: headers + }); + + coverage.assign_shortkeys(); + coverage.wire_up_help_panel(); + + // Watch for page unload events so we can save the final sort settings: + $(window).unload(function () { + document.cookie = cookie_name + "=" + sort_list.toString() + "; path=/"; + }); +}; + +// -- pyfile stuff -- + +coverage.pyfile_ready = function ($) { + // If we're directed to a particular line number, highlight the line. 
+ var frag = location.hash; + if (frag.length > 2 && frag[1] === 'n') { + $(frag).addClass('highlight'); + coverage.set_sel(parseInt(frag.substr(2), 10)); + } + else { + coverage.set_sel(0); + } + + $(document) + .bind('keydown', 'j', coverage.to_next_chunk_nicely) + .bind('keydown', 'k', coverage.to_prev_chunk_nicely) + .bind('keydown', '0', coverage.to_top) + .bind('keydown', '1', coverage.to_first_chunk) + ; + + $(".button_toggle_run").click(function (evt) {coverage.toggle_lines(evt.target, "run");}); + $(".button_toggle_exc").click(function (evt) {coverage.toggle_lines(evt.target, "exc");}); + $(".button_toggle_mis").click(function (evt) {coverage.toggle_lines(evt.target, "mis");}); + $(".button_toggle_par").click(function (evt) {coverage.toggle_lines(evt.target, "par");}); + + coverage.assign_shortkeys(); + coverage.wire_up_help_panel(); +}; + +coverage.toggle_lines = function (btn, cls) { + btn = $(btn); + var hide = "hide_"+cls; + if (btn.hasClass(hide)) { + $("#source ."+cls).removeClass(hide); + btn.removeClass(hide); + } + else { + $("#source ."+cls).addClass(hide); + btn.addClass(hide); + } +}; + +// Return the nth line div. +coverage.line_elt = function (n) { + return $("#t" + n); +}; + +// Return the nth line number div. +coverage.num_elt = function (n) { + return $("#n" + n); +}; + +// Return the container of all the code. +coverage.code_container = function () { + return $(".linenos"); +}; + +// Set the selection. b and e are line numbers. +coverage.set_sel = function (b, e) { + // The first line selected. + coverage.sel_begin = b; + // The next line not selected. + coverage.sel_end = (e === undefined) ? b+1 : e; +}; + +coverage.to_top = function () { + coverage.set_sel(0, 1); + coverage.scroll_window(0); +}; + +coverage.to_first_chunk = function () { + coverage.set_sel(0, 1); + coverage.to_next_chunk(); +}; + +coverage.is_transparent = function (color) { + // Different browsers return different colors for "none". + return color === "transparent" || color === "rgba(0, 0, 0, 0)"; +}; + +coverage.to_next_chunk = function () { + var c = coverage; + + // Find the start of the next colored chunk. + var probe = c.sel_end; + while (true) { + var probe_line = c.line_elt(probe); + if (probe_line.length === 0) { + return; + } + var color = probe_line.css("background-color"); + if (!c.is_transparent(color)) { + break; + } + probe++; + } + + // There's a next chunk, `probe` points to it. + var begin = probe; + + // Find the end of this chunk. + var next_color = color; + while (next_color === color) { + probe++; + probe_line = c.line_elt(probe); + next_color = probe_line.css("background-color"); + } + c.set_sel(begin, probe); + c.show_selection(); +}; + +coverage.to_prev_chunk = function () { + var c = coverage; + + // Find the end of the prev colored chunk. + var probe = c.sel_begin-1; + var probe_line = c.line_elt(probe); + if (probe_line.length === 0) { + return; + } + var color = probe_line.css("background-color"); + while (probe > 0 && c.is_transparent(color)) { + probe--; + probe_line = c.line_elt(probe); + if (probe_line.length === 0) { + return; + } + color = probe_line.css("background-color"); + } + + // There's a prev chunk, `probe` points to its last line. + var end = probe+1; + + // Find the beginning of this chunk. 
+ var prev_color = color; + while (prev_color === color) { + probe--; + probe_line = c.line_elt(probe); + prev_color = probe_line.css("background-color"); + } + c.set_sel(probe+1, end); + c.show_selection(); +}; + +// Return the line number of the line nearest pixel position pos +coverage.line_at_pos = function (pos) { + var l1 = coverage.line_elt(1), + l2 = coverage.line_elt(2), + result; + if (l1.length && l2.length) { + var l1_top = l1.offset().top, + line_height = l2.offset().top - l1_top, + nlines = (pos - l1_top) / line_height; + if (nlines < 1) { + result = 1; + } + else { + result = Math.ceil(nlines); + } + } + else { + result = 1; + } + return result; +}; + +// Returns 0, 1, or 2: how many of the two ends of the selection are on +// the screen right now? +coverage.selection_ends_on_screen = function () { + if (coverage.sel_begin === 0) { + return 0; + } + + var top = coverage.line_elt(coverage.sel_begin); + var next = coverage.line_elt(coverage.sel_end-1); + + return ( + (top.isOnScreen() ? 1 : 0) + + (next.isOnScreen() ? 1 : 0) + ); +}; + +coverage.to_next_chunk_nicely = function () { + coverage.finish_scrolling(); + if (coverage.selection_ends_on_screen() === 0) { + // The selection is entirely off the screen: select the top line on + // the screen. + var win = $(window); + coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop())); + } + coverage.to_next_chunk(); +}; + +coverage.to_prev_chunk_nicely = function () { + coverage.finish_scrolling(); + if (coverage.selection_ends_on_screen() === 0) { + var win = $(window); + coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop() + win.height())); + } + coverage.to_prev_chunk(); +}; + +// Select line number lineno, or if it is in a colored chunk, select the +// entire chunk +coverage.select_line_or_chunk = function (lineno) { + var c = coverage; + var probe_line = c.line_elt(lineno); + if (probe_line.length === 0) { + return; + } + var the_color = probe_line.css("background-color"); + if (!c.is_transparent(the_color)) { + // The line is in a highlighted chunk. + // Search backward for the first line. + var probe = lineno; + var color = the_color; + while (probe > 0 && color === the_color) { + probe--; + probe_line = c.line_elt(probe); + if (probe_line.length === 0) { + break; + } + color = probe_line.css("background-color"); + } + var begin = probe + 1; + + // Search forward for the last line. + probe = lineno; + color = the_color; + while (color === the_color) { + probe++; + probe_line = c.line_elt(probe); + color = probe_line.css("background-color"); + } + + coverage.set_sel(begin, probe); + } + else { + coverage.set_sel(lineno); + } +}; + +coverage.show_selection = function () { + var c = coverage; + + // Highlight the lines in the chunk + c.code_container().find(".highlight").removeClass("highlight"); + for (var probe = c.sel_begin; probe > 0 && probe < c.sel_end; probe++) { + c.num_elt(probe).addClass("highlight"); + } + + c.scroll_to_selection(); +}; + +coverage.scroll_to_selection = function () { + // Scroll the page if the chunk isn't fully visible. + if (coverage.selection_ends_on_screen() < 2) { + // Need to move the page. 
The html,body trick makes it scroll in all + // browsers, got it from http://stackoverflow.com/questions/3042651 + var top = coverage.line_elt(coverage.sel_begin); + var top_pos = parseInt(top.offset().top, 10); + coverage.scroll_window(top_pos - 30); + } +}; + +coverage.scroll_window = function (to_pos) { + $("html,body").animate({scrollTop: to_pos}, 200); +}; + +coverage.finish_scrolling = function () { + $("html,body").stop(true, true); +}; diff --git a/test_coverage/index.html b/test_coverage/index.html new file mode 100644 index 000000000..2aecc71a3 --- /dev/null +++ b/test_coverage/index.html @@ -0,0 +1,144 @@ + + + + + Coverage report + + + + + + + + + + + + +
+ +

Hot-keys on this page

+
+

n s m x c   change column sorting

+
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Module                       statements   missing   excluded   coverage
Total                              3632      2026          0        44%
youtube_dl                          274       251          0         8%
youtube_dl.FileDownloader           479       208          0        57%
youtube_dl.InfoExtractors          2275      1273          0        44%
youtube_dl.PostProcessor            133       114          0        14%
youtube_dl.update                   130       122          0         6%
youtube_dl.utils                    340        58          0        83%
youtube_dl.version                    1         0          0       100%
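The coverage column is derived from the others, as coverage.py computes it: (statements - missing) / statements, rounded to the nearest percent (lines excluded via "# pragma: no cover" are not counted as statements in the first place). A quick shell check of the totals row:
echo $(( (3632 - 2026) * 100 / 3632 ))   # integer division; prints 44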
+
+ + + + + diff --git a/test_coverage/jquery-1.4.3.min.js b/test_coverage/jquery-1.4.3.min.js new file mode 100644 index 000000000..c941a5f7a --- /dev/null +++ b/test_coverage/jquery-1.4.3.min.js @@ -0,0 +1,166 @@ +/*! + * jQuery JavaScript Library v1.4.3 + * http://jquery.com/ + * + * Copyright 2010, John Resig + * Dual licensed under the MIT or GPL Version 2 licenses. + * http://jquery.org/license + * + * Includes Sizzle.js + * http://sizzlejs.com/ + * Copyright 2010, The Dojo Foundation + * Released under the MIT, BSD, and GPL Licenses. + * + * Date: Thu Oct 14 23:10:06 2010 -0400 + */ +(function(E,A){function U(){return false}function ba(){return true}function ja(a,b,d){d[0].type=a;return c.event.handle.apply(b,d)}function Ga(a){var b,d,e=[],f=[],h,k,l,n,s,v,B,D;k=c.data(this,this.nodeType?"events":"__events__");if(typeof k==="function")k=k.events;if(!(a.liveFired===this||!k||!k.live||a.button&&a.type==="click")){if(a.namespace)D=RegExp("(^|\\.)"+a.namespace.split(".").join("\\.(?:.*\\.)?")+"(\\.|$)");a.liveFired=this;var H=k.live.slice(0);for(n=0;nd)break;a.currentTarget=f.elem;a.data=f.handleObj.data; +a.handleObj=f.handleObj;D=f.handleObj.origHandler.apply(f.elem,arguments);if(D===false||a.isPropagationStopped()){d=f.level;if(D===false)b=false}}return b}}function Y(a,b){return(a&&a!=="*"?a+".":"")+b.replace(Ha,"`").replace(Ia,"&")}function ka(a,b,d){if(c.isFunction(b))return c.grep(a,function(f,h){return!!b.call(f,h,f)===d});else if(b.nodeType)return c.grep(a,function(f){return f===b===d});else if(typeof b==="string"){var e=c.grep(a,function(f){return f.nodeType===1});if(Ja.test(b))return c.filter(b, +e,!d);else b=c.filter(b,e)}return c.grep(a,function(f){return c.inArray(f,b)>=0===d})}function la(a,b){var d=0;b.each(function(){if(this.nodeName===(a[d]&&a[d].nodeName)){var e=c.data(a[d++]),f=c.data(this,e);if(e=e&&e.events){delete f.handle;f.events={};for(var h in e)for(var k in e[h])c.event.add(this,h,e[h][k],e[h][k].data)}}})}function Ka(a,b){b.src?c.ajax({url:b.src,async:false,dataType:"script"}):c.globalEval(b.text||b.textContent||b.innerHTML||"");b.parentNode&&b.parentNode.removeChild(b)} +function ma(a,b,d){var e=b==="width"?a.offsetWidth:a.offsetHeight;if(d==="border")return e;c.each(b==="width"?La:Ma,function(){d||(e-=parseFloat(c.css(a,"padding"+this))||0);if(d==="margin")e+=parseFloat(c.css(a,"margin"+this))||0;else e-=parseFloat(c.css(a,"border"+this+"Width"))||0});return e}function ca(a,b,d,e){if(c.isArray(b)&&b.length)c.each(b,function(f,h){d||Na.test(a)?e(a,h):ca(a+"["+(typeof h==="object"||c.isArray(h)?f:"")+"]",h,d,e)});else if(!d&&b!=null&&typeof b==="object")c.isEmptyObject(b)? 
+e(a,""):c.each(b,function(f,h){ca(a+"["+f+"]",h,d,e)});else e(a,b)}function S(a,b){var d={};c.each(na.concat.apply([],na.slice(0,b)),function(){d[this]=a});return d}function oa(a){if(!da[a]){var b=c("<"+a+">").appendTo("body"),d=b.css("display");b.remove();if(d==="none"||d==="")d="block";da[a]=d}return da[a]}function ea(a){return c.isWindow(a)?a:a.nodeType===9?a.defaultView||a.parentWindow:false}var u=E.document,c=function(){function a(){if(!b.isReady){try{u.documentElement.doScroll("left")}catch(i){setTimeout(a, +1);return}b.ready()}}var b=function(i,r){return new b.fn.init(i,r)},d=E.jQuery,e=E.$,f,h=/^(?:[^<]*(<[\w\W]+>)[^>]*$|#([\w\-]+)$)/,k=/\S/,l=/^\s+/,n=/\s+$/,s=/\W/,v=/\d/,B=/^<(\w+)\s*\/?>(?:<\/\1>)?$/,D=/^[\],:{}\s]*$/,H=/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,w=/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,G=/(?:^|:|,)(?:\s*\[)+/g,M=/(webkit)[ \/]([\w.]+)/,g=/(opera)(?:.*version)?[ \/]([\w.]+)/,j=/(msie) ([\w.]+)/,o=/(mozilla)(?:.*? rv:([\w.]+))?/,m=navigator.userAgent,p=false, +q=[],t,x=Object.prototype.toString,C=Object.prototype.hasOwnProperty,P=Array.prototype.push,N=Array.prototype.slice,R=String.prototype.trim,Q=Array.prototype.indexOf,L={};b.fn=b.prototype={init:function(i,r){var y,z,F;if(!i)return this;if(i.nodeType){this.context=this[0]=i;this.length=1;return this}if(i==="body"&&!r&&u.body){this.context=u;this[0]=u.body;this.selector="body";this.length=1;return this}if(typeof i==="string")if((y=h.exec(i))&&(y[1]||!r))if(y[1]){F=r?r.ownerDocument||r:u;if(z=B.exec(i))if(b.isPlainObject(r)){i= +[u.createElement(z[1])];b.fn.attr.call(i,r,true)}else i=[F.createElement(z[1])];else{z=b.buildFragment([y[1]],[F]);i=(z.cacheable?z.fragment.cloneNode(true):z.fragment).childNodes}return b.merge(this,i)}else{if((z=u.getElementById(y[2]))&&z.parentNode){if(z.id!==y[2])return f.find(i);this.length=1;this[0]=z}this.context=u;this.selector=i;return this}else if(!r&&!s.test(i)){this.selector=i;this.context=u;i=u.getElementsByTagName(i);return b.merge(this,i)}else return!r||r.jquery?(r||f).find(i):b(r).find(i); +else if(b.isFunction(i))return f.ready(i);if(i.selector!==A){this.selector=i.selector;this.context=i.context}return b.makeArray(i,this)},selector:"",jquery:"1.4.3",length:0,size:function(){return this.length},toArray:function(){return N.call(this,0)},get:function(i){return i==null?this.toArray():i<0?this.slice(i)[0]:this[i]},pushStack:function(i,r,y){var z=b();b.isArray(i)?P.apply(z,i):b.merge(z,i);z.prevObject=this;z.context=this.context;if(r==="find")z.selector=this.selector+(this.selector?" 
": +"")+y;else if(r)z.selector=this.selector+"."+r+"("+y+")";return z},each:function(i,r){return b.each(this,i,r)},ready:function(i){b.bindReady();if(b.isReady)i.call(u,b);else q&&q.push(i);return this},eq:function(i){return i===-1?this.slice(i):this.slice(i,+i+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(N.apply(this,arguments),"slice",N.call(arguments).join(","))},map:function(i){return this.pushStack(b.map(this,function(r,y){return i.call(r, +y,r)}))},end:function(){return this.prevObject||b(null)},push:P,sort:[].sort,splice:[].splice};b.fn.init.prototype=b.fn;b.extend=b.fn.extend=function(){var i=arguments[0]||{},r=1,y=arguments.length,z=false,F,I,K,J,fa;if(typeof i==="boolean"){z=i;i=arguments[1]||{};r=2}if(typeof i!=="object"&&!b.isFunction(i))i={};if(y===r){i=this;--r}for(;r0)){if(q){for(var r=0;i=q[r++];)i.call(u,b);q=null}b.fn.triggerHandler&&b(u).triggerHandler("ready")}}},bindReady:function(){if(!p){p=true;if(u.readyState==="complete")return setTimeout(b.ready, +1);if(u.addEventListener){u.addEventListener("DOMContentLoaded",t,false);E.addEventListener("load",b.ready,false)}else if(u.attachEvent){u.attachEvent("onreadystatechange",t);E.attachEvent("onload",b.ready);var i=false;try{i=E.frameElement==null}catch(r){}u.documentElement.doScroll&&i&&a()}}},isFunction:function(i){return b.type(i)==="function"},isArray:Array.isArray||function(i){return b.type(i)==="array"},isWindow:function(i){return i&&typeof i==="object"&&"setInterval"in i},isNaN:function(i){return i== +null||!v.test(i)||isNaN(i)},type:function(i){return i==null?String(i):L[x.call(i)]||"object"},isPlainObject:function(i){if(!i||b.type(i)!=="object"||i.nodeType||b.isWindow(i))return false;if(i.constructor&&!C.call(i,"constructor")&&!C.call(i.constructor.prototype,"isPrototypeOf"))return false;for(var r in i);return r===A||C.call(i,r)},isEmptyObject:function(i){for(var r in i)return false;return true},error:function(i){throw i;},parseJSON:function(i){if(typeof i!=="string"||!i)return null;i=b.trim(i); +if(D.test(i.replace(H,"@").replace(w,"]").replace(G,"")))return E.JSON&&E.JSON.parse?E.JSON.parse(i):(new Function("return "+i))();else b.error("Invalid JSON: "+i)},noop:function(){},globalEval:function(i){if(i&&k.test(i)){var r=u.getElementsByTagName("head")[0]||u.documentElement,y=u.createElement("script");y.type="text/javascript";if(b.support.scriptEval)y.appendChild(u.createTextNode(i));else y.text=i;r.insertBefore(y,r.firstChild);r.removeChild(y)}},nodeName:function(i,r){return i.nodeName&&i.nodeName.toUpperCase()=== +r.toUpperCase()},each:function(i,r,y){var z,F=0,I=i.length,K=I===A||b.isFunction(i);if(y)if(K)for(z in i){if(r.apply(i[z],y)===false)break}else for(;F";a=u.createDocumentFragment();a.appendChild(d.firstChild);c.support.checkClone=a.cloneNode(true).cloneNode(true).lastChild.checked;c(function(){var s=u.createElement("div"); +s.style.width=s.style.paddingLeft="1px";u.body.appendChild(s);c.boxModel=c.support.boxModel=s.offsetWidth===2;if("zoom"in s.style){s.style.display="inline";s.style.zoom=1;c.support.inlineBlockNeedsLayout=s.offsetWidth===2;s.style.display="";s.innerHTML="
";c.support.shrinkWrapBlocks=s.offsetWidth!==2}s.innerHTML="
t
";var v=s.getElementsByTagName("td");c.support.reliableHiddenOffsets=v[0].offsetHeight=== +0;v[0].style.display="";v[1].style.display="none";c.support.reliableHiddenOffsets=c.support.reliableHiddenOffsets&&v[0].offsetHeight===0;s.innerHTML="";u.body.removeChild(s).style.display="none"});a=function(s){var v=u.createElement("div");s="on"+s;var B=s in v;if(!B){v.setAttribute(s,"return;");B=typeof v[s]==="function"}return B};c.support.submitBubbles=a("submit");c.support.changeBubbles=a("change");a=b=d=f=h=null}})();c.props={"for":"htmlFor","class":"className",readonly:"readOnly",maxlength:"maxLength", +cellspacing:"cellSpacing",rowspan:"rowSpan",colspan:"colSpan",tabindex:"tabIndex",usemap:"useMap",frameborder:"frameBorder"};var pa={},Oa=/^(?:\{.*\}|\[.*\])$/;c.extend({cache:{},uuid:0,expando:"jQuery"+c.now(),noData:{embed:true,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:true},data:function(a,b,d){if(c.acceptData(a)){a=a==E?pa:a;var e=a.nodeType,f=e?a[c.expando]:null,h=c.cache;if(!(e&&!f&&typeof b==="string"&&d===A)){if(e)f||(a[c.expando]=f=++c.uuid);else h=a;if(typeof b==="object")if(e)h[f]= +c.extend(h[f],b);else c.extend(h,b);else if(e&&!h[f])h[f]={};a=e?h[f]:h;if(d!==A)a[b]=d;return typeof b==="string"?a[b]:a}}},removeData:function(a,b){if(c.acceptData(a)){a=a==E?pa:a;var d=a.nodeType,e=d?a[c.expando]:a,f=c.cache,h=d?f[e]:e;if(b){if(h){delete h[b];d&&c.isEmptyObject(h)&&c.removeData(a)}}else if(d&&c.support.deleteExpando)delete a[c.expando];else if(a.removeAttribute)a.removeAttribute(c.expando);else if(d)delete f[e];else for(var k in a)delete a[k]}},acceptData:function(a){if(a.nodeName){var b= +c.noData[a.nodeName.toLowerCase()];if(b)return!(b===true||a.getAttribute("classid")!==b)}return true}});c.fn.extend({data:function(a,b){if(typeof a==="undefined")return this.length?c.data(this[0]):null;else if(typeof a==="object")return this.each(function(){c.data(this,a)});var d=a.split(".");d[1]=d[1]?"."+d[1]:"";if(b===A){var e=this.triggerHandler("getData"+d[1]+"!",[d[0]]);if(e===A&&this.length){e=c.data(this[0],a);if(e===A&&this[0].nodeType===1){e=this[0].getAttribute("data-"+a);if(typeof e=== +"string")try{e=e==="true"?true:e==="false"?false:e==="null"?null:!c.isNaN(e)?parseFloat(e):Oa.test(e)?c.parseJSON(e):e}catch(f){}else e=A}}return e===A&&d[1]?this.data(d[0]):e}else return this.each(function(){var h=c(this),k=[d[0],b];h.triggerHandler("setData"+d[1]+"!",k);c.data(this,a,b);h.triggerHandler("changeData"+d[1]+"!",k)})},removeData:function(a){return this.each(function(){c.removeData(this,a)})}});c.extend({queue:function(a,b,d){if(a){b=(b||"fx")+"queue";var e=c.data(a,b);if(!d)return e|| +[];if(!e||c.isArray(d))e=c.data(a,b,c.makeArray(d));else e.push(d);return e}},dequeue:function(a,b){b=b||"fx";var d=c.queue(a,b),e=d.shift();if(e==="inprogress")e=d.shift();if(e){b==="fx"&&d.unshift("inprogress");e.call(a,function(){c.dequeue(a,b)})}}});c.fn.extend({queue:function(a,b){if(typeof a!=="string"){b=a;a="fx"}if(b===A)return c.queue(this[0],a);return this.each(function(){var d=c.queue(this,a,b);a==="fx"&&d[0]!=="inprogress"&&c.dequeue(this,a)})},dequeue:function(a){return this.each(function(){c.dequeue(this, +a)})},delay:function(a,b){a=c.fx?c.fx.speeds[a]||a:a;b=b||"fx";return this.queue(b,function(){var d=this;setTimeout(function(){c.dequeue(d,b)},a)})},clearQueue:function(a){return this.queue(a||"fx",[])}});var 
qa=/[\n\t]/g,ga=/\s+/,Pa=/\r/g,Qa=/^(?:href|src|style)$/,Ra=/^(?:button|input)$/i,Sa=/^(?:button|input|object|select|textarea)$/i,Ta=/^a(?:rea)?$/i,ra=/^(?:radio|checkbox)$/i;c.fn.extend({attr:function(a,b){return c.access(this,a,b,true,c.attr)},removeAttr:function(a){return this.each(function(){c.attr(this, +a,"");this.nodeType===1&&this.removeAttribute(a)})},addClass:function(a){if(c.isFunction(a))return this.each(function(s){var v=c(this);v.addClass(a.call(this,s,v.attr("class")))});if(a&&typeof a==="string")for(var b=(a||"").split(ga),d=0,e=this.length;d-1)return true;return false}, +val:function(a){if(!arguments.length){var b=this[0];if(b){if(c.nodeName(b,"option")){var d=b.attributes.value;return!d||d.specified?b.value:b.text}if(c.nodeName(b,"select")){var e=b.selectedIndex;d=[];var f=b.options;b=b.type==="select-one";if(e<0)return null;var h=b?e:0;for(e=b?e+1:f.length;h=0;else if(c.nodeName(this,"select")){var B=c.makeArray(v);c("option",this).each(function(){this.selected= +c.inArray(c(this).val(),B)>=0});if(!B.length)this.selectedIndex=-1}else this.value=v}})}});c.extend({attrFn:{val:true,css:true,html:true,text:true,data:true,width:true,height:true,offset:true},attr:function(a,b,d,e){if(!a||a.nodeType===3||a.nodeType===8)return A;if(e&&b in c.attrFn)return c(a)[b](d);e=a.nodeType!==1||!c.isXMLDoc(a);var f=d!==A;b=e&&c.props[b]||b;if(a.nodeType===1){var h=Qa.test(b);if((b in a||a[b]!==A)&&e&&!h){if(f){b==="type"&&Ra.test(a.nodeName)&&a.parentNode&&c.error("type property can't be changed"); +if(d===null)a.nodeType===1&&a.removeAttribute(b);else a[b]=d}if(c.nodeName(a,"form")&&a.getAttributeNode(b))return a.getAttributeNode(b).nodeValue;if(b==="tabIndex")return(b=a.getAttributeNode("tabIndex"))&&b.specified?b.value:Sa.test(a.nodeName)||Ta.test(a.nodeName)&&a.href?0:A;return a[b]}if(!c.support.style&&e&&b==="style"){if(f)a.style.cssText=""+d;return a.style.cssText}f&&a.setAttribute(b,""+d);if(!a.attributes[b]&&a.hasAttribute&&!a.hasAttribute(b))return A;a=!c.support.hrefNormalized&&e&& +h?a.getAttribute(b,2):a.getAttribute(b);return a===null?A:a}}});var X=/\.(.*)$/,ha=/^(?:textarea|input|select)$/i,Ha=/\./g,Ia=/ /g,Ua=/[^\w\s.|`]/g,Va=function(a){return a.replace(Ua,"\\$&")},sa={focusin:0,focusout:0};c.event={add:function(a,b,d,e){if(!(a.nodeType===3||a.nodeType===8)){if(c.isWindow(a)&&a!==E&&!a.frameElement)a=E;if(d===false)d=U;var f,h;if(d.handler){f=d;d=f.handler}if(!d.guid)d.guid=c.guid++;if(h=c.data(a)){var k=a.nodeType?"events":"__events__",l=h[k],n=h.handle;if(typeof l=== +"function"){n=l.handle;l=l.events}else if(!l){a.nodeType||(h[k]=h=function(){});h.events=l={}}if(!n)h.handle=n=function(){return typeof c!=="undefined"&&!c.event.triggered?c.event.handle.apply(n.elem,arguments):A};n.elem=a;b=b.split(" ");for(var s=0,v;k=b[s++];){h=f?c.extend({},f):{handler:d,data:e};if(k.indexOf(".")>-1){v=k.split(".");k=v.shift();h.namespace=v.slice(0).sort().join(".")}else{v=[];h.namespace=""}h.type=k;if(!h.guid)h.guid=d.guid;var B=l[k],D=c.event.special[k]||{};if(!B){B=l[k]=[]; +if(!D.setup||D.setup.call(a,e,v,n)===false)if(a.addEventListener)a.addEventListener(k,n,false);else a.attachEvent&&a.attachEvent("on"+k,n)}if(D.add){D.add.call(a,h);if(!h.handler.guid)h.handler.guid=d.guid}B.push(h);c.event.global[k]=true}a=null}}},global:{},remove:function(a,b,d,e){if(!(a.nodeType===3||a.nodeType===8)){if(d===false)d=U;var f,h,k=0,l,n,s,v,B,D,H=a.nodeType?"events":"__events__",w=c.data(a),G=w&&w[H];if(w&&G){if(typeof G==="function"){w=G;G=G.events}if(b&&b.type){d=b.handler;b=b.type}if(!b|| 
+typeof b==="string"&&b.charAt(0)==="."){b=b||"";for(f in G)c.event.remove(a,f+b)}else{for(b=b.split(" ");f=b[k++];){v=f;l=f.indexOf(".")<0;n=[];if(!l){n=f.split(".");f=n.shift();s=RegExp("(^|\\.)"+c.map(n.slice(0).sort(),Va).join("\\.(?:.*\\.)?")+"(\\.|$)")}if(B=G[f])if(d){v=c.event.special[f]||{};for(h=e||0;h=0){a.type= +f=f.slice(0,-1);a.exclusive=true}if(!d){a.stopPropagation();c.event.global[f]&&c.each(c.cache,function(){this.events&&this.events[f]&&c.event.trigger(a,b,this.handle.elem)})}if(!d||d.nodeType===3||d.nodeType===8)return A;a.result=A;a.target=d;b=c.makeArray(b);b.unshift(a)}a.currentTarget=d;(e=d.nodeType?c.data(d,"handle"):(c.data(d,"__events__")||{}).handle)&&e.apply(d,b);e=d.parentNode||d.ownerDocument;try{if(!(d&&d.nodeName&&c.noData[d.nodeName.toLowerCase()]))if(d["on"+f]&&d["on"+f].apply(d,b)=== +false){a.result=false;a.preventDefault()}}catch(h){}if(!a.isPropagationStopped()&&e)c.event.trigger(a,b,e,true);else if(!a.isDefaultPrevented()){e=a.target;var k,l=f.replace(X,""),n=c.nodeName(e,"a")&&l==="click",s=c.event.special[l]||{};if((!s._default||s._default.call(d,a)===false)&&!n&&!(e&&e.nodeName&&c.noData[e.nodeName.toLowerCase()])){try{if(e[l]){if(k=e["on"+l])e["on"+l]=null;c.event.triggered=true;e[l]()}}catch(v){}if(k)e["on"+l]=k;c.event.triggered=false}}},handle:function(a){var b,d,e; +d=[];var f,h=c.makeArray(arguments);a=h[0]=c.event.fix(a||E.event);a.currentTarget=this;b=a.type.indexOf(".")<0&&!a.exclusive;if(!b){e=a.type.split(".");a.type=e.shift();d=e.slice(0).sort();e=RegExp("(^|\\.)"+d.join("\\.(?:.*\\.)?")+"(\\.|$)")}a.namespace=a.namespace||d.join(".");f=c.data(this,this.nodeType?"events":"__events__");if(typeof f==="function")f=f.events;d=(f||{})[a.type];if(f&&d){d=d.slice(0);f=0;for(var k=d.length;f-1?c.map(a.options,function(e){return e.selected}).join("-"):"";else if(a.nodeName.toLowerCase()==="select")d=a.selectedIndex;return d},Z=function(a,b){var d=a.target,e,f;if(!(!ha.test(d.nodeName)||d.readOnly)){e=c.data(d,"_change_data");f=va(d);if(a.type!=="focusout"||d.type!=="radio")c.data(d,"_change_data",f);if(!(e===A||f===e))if(e!=null||f){a.type="change";a.liveFired= +A;return c.event.trigger(a,b,d)}}};c.event.special.change={filters:{focusout:Z,beforedeactivate:Z,click:function(a){var b=a.target,d=b.type;if(d==="radio"||d==="checkbox"||b.nodeName.toLowerCase()==="select")return Z.call(this,a)},keydown:function(a){var b=a.target,d=b.type;if(a.keyCode===13&&b.nodeName.toLowerCase()!=="textarea"||a.keyCode===32&&(d==="checkbox"||d==="radio")||d==="select-multiple")return Z.call(this,a)},beforeactivate:function(a){a=a.target;c.data(a,"_change_data",va(a))}},setup:function(){if(this.type=== +"file")return false;for(var a in V)c.event.add(this,a+".specialChange",V[a]);return ha.test(this.nodeName)},teardown:function(){c.event.remove(this,".specialChange");return ha.test(this.nodeName)}};V=c.event.special.change.filters;V.focus=V.beforeactivate}u.addEventListener&&c.each({focus:"focusin",blur:"focusout"},function(a,b){function d(e){e=c.event.fix(e);e.type=b;return c.event.trigger(e,null,e.target)}c.event.special[b]={setup:function(){sa[b]++===0&&u.addEventListener(a,d,true)},teardown:function(){--sa[b]=== +0&&u.removeEventListener(a,d,true)}}});c.each(["bind","one"],function(a,b){c.fn[b]=function(d,e,f){if(typeof d==="object"){for(var h in d)this[b](h,e,d[h],f);return this}if(c.isFunction(e)||e===false){f=e;e=A}var k=b==="one"?c.proxy(f,function(n){c(this).unbind(n,k);return 
f.apply(this,arguments)}):f;if(d==="unload"&&b!=="one")this.one(d,e,f);else{h=0;for(var l=this.length;h0?this.bind(b,d,e):this.trigger(b)};if(c.attrFn)c.attrFn[b]=true});E.attachEvent&&!E.addEventListener&&c(E).bind("unload",function(){for(var a in c.cache)if(c.cache[a].handle)try{c.event.remove(c.cache[a].handle.elem)}catch(b){}}); +(function(){function a(g,j,o,m,p,q){p=0;for(var t=m.length;p0){C=x;break}}x=x[g]}m[p]=C}}}var d=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^\[\]]*\]|['"][^'"]*['"]|[^\[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,e=0,f=Object.prototype.toString,h=false,k=true;[0,0].sort(function(){k=false;return 0});var l=function(g,j,o,m){o=o||[];var p=j=j||u;if(j.nodeType!==1&&j.nodeType!==9)return[];if(!g||typeof g!=="string")return o;var q=[],t,x,C,P,N=true,R=l.isXML(j),Q=g,L;do{d.exec("");if(t=d.exec(Q)){Q=t[3];q.push(t[1]);if(t[2]){P=t[3]; +break}}}while(t);if(q.length>1&&s.exec(g))if(q.length===2&&n.relative[q[0]])x=M(q[0]+q[1],j);else for(x=n.relative[q[0]]?[j]:l(q.shift(),j);q.length;){g=q.shift();if(n.relative[g])g+=q.shift();x=M(g,x)}else{if(!m&&q.length>1&&j.nodeType===9&&!R&&n.match.ID.test(q[0])&&!n.match.ID.test(q[q.length-1])){t=l.find(q.shift(),j,R);j=t.expr?l.filter(t.expr,t.set)[0]:t.set[0]}if(j){t=m?{expr:q.pop(),set:D(m)}:l.find(q.pop(),q.length===1&&(q[0]==="~"||q[0]==="+")&&j.parentNode?j.parentNode:j,R);x=t.expr?l.filter(t.expr, +t.set):t.set;if(q.length>0)C=D(x);else N=false;for(;q.length;){t=L=q.pop();if(n.relative[L])t=q.pop();else L="";if(t==null)t=j;n.relative[L](C,t,R)}}else C=[]}C||(C=x);C||l.error(L||g);if(f.call(C)==="[object Array]")if(N)if(j&&j.nodeType===1)for(g=0;C[g]!=null;g++){if(C[g]&&(C[g]===true||C[g].nodeType===1&&l.contains(j,C[g])))o.push(x[g])}else for(g=0;C[g]!=null;g++)C[g]&&C[g].nodeType===1&&o.push(x[g]);else o.push.apply(o,C);else D(C,o);if(P){l(P,p,o,m);l.uniqueSort(o)}return o};l.uniqueSort=function(g){if(w){h= +k;g.sort(w);if(h)for(var j=1;j0};l.find=function(g,j,o){var m;if(!g)return[];for(var p=0,q=n.order.length;p":function(g,j){var o=typeof j==="string",m,p=0,q=g.length;if(o&&!/\W/.test(j))for(j=j.toLowerCase();p=0))o||m.push(t);else if(o)j[q]=false;return false},ID:function(g){return g[1].replace(/\\/g,"")},TAG:function(g){return g[1].toLowerCase()},CHILD:function(g){if(g[1]==="nth"){var j=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(g[2]==="even"&&"2n"||g[2]==="odd"&&"2n+1"||!/\D/.test(g[2])&&"0n+"+g[2]||g[2]);g[2]=j[1]+(j[2]||1)-0;g[3]=j[3]-0}g[0]=e++;return g},ATTR:function(g,j,o, +m,p,q){j=g[1].replace(/\\/g,"");if(!q&&n.attrMap[j])g[1]=n.attrMap[j];if(g[2]==="~=")g[4]=" "+g[4]+" ";return g},PSEUDO:function(g,j,o,m,p){if(g[1]==="not")if((d.exec(g[3])||"").length>1||/^\w/.test(g[3]))g[3]=l(g[3],null,null,j);else{g=l.filter(g[3],j,o,true^p);o||m.push.apply(m,g);return false}else if(n.match.POS.test(g[0])||n.match.CHILD.test(g[0]))return true;return g},POS:function(g){g.unshift(true);return g}},filters:{enabled:function(g){return g.disabled===false&&g.type!=="hidden"},disabled:function(g){return g.disabled=== +true},checked:function(g){return g.checked===true},selected:function(g){return 
g.selected===true},parent:function(g){return!!g.firstChild},empty:function(g){return!g.firstChild},has:function(g,j,o){return!!l(o[3],g).length},header:function(g){return/h\d/i.test(g.nodeName)},text:function(g){return"text"===g.type},radio:function(g){return"radio"===g.type},checkbox:function(g){return"checkbox"===g.type},file:function(g){return"file"===g.type},password:function(g){return"password"===g.type},submit:function(g){return"submit"=== +g.type},image:function(g){return"image"===g.type},reset:function(g){return"reset"===g.type},button:function(g){return"button"===g.type||g.nodeName.toLowerCase()==="button"},input:function(g){return/input|select|textarea|button/i.test(g.nodeName)}},setFilters:{first:function(g,j){return j===0},last:function(g,j,o,m){return j===m.length-1},even:function(g,j){return j%2===0},odd:function(g,j){return j%2===1},lt:function(g,j,o){return jo[3]-0},nth:function(g,j,o){return o[3]- +0===j},eq:function(g,j,o){return o[3]-0===j}},filter:{PSEUDO:function(g,j,o,m){var p=j[1],q=n.filters[p];if(q)return q(g,o,j,m);else if(p==="contains")return(g.textContent||g.innerText||l.getText([g])||"").indexOf(j[3])>=0;else if(p==="not"){j=j[3];o=0;for(m=j.length;o=0}},ID:function(g,j){return g.nodeType===1&&g.getAttribute("id")===j},TAG:function(g,j){return j==="*"&&g.nodeType===1||g.nodeName.toLowerCase()=== +j},CLASS:function(g,j){return(" "+(g.className||g.getAttribute("class"))+" ").indexOf(j)>-1},ATTR:function(g,j){var o=j[1];o=n.attrHandle[o]?n.attrHandle[o](g):g[o]!=null?g[o]:g.getAttribute(o);var m=o+"",p=j[2],q=j[4];return o==null?p==="!=":p==="="?m===q:p==="*="?m.indexOf(q)>=0:p==="~="?(" "+m+" ").indexOf(q)>=0:!q?m&&o!==false:p==="!="?m!==q:p==="^="?m.indexOf(q)===0:p==="$="?m.substr(m.length-q.length)===q:p==="|="?m===q||m.substr(0,q.length+1)===q+"-":false},POS:function(g,j,o,m){var p=n.setFilters[j[2]]; +if(p)return p(g,o,j,m)}}},s=n.match.POS,v=function(g,j){return"\\"+(j-0+1)},B;for(B in n.match){n.match[B]=RegExp(n.match[B].source+/(?![^\[]*\])(?![^\(]*\))/.source);n.leftMatch[B]=RegExp(/(^(?:.|\r|\n)*?)/.source+n.match[B].source.replace(/\\(\d+)/g,v))}var D=function(g,j){g=Array.prototype.slice.call(g,0);if(j){j.push.apply(j,g);return j}return g};try{Array.prototype.slice.call(u.documentElement.childNodes,0)}catch(H){D=function(g,j){var o=j||[],m=0;if(f.call(g)==="[object Array]")Array.prototype.push.apply(o, +g);else if(typeof g.length==="number")for(var p=g.length;m";var o=u.documentElement;o.insertBefore(g,o.firstChild);if(u.getElementById(j)){n.find.ID=function(m,p,q){if(typeof p.getElementById!=="undefined"&&!q)return(p=p.getElementById(m[1]))?p.id===m[1]||typeof p.getAttributeNode!=="undefined"&&p.getAttributeNode("id").nodeValue===m[1]?[p]:A:[]};n.filter.ID=function(m,p){var q=typeof m.getAttributeNode!=="undefined"&&m.getAttributeNode("id");return m.nodeType===1&&q&&q.nodeValue===p}}o.removeChild(g); +o=g=null})();(function(){var g=u.createElement("div");g.appendChild(u.createComment(""));if(g.getElementsByTagName("*").length>0)n.find.TAG=function(j,o){var m=o.getElementsByTagName(j[1]);if(j[1]==="*"){for(var p=[],q=0;m[q];q++)m[q].nodeType===1&&p.push(m[q]);m=p}return m};g.innerHTML="";if(g.firstChild&&typeof g.firstChild.getAttribute!=="undefined"&&g.firstChild.getAttribute("href")!=="#")n.attrHandle.href=function(j){return j.getAttribute("href",2)};g=null})();u.querySelectorAll&& +function(){var g=l,j=u.createElement("div");j.innerHTML="

";if(!(j.querySelectorAll&&j.querySelectorAll(".TEST").length===0)){l=function(m,p,q,t){p=p||u;if(!t&&!l.isXML(p))if(p.nodeType===9)try{return D(p.querySelectorAll(m),q)}catch(x){}else if(p.nodeType===1&&p.nodeName.toLowerCase()!=="object"){var C=p.id,P=p.id="__sizzle__";try{return D(p.querySelectorAll("#"+P+" "+m),q)}catch(N){}finally{if(C)p.id=C;else p.removeAttribute("id")}}return g(m,p,q,t)};for(var o in g)l[o]=g[o]; +j=null}}();(function(){var g=u.documentElement,j=g.matchesSelector||g.mozMatchesSelector||g.webkitMatchesSelector||g.msMatchesSelector,o=false;try{j.call(u.documentElement,":sizzle")}catch(m){o=true}if(j)l.matchesSelector=function(p,q){try{if(o||!n.match.PSEUDO.test(q))return j.call(p,q)}catch(t){}return l(q,null,null,[p]).length>0}})();(function(){var g=u.createElement("div");g.innerHTML="
";if(!(!g.getElementsByClassName||g.getElementsByClassName("e").length=== +0)){g.lastChild.className="e";if(g.getElementsByClassName("e").length!==1){n.order.splice(1,0,"CLASS");n.find.CLASS=function(j,o,m){if(typeof o.getElementsByClassName!=="undefined"&&!m)return o.getElementsByClassName(j[1])};g=null}}})();l.contains=u.documentElement.contains?function(g,j){return g!==j&&(g.contains?g.contains(j):true)}:function(g,j){return!!(g.compareDocumentPosition(j)&16)};l.isXML=function(g){return(g=(g?g.ownerDocument||g:0).documentElement)?g.nodeName!=="HTML":false};var M=function(g, +j){for(var o=[],m="",p,q=j.nodeType?[j]:j;p=n.match.PSEUDO.exec(g);){m+=p[0];g=g.replace(n.match.PSEUDO,"")}g=n.relative[g]?g+"*":g;p=0;for(var t=q.length;p0)for(var h=d;h0},closest:function(a, +b){var d=[],e,f,h=this[0];if(c.isArray(a)){var k={},l,n=1;if(h&&a.length){e=0;for(f=a.length;e-1:c(h).is(e))d.push({selector:l,elem:h,level:n})}h=h.parentNode;n++}}return d}k=$a.test(a)?c(a,b||this.context):null;e=0;for(f=this.length;e-1:c.find.matchesSelector(h,a)){d.push(h);break}else{h=h.parentNode;if(!h|| +!h.ownerDocument||h===b)break}d=d.length>1?c.unique(d):d;return this.pushStack(d,"closest",a)},index:function(a){if(!a||typeof a==="string")return c.inArray(this[0],a?c(a):this.parent().children());return c.inArray(a.jquery?a[0]:a,this)},add:function(a,b){var d=typeof a==="string"?c(a,b||this.context):c.makeArray(a),e=c.merge(this.get(),d);return this.pushStack(!d[0]||!d[0].parentNode||d[0].parentNode.nodeType===11||!e[0]||!e[0].parentNode||e[0].parentNode.nodeType===11?e:c.unique(e))},andSelf:function(){return this.add(this.prevObject)}}); +c.each({parent:function(a){return(a=a.parentNode)&&a.nodeType!==11?a:null},parents:function(a){return c.dir(a,"parentNode")},parentsUntil:function(a,b,d){return c.dir(a,"parentNode",d)},next:function(a){return c.nth(a,2,"nextSibling")},prev:function(a){return c.nth(a,2,"previousSibling")},nextAll:function(a){return c.dir(a,"nextSibling")},prevAll:function(a){return c.dir(a,"previousSibling")},nextUntil:function(a,b,d){return c.dir(a,"nextSibling",d)},prevUntil:function(a,b,d){return c.dir(a,"previousSibling", +d)},siblings:function(a){return c.sibling(a.parentNode.firstChild,a)},children:function(a){return c.sibling(a.firstChild)},contents:function(a){return c.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:c.makeArray(a.childNodes)}},function(a,b){c.fn[a]=function(d,e){var f=c.map(this,b,d);Wa.test(a)||(e=d);if(e&&typeof e==="string")f=c.filter(e,f);f=this.length>1?c.unique(f):f;if((this.length>1||Ya.test(e))&&Xa.test(a))f=f.reverse();return this.pushStack(f,a,Za.call(arguments).join(","))}}); +c.extend({filter:function(a,b,d){if(d)a=":not("+a+")";return b.length===1?c.find.matchesSelector(b[0],a)?[b[0]]:[]:c.find.matches(a,b)},dir:function(a,b,d){var e=[];for(a=a[b];a&&a.nodeType!==9&&(d===A||a.nodeType!==1||!c(a).is(d));){a.nodeType===1&&e.push(a);a=a[b]}return e},nth:function(a,b,d){b=b||1;for(var e=0;a;a=a[d])if(a.nodeType===1&&++e===b)break;return a},sibling:function(a,b){for(var d=[];a;a=a.nextSibling)a.nodeType===1&&a!==b&&d.push(a);return d}});var xa=/ jQuery\d+="(?:\d+|null)"/g, +$=/^\s+/,ya=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/ig,za=/<([\w:]+)/,ab=/\s]+\/)>/g,O={option:[1,""],legend:[1,"
","
"],thead:[1,"","
"],tr:[2,"","
"],td:[3,"","
"],col:[2,"","
"], +area:[1,"",""],_default:[0,"",""]};O.optgroup=O.option;O.tbody=O.tfoot=O.colgroup=O.caption=O.thead;O.th=O.td;if(!c.support.htmlSerialize)O._default=[1,"div
","
"];c.fn.extend({text:function(a){if(c.isFunction(a))return this.each(function(b){var d=c(this);d.text(a.call(this,b,d.text()))});if(typeof a!=="object"&&a!==A)return this.empty().append((this[0]&&this[0].ownerDocument||u).createTextNode(a));return c.text(this)},wrapAll:function(a){if(c.isFunction(a))return this.each(function(d){c(this).wrapAll(a.call(this, +d))});if(this[0]){var b=c(a,this[0].ownerDocument).eq(0).clone(true);this[0].parentNode&&b.insertBefore(this[0]);b.map(function(){for(var d=this;d.firstChild&&d.firstChild.nodeType===1;)d=d.firstChild;return d}).append(this)}return this},wrapInner:function(a){if(c.isFunction(a))return this.each(function(b){c(this).wrapInner(a.call(this,b))});return this.each(function(){var b=c(this),d=b.contents();d.length?d.wrapAll(a):b.append(a)})},wrap:function(a){return this.each(function(){c(this).wrapAll(a)})}, +unwrap:function(){return this.parent().each(function(){c.nodeName(this,"body")||c(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.appendChild(a)})},prepend:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,this)});else if(arguments.length){var a= +c(arguments[0]);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,this.nextSibling)});else if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,c(arguments[0]).toArray());return a}},remove:function(a,b){for(var d=0,e;(e=this[d])!=null;d++)if(!a||c.filter(a,[e]).length){if(!b&&e.nodeType===1){c.cleanData(e.getElementsByTagName("*")); +c.cleanData([e])}e.parentNode&&e.parentNode.removeChild(e)}return this},empty:function(){for(var a=0,b;(b=this[a])!=null;a++)for(b.nodeType===1&&c.cleanData(b.getElementsByTagName("*"));b.firstChild;)b.removeChild(b.firstChild);return this},clone:function(a){var b=this.map(function(){if(!c.support.noCloneEvent&&!c.isXMLDoc(this)){var d=this.outerHTML,e=this.ownerDocument;if(!d){d=e.createElement("div");d.appendChild(this.cloneNode(true));d=d.innerHTML}return c.clean([d.replace(xa,"").replace(cb,'="$1">').replace($, +"")],e)[0]}else return this.cloneNode(true)});if(a===true){la(this,b);la(this.find("*"),b.find("*"))}return b},html:function(a){if(a===A)return this[0]&&this[0].nodeType===1?this[0].innerHTML.replace(xa,""):null;else if(typeof a==="string"&&!Aa.test(a)&&(c.support.leadingWhitespace||!$.test(a))&&!O[(za.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(ya,"<$1>");try{for(var b=0,d=this.length;b0||e.cacheable||this.length>1?l.cloneNode(true):l)}k.length&&c.each(k,Ka)}return this}});c.buildFragment=function(a,b,d){var e,f,h;b=b&&b[0]?b[0].ownerDocument||b[0]:u;if(a.length===1&&typeof a[0]==="string"&&a[0].length<512&&b===u&&!Aa.test(a[0])&&(c.support.checkClone|| +!Ba.test(a[0]))){f=true;if(h=c.fragments[a[0]])if(h!==1)e=h}if(!e){e=b.createDocumentFragment();c.clean(a,b,e,d)}if(f)c.fragments[a[0]]=h?e:1;return{fragment:e,cacheable:f}};c.fragments={};c.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){c.fn[a]=function(d){var e=[];d=c(d);var 
f=this.length===1&&this[0].parentNode;if(f&&f.nodeType===11&&f.childNodes.length===1&&d.length===1){d[b](this[0]);return this}else{f=0;for(var h= +d.length;f0?this.clone(true):this).get();c(d[f])[b](k);e=e.concat(k)}return this.pushStack(e,a,d.selector)}}});c.extend({clean:function(a,b,d,e){b=b||u;if(typeof b.createElement==="undefined")b=b.ownerDocument||b[0]&&b[0].ownerDocument||u;for(var f=[],h=0,k;(k=a[h])!=null;h++){if(typeof k==="number")k+="";if(k){if(typeof k==="string"&&!bb.test(k))k=b.createTextNode(k);else if(typeof k==="string"){k=k.replace(ya,"<$1>");var l=(za.exec(k)||["",""])[1].toLowerCase(),n=O[l]||O._default, +s=n[0],v=b.createElement("div");for(v.innerHTML=n[1]+k+n[2];s--;)v=v.lastChild;if(!c.support.tbody){s=ab.test(k);l=l==="table"&&!s?v.firstChild&&v.firstChild.childNodes:n[1]===""&&!s?v.childNodes:[];for(n=l.length-1;n>=0;--n)c.nodeName(l[n],"tbody")&&!l[n].childNodes.length&&l[n].parentNode.removeChild(l[n])}!c.support.leadingWhitespace&&$.test(k)&&v.insertBefore(b.createTextNode($.exec(k)[0]),v.firstChild);k=v.childNodes}if(k.nodeType)f.push(k);else f=c.merge(f,k)}}if(d)for(h=0;f[h];h++)if(e&& +c.nodeName(f[h],"script")&&(!f[h].type||f[h].type.toLowerCase()==="text/javascript"))e.push(f[h].parentNode?f[h].parentNode.removeChild(f[h]):f[h]);else{f[h].nodeType===1&&f.splice.apply(f,[h+1,0].concat(c.makeArray(f[h].getElementsByTagName("script"))));d.appendChild(f[h])}return f},cleanData:function(a){for(var b,d,e=c.cache,f=c.event.special,h=c.support.deleteExpando,k=0,l;(l=a[k])!=null;k++)if(!(l.nodeName&&c.noData[l.nodeName.toLowerCase()]))if(d=l[c.expando]){if((b=e[d])&&b.events)for(var n in b.events)f[n]? +c.event.remove(l,n):c.removeEvent(l,n,b.handle);if(h)delete l[c.expando];else l.removeAttribute&&l.removeAttribute(c.expando);delete e[d]}}});var Ca=/alpha\([^)]*\)/i,db=/opacity=([^)]*)/,eb=/-([a-z])/ig,fb=/([A-Z])/g,Da=/^-?\d+(?:px)?$/i,gb=/^-?\d/,hb={position:"absolute",visibility:"hidden",display:"block"},La=["Left","Right"],Ma=["Top","Bottom"],W,ib=u.defaultView&&u.defaultView.getComputedStyle,jb=function(a,b){return b.toUpperCase()};c.fn.css=function(a,b){if(arguments.length===2&&b===A)return this; +return c.access(this,a,b,true,function(d,e,f){return f!==A?c.style(d,e,f):c.css(d,e)})};c.extend({cssHooks:{opacity:{get:function(a,b){if(b){var d=W(a,"opacity","opacity");return d===""?"1":d}else return a.style.opacity}}},cssNumber:{zIndex:true,fontWeight:true,opacity:true,zoom:true,lineHeight:true},cssProps:{"float":c.support.cssFloat?"cssFloat":"styleFloat"},style:function(a,b,d,e){if(!(!a||a.nodeType===3||a.nodeType===8||!a.style)){var f,h=c.camelCase(b),k=a.style,l=c.cssHooks[h];b=c.cssProps[h]|| +h;if(d!==A){if(!(typeof d==="number"&&isNaN(d)||d==null)){if(typeof d==="number"&&!c.cssNumber[h])d+="px";if(!l||!("set"in l)||(d=l.set(a,d))!==A)try{k[b]=d}catch(n){}}}else{if(l&&"get"in l&&(f=l.get(a,false,e))!==A)return f;return k[b]}}},css:function(a,b,d){var e,f=c.camelCase(b),h=c.cssHooks[f];b=c.cssProps[f]||f;if(h&&"get"in h&&(e=h.get(a,true,d))!==A)return e;else if(W)return W(a,b,f)},swap:function(a,b,d){var e={},f;for(f in b){e[f]=a.style[f];a.style[f]=b[f]}d.call(a);for(f in b)a.style[f]= +e[f]},camelCase:function(a){return a.replace(eb,jb)}});c.curCSS=c.css;c.each(["height","width"],function(a,b){c.cssHooks[b]={get:function(d,e,f){var h;if(e){if(d.offsetWidth!==0)h=ma(d,b,f);else c.swap(d,hb,function(){h=ma(d,b,f)});return h+"px"}},set:function(d,e){if(Da.test(e)){e=parseFloat(e);if(e>=0)return e+"px"}else return 
e}}});if(!c.support.opacity)c.cssHooks.opacity={get:function(a,b){return db.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?parseFloat(RegExp.$1)/100+"": +b?"1":""},set:function(a,b){var d=a.style;d.zoom=1;var e=c.isNaN(b)?"":"alpha(opacity="+b*100+")",f=d.filter||"";d.filter=Ca.test(f)?f.replace(Ca,e):d.filter+" "+e}};if(ib)W=function(a,b,d){var e;d=d.replace(fb,"-$1").toLowerCase();if(!(b=a.ownerDocument.defaultView))return A;if(b=b.getComputedStyle(a,null)){e=b.getPropertyValue(d);if(e===""&&!c.contains(a.ownerDocument.documentElement,a))e=c.style(a,d)}return e};else if(u.documentElement.currentStyle)W=function(a,b){var d,e,f=a.currentStyle&&a.currentStyle[b], +h=a.style;if(!Da.test(f)&&gb.test(f)){d=h.left;e=a.runtimeStyle.left;a.runtimeStyle.left=a.currentStyle.left;h.left=b==="fontSize"?"1em":f||0;f=h.pixelLeft+"px";h.left=d;a.runtimeStyle.left=e}return f};if(c.expr&&c.expr.filters){c.expr.filters.hidden=function(a){var b=a.offsetHeight;return a.offsetWidth===0&&b===0||!c.support.reliableHiddenOffsets&&(a.style.display||c.css(a,"display"))==="none"};c.expr.filters.visible=function(a){return!c.expr.filters.hidden(a)}}var kb=c.now(),lb=/)<[^<]*)*<\/script>/gi, +mb=/^(?:select|textarea)/i,nb=/^(?:color|date|datetime|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i,ob=/^(?:GET|HEAD|DELETE)$/,Na=/\[\]$/,T=/\=\?(&|$)/,ia=/\?/,pb=/([?&])_=[^&]*/,qb=/^(\w+:)?\/\/([^\/?#]+)/,rb=/%20/g,sb=/#.*$/,Ea=c.fn.load;c.fn.extend({load:function(a,b,d){if(typeof a!=="string"&&Ea)return Ea.apply(this,arguments);else if(!this.length)return this;var e=a.indexOf(" ");if(e>=0){var f=a.slice(e,a.length);a=a.slice(0,e)}e="GET";if(b)if(c.isFunction(b)){d= +b;b=null}else if(typeof b==="object"){b=c.param(b,c.ajaxSettings.traditional);e="POST"}var h=this;c.ajax({url:a,type:e,dataType:"html",data:b,complete:function(k,l){if(l==="success"||l==="notmodified")h.html(f?c("
").append(k.responseText.replace(lb,"")).find(f):k.responseText);d&&h.each(d,[k.responseText,l,k])}});return this},serialize:function(){return c.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?c.makeArray(this.elements):this}).filter(function(){return this.name&& +!this.disabled&&(this.checked||mb.test(this.nodeName)||nb.test(this.type))}).map(function(a,b){var d=c(this).val();return d==null?null:c.isArray(d)?c.map(d,function(e){return{name:b.name,value:e}}):{name:b.name,value:d}}).get()}});c.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),function(a,b){c.fn[b]=function(d){return this.bind(b,d)}});c.extend({get:function(a,b,d,e){if(c.isFunction(b)){e=e||d;d=b;b=null}return c.ajax({type:"GET",url:a,data:b,success:d,dataType:e})}, +getScript:function(a,b){return c.get(a,null,b,"script")},getJSON:function(a,b,d){return c.get(a,b,d,"json")},post:function(a,b,d,e){if(c.isFunction(b)){e=e||d;d=b;b={}}return c.ajax({type:"POST",url:a,data:b,success:d,dataType:e})},ajaxSetup:function(a){c.extend(c.ajaxSettings,a)},ajaxSettings:{url:location.href,global:true,type:"GET",contentType:"application/x-www-form-urlencoded",processData:true,async:true,xhr:function(){return new E.XMLHttpRequest},accepts:{xml:"application/xml, text/xml",html:"text/html", +script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},ajax:function(a){var b=c.extend(true,{},c.ajaxSettings,a),d,e,f,h=b.type.toUpperCase(),k=ob.test(h);b.url=b.url.replace(sb,"");b.context=a&&a.context!=null?a.context:b;if(b.data&&b.processData&&typeof b.data!=="string")b.data=c.param(b.data,b.traditional);if(b.dataType==="jsonp"){if(h==="GET")T.test(b.url)||(b.url+=(ia.test(b.url)?"&":"?")+(b.jsonp||"callback")+"=?");else if(!b.data|| +!T.test(b.data))b.data=(b.data?b.data+"&":"")+(b.jsonp||"callback")+"=?";b.dataType="json"}if(b.dataType==="json"&&(b.data&&T.test(b.data)||T.test(b.url))){d=b.jsonpCallback||"jsonp"+kb++;if(b.data)b.data=(b.data+"").replace(T,"="+d+"$1");b.url=b.url.replace(T,"="+d+"$1");b.dataType="script";var l=E[d];E[d]=function(m){f=m;c.handleSuccess(b,w,e,f);c.handleComplete(b,w,e,f);if(c.isFunction(l))l(m);else{E[d]=A;try{delete E[d]}catch(p){}}v&&v.removeChild(B)}}if(b.dataType==="script"&&b.cache===null)b.cache= +false;if(b.cache===false&&h==="GET"){var n=c.now(),s=b.url.replace(pb,"$1_="+n);b.url=s+(s===b.url?(ia.test(b.url)?"&":"?")+"_="+n:"")}if(b.data&&h==="GET")b.url+=(ia.test(b.url)?"&":"?")+b.data;b.global&&c.active++===0&&c.event.trigger("ajaxStart");n=(n=qb.exec(b.url))&&(n[1]&&n[1]!==location.protocol||n[2]!==location.host);if(b.dataType==="script"&&h==="GET"&&n){var v=u.getElementsByTagName("head")[0]||u.documentElement,B=u.createElement("script");if(b.scriptCharset)B.charset=b.scriptCharset;B.src= +b.url;if(!d){var D=false;B.onload=B.onreadystatechange=function(){if(!D&&(!this.readyState||this.readyState==="loaded"||this.readyState==="complete")){D=true;c.handleSuccess(b,w,e,f);c.handleComplete(b,w,e,f);B.onload=B.onreadystatechange=null;v&&B.parentNode&&v.removeChild(B)}}}v.insertBefore(B,v.firstChild);return A}var H=false,w=b.xhr();if(w){b.username?w.open(h,b.url,b.async,b.username,b.password):w.open(h,b.url,b.async);try{if(b.data!=null&&!k||a&&a.contentType)w.setRequestHeader("Content-Type", 
+b.contentType);if(b.ifModified){c.lastModified[b.url]&&w.setRequestHeader("If-Modified-Since",c.lastModified[b.url]);c.etag[b.url]&&w.setRequestHeader("If-None-Match",c.etag[b.url])}n||w.setRequestHeader("X-Requested-With","XMLHttpRequest");w.setRequestHeader("Accept",b.dataType&&b.accepts[b.dataType]?b.accepts[b.dataType]+", */*; q=0.01":b.accepts._default)}catch(G){}if(b.beforeSend&&b.beforeSend.call(b.context,w,b)===false){b.global&&c.active--===1&&c.event.trigger("ajaxStop");w.abort();return false}b.global&& +c.triggerGlobal(b,"ajaxSend",[w,b]);var M=w.onreadystatechange=function(m){if(!w||w.readyState===0||m==="abort"){H||c.handleComplete(b,w,e,f);H=true;if(w)w.onreadystatechange=c.noop}else if(!H&&w&&(w.readyState===4||m==="timeout")){H=true;w.onreadystatechange=c.noop;e=m==="timeout"?"timeout":!c.httpSuccess(w)?"error":b.ifModified&&c.httpNotModified(w,b.url)?"notmodified":"success";var p;if(e==="success")try{f=c.httpData(w,b.dataType,b)}catch(q){e="parsererror";p=q}if(e==="success"||e==="notmodified")d|| +c.handleSuccess(b,w,e,f);else c.handleError(b,w,e,p);d||c.handleComplete(b,w,e,f);m==="timeout"&&w.abort();if(b.async)w=null}};try{var g=w.abort;w.abort=function(){w&&g.call&&g.call(w);M("abort")}}catch(j){}b.async&&b.timeout>0&&setTimeout(function(){w&&!H&&M("timeout")},b.timeout);try{w.send(k||b.data==null?null:b.data)}catch(o){c.handleError(b,w,null,o);c.handleComplete(b,w,e,f)}b.async||M();return w}},param:function(a,b){var d=[],e=function(h,k){k=c.isFunction(k)?k():k;d[d.length]=encodeURIComponent(h)+ +"="+encodeURIComponent(k)};if(b===A)b=c.ajaxSettings.traditional;if(c.isArray(a)||a.jquery)c.each(a,function(){e(this.name,this.value)});else for(var f in a)ca(f,a[f],b,e);return d.join("&").replace(rb,"+")}});c.extend({active:0,lastModified:{},etag:{},handleError:function(a,b,d,e){a.error&&a.error.call(a.context,b,d,e);a.global&&c.triggerGlobal(a,"ajaxError",[b,a,e])},handleSuccess:function(a,b,d,e){a.success&&a.success.call(a.context,e,d,b);a.global&&c.triggerGlobal(a,"ajaxSuccess",[b,a])},handleComplete:function(a, +b,d){a.complete&&a.complete.call(a.context,b,d);a.global&&c.triggerGlobal(a,"ajaxComplete",[b,a]);a.global&&c.active--===1&&c.event.trigger("ajaxStop")},triggerGlobal:function(a,b,d){(a.context&&a.context.url==null?c(a.context):c.event).trigger(b,d)},httpSuccess:function(a){try{return!a.status&&location.protocol==="file:"||a.status>=200&&a.status<300||a.status===304||a.status===1223}catch(b){}return false},httpNotModified:function(a,b){var d=a.getResponseHeader("Last-Modified"),e=a.getResponseHeader("Etag"); +if(d)c.lastModified[b]=d;if(e)c.etag[b]=e;return a.status===304},httpData:function(a,b,d){var e=a.getResponseHeader("content-type")||"",f=b==="xml"||!b&&e.indexOf("xml")>=0;a=f?a.responseXML:a.responseText;f&&a.documentElement.nodeName==="parsererror"&&c.error("parsererror");if(d&&d.dataFilter)a=d.dataFilter(a,b);if(typeof a==="string")if(b==="json"||!b&&e.indexOf("json")>=0)a=c.parseJSON(a);else if(b==="script"||!b&&e.indexOf("javascript")>=0)c.globalEval(a);return a}});if(E.ActiveXObject)c.ajaxSettings.xhr= +function(){if(E.location.protocol!=="file:")try{return new E.XMLHttpRequest}catch(a){}try{return new E.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}};c.support.ajax=!!c.ajaxSettings.xhr();var 
da={},tb=/^(?:toggle|show|hide)$/,ub=/^([+\-]=)?([\d+.\-]+)(.*)$/,aa,na=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]];c.fn.extend({show:function(a,b,d){if(a||a===0)return this.animate(S("show",3),a,b,d);else{a= +0;for(b=this.length;a=0;e--)if(d[e].elem===this){b&&d[e](true);d.splice(e,1)}});b||this.dequeue();return this}});c.each({slideDown:S("show",1),slideUp:S("hide",1),slideToggle:S("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"}},function(a,b){c.fn[a]=function(d,e,f){return this.animate(b, +d,e,f)}});c.extend({speed:function(a,b,d){var e=a&&typeof a==="object"?c.extend({},a):{complete:d||!d&&b||c.isFunction(a)&&a,duration:a,easing:d&&b||b&&!c.isFunction(b)&&b};e.duration=c.fx.off?0:typeof e.duration==="number"?e.duration:e.duration in c.fx.speeds?c.fx.speeds[e.duration]:c.fx.speeds._default;e.old=e.complete;e.complete=function(){e.queue!==false&&c(this).dequeue();c.isFunction(e.old)&&e.old.call(this)};return e},easing:{linear:function(a,b,d,e){return d+e*a},swing:function(a,b,d,e){return(-Math.cos(a* +Math.PI)/2+0.5)*e+d}},timers:[],fx:function(a,b,d){this.options=b;this.elem=a;this.prop=d;if(!b.orig)b.orig={}}});c.fx.prototype={update:function(){this.options.step&&this.options.step.call(this.elem,this.now,this);(c.fx.step[this.prop]||c.fx.step._default)(this)},cur:function(){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null))return this.elem[this.prop];var a=parseFloat(c.css(this.elem,this.prop));return a&&a>-1E4?a:0},custom:function(a,b,d){function e(h){return f.step(h)} +this.startTime=c.now();this.start=a;this.end=b;this.unit=d||this.unit||"px";this.now=this.start;this.pos=this.state=0;var f=this;a=c.fx;e.elem=this.elem;if(e()&&c.timers.push(e)&&!aa)aa=setInterval(a.tick,a.interval)},show:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.show=true;this.custom(this.prop==="width"||this.prop==="height"?1:0,this.cur());c(this.elem).show()},hide:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.hide=true; +this.custom(this.cur(),0)},step:function(a){var b=c.now(),d=true;if(a||b>=this.options.duration+this.startTime){this.now=this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;for(var e in this.options.curAnim)if(this.options.curAnim[e]!==true)d=false;if(d){if(this.options.overflow!=null&&!c.support.shrinkWrapBlocks){var f=this.elem,h=this.options;c.each(["","X","Y"],function(l,n){f.style["overflow"+n]=h.overflow[l]})}this.options.hide&&c(this.elem).hide();if(this.options.hide|| +this.options.show)for(var k in this.options.curAnim)c.style(this.elem,k,this.options.orig[k]);this.options.complete.call(this.elem)}return false}else{a=b-this.startTime;this.state=a/this.options.duration;b=this.options.easing||(c.easing.swing?"swing":"linear");this.pos=c.easing[this.options.specialEasing&&this.options.specialEasing[this.prop]||b](this.state,a,0,1,this.options.duration);this.now=this.start+(this.end-this.start)*this.pos;this.update()}return true}};c.extend(c.fx,{tick:function(){for(var a= +c.timers,b=0;b-1;e={};var s={};if(n)s=f.position();k=n?s.top:parseInt(k,10)||0;l=n?s.left:parseInt(l,10)||0;if(c.isFunction(b))b=b.call(a,d,h);if(b.top!=null)e.top=b.top-h.top+k;if(b.left!=null)e.left=b.left-h.left+l;"using"in b?b.using.call(a, +e):f.css(e)}};c.fn.extend({position:function(){if(!this[0])return null;var 
a=this[0],b=this.offsetParent(),d=this.offset(),e=Fa.test(b[0].nodeName)?{top:0,left:0}:b.offset();d.top-=parseFloat(c.css(a,"marginTop"))||0;d.left-=parseFloat(c.css(a,"marginLeft"))||0;e.top+=parseFloat(c.css(b[0],"borderTopWidth"))||0;e.left+=parseFloat(c.css(b[0],"borderLeftWidth"))||0;return{top:d.top-e.top,left:d.left-e.left}},offsetParent:function(){return this.map(function(){for(var a=this.offsetParent||u.body;a&&!Fa.test(a.nodeName)&& +c.css(a,"position")==="static";)a=a.offsetParent;return a})}});c.each(["Left","Top"],function(a,b){var d="scroll"+b;c.fn[d]=function(e){var f=this[0],h;if(!f)return null;if(e!==A)return this.each(function(){if(h=ea(this))h.scrollTo(!a?e:c(h).scrollLeft(),a?e:c(h).scrollTop());else this[d]=e});else return(h=ea(f))?"pageXOffset"in h?h[a?"pageYOffset":"pageXOffset"]:c.support.boxModel&&h.document.documentElement[d]||h.document.body[d]:f[d]}});c.each(["Height","Width"],function(a,b){var d=b.toLowerCase(); +c.fn["inner"+b]=function(){return this[0]?parseFloat(c.css(this[0],d,"padding")):null};c.fn["outer"+b]=function(e){return this[0]?parseFloat(c.css(this[0],d,e?"margin":"border")):null};c.fn[d]=function(e){var f=this[0];if(!f)return e==null?null:this;if(c.isFunction(e))return this.each(function(h){var k=c(this);k[d](e.call(this,h,k[d]()))});return c.isWindow(f)?f.document.compatMode==="CSS1Compat"&&f.document.documentElement["client"+b]||f.document.body["client"+b]:f.nodeType===9?Math.max(f.documentElement["client"+ +b],f.body["scroll"+b],f.documentElement["scroll"+b],f.body["offset"+b],f.documentElement["offset"+b]):e===A?parseFloat(c.css(f,d)):this.css(d,typeof e==="string"?e:e+"px")}})})(window); diff --git a/test_coverage/jquery.hotkeys.js b/test_coverage/jquery.hotkeys.js new file mode 100644 index 000000000..09b21e03c --- /dev/null +++ b/test_coverage/jquery.hotkeys.js @@ -0,0 +1,99 @@ +/* + * jQuery Hotkeys Plugin + * Copyright 2010, John Resig + * Dual licensed under the MIT or GPL Version 2 licenses. 
+ * + * Based upon the plugin by Tzury Bar Yochay: + * http://github.com/tzuryby/hotkeys + * + * Original idea by: + * Binny V A, http://www.openjs.com/scripts/events/keyboard_shortcuts/ +*/ + +(function(jQuery){ + + jQuery.hotkeys = { + version: "0.8", + + specialKeys: { + 8: "backspace", 9: "tab", 13: "return", 16: "shift", 17: "ctrl", 18: "alt", 19: "pause", + 20: "capslock", 27: "esc", 32: "space", 33: "pageup", 34: "pagedown", 35: "end", 36: "home", + 37: "left", 38: "up", 39: "right", 40: "down", 45: "insert", 46: "del", + 96: "0", 97: "1", 98: "2", 99: "3", 100: "4", 101: "5", 102: "6", 103: "7", + 104: "8", 105: "9", 106: "*", 107: "+", 109: "-", 110: ".", 111 : "/", + 112: "f1", 113: "f2", 114: "f3", 115: "f4", 116: "f5", 117: "f6", 118: "f7", 119: "f8", + 120: "f9", 121: "f10", 122: "f11", 123: "f12", 144: "numlock", 145: "scroll", 191: "/", 224: "meta" + }, + + shiftNums: { + "`": "~", "1": "!", "2": "@", "3": "#", "4": "$", "5": "%", "6": "^", "7": "&", + "8": "*", "9": "(", "0": ")", "-": "_", "=": "+", ";": ": ", "'": "\"", ",": "<", + ".": ">", "/": "?", "\\": "|" + } + }; + + function keyHandler( handleObj ) { + // Only care when a possible input has been specified + if ( typeof handleObj.data !== "string" ) { + return; + } + + var origHandler = handleObj.handler, + keys = handleObj.data.toLowerCase().split(" "); + + handleObj.handler = function( event ) { + // Don't fire in text-accepting inputs that we didn't directly bind to + if ( this !== event.target && (/textarea|select/i.test( event.target.nodeName ) || + event.target.type === "text") ) { + return; + } + + // Keypress represents characters, not special keys + var special = event.type !== "keypress" && jQuery.hotkeys.specialKeys[ event.which ], + character = String.fromCharCode( event.which ).toLowerCase(), + key, modif = "", possible = {}; + + // check combinations (alt|ctrl|shift+anything) + if ( event.altKey && special !== "alt" ) { + modif += "alt+"; + } + + if ( event.ctrlKey && special !== "ctrl" ) { + modif += "ctrl+"; + } + + // TODO: Need to make sure this works consistently across platforms + if ( event.metaKey && !event.ctrlKey && special !== "meta" ) { + modif += "meta+"; + } + + if ( event.shiftKey && special !== "shift" ) { + modif += "shift+"; + } + + if ( special ) { + possible[ modif + special ] = true; + + } else { + possible[ modif + character ] = true; + possible[ modif + jQuery.hotkeys.shiftNums[ character ] ] = true; + + // "$" can be triggered as "Shift+4" or "Shift+$" or just "$" + if ( modif === "shift+" ) { + possible[ jQuery.hotkeys.shiftNums[ character ] ] = true; + } + } + + for ( var i = 0, l = keys.length; i < l; i++ ) { + if ( possible[ keys[i] ] ) { + return origHandler.apply( this, arguments ); + } + } + }; + } + + jQuery.each([ "keydown", "keyup", "keypress" ], function() { + jQuery.event.special[ this ] = { add: keyHandler }; + }); + +})( jQuery ); diff --git a/test_coverage/jquery.isonscreen.js b/test_coverage/jquery.isonscreen.js new file mode 100644 index 000000000..0182ebd21 --- /dev/null +++ b/test_coverage/jquery.isonscreen.js @@ -0,0 +1,53 @@ +/* Copyright (c) 2010 + * @author Laurence Wheway + * Dual licensed under the MIT (http://www.opensource.org/licenses/mit-license.php) + * and GPL (http://www.opensource.org/licenses/gpl-license.php) licenses. 
+ * + * @version 1.2.0 + */ +(function($) { + jQuery.extend({ + isOnScreen: function(box, container) { + //ensure numbers come in as intgers (not strings) and remove 'px' is it's there + for(var i in box){box[i] = parseFloat(box[i])}; + for(var i in container){container[i] = parseFloat(container[i])}; + + if(!container){ + container = { + left: $(window).scrollLeft(), + top: $(window).scrollTop(), + width: $(window).width(), + height: $(window).height() + } + } + + if( box.left+box.width-container.left > 0 && + box.left < container.width+container.left && + box.top+box.height-container.top > 0 && + box.top < container.height+container.top + ) return true; + return false; + } + }) + + + jQuery.fn.isOnScreen = function (container) { + for(var i in container){container[i] = parseFloat(container[i])}; + + if(!container){ + container = { + left: $(window).scrollLeft(), + top: $(window).scrollTop(), + width: $(window).width(), + height: $(window).height() + } + } + + if( $(this).offset().left+$(this).width()-container.left > 0 && + $(this).offset().left < container.width+container.left && + $(this).offset().top+$(this).height()-container.top > 0 && + $(this).offset().top < container.height+container.top + ) return true; + return false; + } +})(jQuery); diff --git a/test_coverage/jquery.tablesorter.min.js b/test_coverage/jquery.tablesorter.min.js new file mode 100644 index 000000000..64c700712 --- /dev/null +++ b/test_coverage/jquery.tablesorter.min.js @@ -0,0 +1,2 @@ + +(function($){$.extend({tablesorter:new function(){var parsers=[],widgets=[];this.defaults={cssHeader:"header",cssAsc:"headerSortUp",cssDesc:"headerSortDown",sortInitialOrder:"asc",sortMultiSortKey:"shiftKey",sortForce:null,sortAppend:null,textExtraction:"simple",parsers:{},widgets:[],widgetZebra:{css:["even","odd"]},headers:{},widthFixed:false,cancelSelection:true,sortList:[],headerList:[],dateFormat:"us",decimal:'.',debug:false};function benchmark(s,d){log(s+","+(new Date().getTime()-d.getTime())+"ms");}this.benchmark=benchmark;function log(s){if(typeof console!="undefined"&&typeof console.debug!="undefined"){console.log(s);}else{alert(s);}}function buildParserCache(table,$headers){if(table.config.debug){var parsersDebug="";}var rows=table.tBodies[0].rows;if(table.tBodies[0].rows[0]){var list=[],cells=rows[0].cells,l=cells.length;for(var i=0;i1){arr=arr.concat(checkCellColSpan(table,headerArr,row++));}else{if(table.tHead.length==1||(cell.rowSpan>1||!r[row+1])){arr.push(cell);}}}return arr;};function checkHeaderMetadata(cell){if(($.metadata)&&($(cell).metadata().sorter===false)){return true;};return false;}function checkHeaderOptions(table,i){if((table.config.headers[i])&&(table.config.headers[i].sorter===false)){return true;};return false;}function applyWidget(table){var c=table.config.widgets;var l=c.length;for(var i=0;i');$("tr:first td",table.tBodies[0]).each(function(){colgroup.append($('
').css('width',$(this).width()));});$(table).prepend(colgroup);};}function updateHeaderSortCount(table,sortList){var c=table.config,l=sortList.length;for(var i=0;ib)?1:0));};function sortTextDesc(a,b){return((ba)?1:0));};function sortNumeric(a,b){return a-b;};function sortNumericDesc(a,b){return b-a;};function getCachedSortType(parsers,i){return parsers[i].type;};this.construct=function(settings){return this.each(function(){if(!this.tHead||!this.tBodies)return;var $this,$document,$headers,cache,config,shiftDown=0,sortOrder;this.config={};config=$.extend(this.config,$.tablesorter.defaults,settings);$this=$(this);$headers=buildHeaders(this);this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);var sortCSS=[config.cssDesc,config.cssAsc];fixColumnWidth(this);$headers.click(function(e){$this.trigger("sortStart");var totalRows=($this[0].tBodies[0]&&$this[0].tBodies[0].rows.length)||0;if(!this.sortDisabled&&totalRows>0){var $cell=$(this);var i=this.column;this.order=this.count++%2;if(!e[config.sortMultiSortKey]){config.sortList=[];if(config.sortForce!=null){var a=config.sortForce;for(var j=0;j0){$this.trigger("sorton",[config.sortList]);}applyWidget(this);});};this.addParser=function(parser){var l=parsers.length,a=true;for(var i=0;iD6{MWQjEnx?oJHr&dIz4a@dl*-CY>| zgW!U_%O?XxI14-?iy0WWg+Z8+Vb&Z8pdfpRr>`sfZ8lau9@bl*u7(4JIy_w*Lo808 zo$Afkpupp@{Fv_bobxQ#pD>iB3oNa1d9=pM`D99*FvsH{pKJfpB1-4UD;=6}F=+gKX>Gx9b=!>PY1_pdfo@{(boFyt=akR{ E04sl8JOBUy literal 0 HcmV?d00001 diff --git a/test_coverage/keybd_open.png b/test_coverage/keybd_open.png new file mode 100644 index 0000000000000000000000000000000000000000..a77961db5424cfff43a63d399972ee85fc0dfdb1 GIT binary patch literal 267 zcmeAS@N?(olHy`uVBq!ia0vp^%0SG+!3HE>D6{MWQjEnx?oJHr&dIz4a@dl*-CY>| zgW!U_%O?XxI14-?iy0WWg+Z8+Vb&Z8pdfpRr>`sfZ8lau9%kc-1xY}mZci7-5R21$ zCp+>TR^VYdE*ieC^FGV{Cyeh_21=Rotz3KNq=!VmdK II;Vst00jnQH~;_u literal 0 HcmV?d00001 diff --git a/test_coverage/status.dat b/test_coverage/status.dat new file mode 100644 index 000000000..f49c79028 --- /dev/null +++ b/test_coverage/status.dat @@ -0,0 +1,271 @@ +(dp1 +S'files' +p2 +(dp3 +S'youtube_dl_PostProcessor' +p4 +(dp5 +S'index' +p6 +(dp7 +S'html_filename' +p8 +S'youtube_dl_PostProcessor.html' +p9 +sS'name' +p10 +S'youtube_dl.PostProcessor' +p11 +sS'nums' +p12 +ccopy_reg +_reconstructor +p13 +(ccoverage.results +Numbers +p14 +c__builtin__ +object +p15 +NtRp16 +(dp17 +S'n_files' +p18 +I1 +sS'n_branches' +p19 +I0 +sS'n_statements' +p20 +I133 +sS'n_excluded' +p21 +I0 +sS'n_partial_branches' +p22 +I0 +sS'n_missing' +p23 +I114 +sS'n_missing_branches' +p24 +I0 +sbssS'hash' +p25 +S'C\xc0\xe7,\xf52VnS\x9e%\xc6\xe8\xeaT\xd9' +p26 +ssS'youtube_dl_InfoExtractors' +p27 +(dp28 +g6 +(dp29 +g8 +S'youtube_dl_InfoExtractors.html' +p30 +sg10 +S'youtube_dl.InfoExtractors' +p31 +sg12 +g13 +(g14 +g15 +NtRp32 +(dp33 +g18 +I1 +sg19 +I0 +sg20 +I2275 +sg21 +I0 +sg22 +I0 +sg23 +I1273 +sg24 +I0 +sbssg25 +S'\xc7\xc4\x03@-U\x1f\x93k!\xe6\x12\xf6\xf2\xe6l' +p34 +ssS'youtube_dl_version' +p35 +(dp36 +g6 +(dp37 +g8 +S'youtube_dl_version.html' +p38 +sg10 +S'youtube_dl.version' +p39 +sg12 +g13 +(g14 +g15 +NtRp40 +(dp41 +g18 +I1 +sg19 +I0 +sg20 +I1 +sg21 +I0 +sg22 +I0 +sg23 +I0 +sg24 +I0 +sbssg25 +S'\xc2$\xeb8-M\x17\xbd\xadu4rB\xd3\xfc\x0f' +p42 +ssS'youtube_dl_utils' +p43 +(dp44 +g6 +(dp45 +g8 +S'youtube_dl_utils.html' +p46 +sg10 +S'youtube_dl.utils' +p47 +sg12 +g13 +(g14 +g15 +NtRp48 +(dp49 +g18 +I1 +sg19 +I0 +sg20 +I340 +sg21 +I0 +sg22 +I0 +sg23 +I58 +sg24 +I0 +sbssg25 +S'\x19\x02[U\xd7[yZ{\xb4q\xa4\x1bY0/' +p50 
+ssS'youtube_dl_update' +p51 +(dp52 +g6 +(dp53 +g8 +S'youtube_dl_update.html' +p54 +sg10 +S'youtube_dl.update' +p55 +sg12 +g13 +(g14 +g15 +NtRp56 +(dp57 +g18 +I1 +sg19 +I0 +sg20 +I130 +sg21 +I0 +sg22 +I0 +sg23 +I122 +sg24 +I0 +sbssg25 +S'\x15\x94\xbeDlF*\x0c>\x07\xf2\x17n\x0cN\xbc' +p58 +ssS'youtube_dl_FileDownloader' +p59 +(dp60 +g6 +(dp61 +g8 +S'youtube_dl_FileDownloader.html' +p62 +sg10 +S'youtube_dl.FileDownloader' +p63 +sg12 +g13 +(g14 +g15 +NtRp64 +(dp65 +g18 +I1 +sg19 +I0 +sg20 +I479 +sg21 +I0 +sg22 +I0 +sg23 +I208 +sg24 +I0 +sbssg25 +S'9\x8c\x8c\xe2V\xb9e\xc6\xee\xb9\x85\xe0\xc2\x8c\x84Z' +p66 +ssS'youtube_dl' +p67 +(dp68 +g6 +(dp69 +g8 +S'youtube_dl.html' +p70 +sg10 +g67 +sg12 +g13 +(g14 +g15 +NtRp71 +(dp72 +g18 +I1 +sg19 +I0 +sg20 +I274 +sg21 +I0 +sg22 +I0 +sg23 +I251 +sg24 +I0 +sbssg25 +S'e\x9c/\x9c\xc3\x1f\xc0\xcbp\x0f\x8a\xf3\xbe\xfdp_' +p73 +sssS'version' +p74 +S'3.6b1' +p75 +sS'settings' +p76 +S'\xce^\xf2\xdb\x0fV\xcc\xfe\x1e\x9a\xd9\x81\xe5\xe3.\xa9' +p77 +sS'format' +p78 +I1 +s. \ No newline at end of file diff --git a/test_coverage/style.css b/test_coverage/style.css new file mode 100644 index 000000000..811c64019 --- /dev/null +++ b/test_coverage/style.css @@ -0,0 +1,300 @@ +/* CSS styles for Coverage. */ +/* Page-wide styles */ +html, body, h1, h2, h3, p, td, th { + margin: 0; + padding: 0; + border: 0; + outline: 0; + font-weight: inherit; + font-style: inherit; + font-size: 100%; + font-family: inherit; + vertical-align: baseline; + } + +/* Set baseline grid to 16 pt. */ +body { + font-family: georgia, serif; + font-size: 1em; + } + +html>body { + font-size: 16px; + } + +/* Set base font size to 12/16 */ +p { + font-size: .75em; /* 12/16 */ + line-height: 1.33333333em; /* 16/12 */ + } + +table { + border-collapse: collapse; + } + +a.nav { + text-decoration: none; + color: inherit; + } +a.nav:hover { + text-decoration: underline; + color: inherit; + } + +/* Page structure */ +#header { + background: #f8f8f8; + width: 100%; + border-bottom: 1px solid #eee; + } + +#source { + padding: 1em; + font-family: "courier new", monospace; + } + +#indexfile #footer { + margin: 1em 3em; + } + +#pyfile #footer { + margin: 1em 1em; + } + +#footer .content { + padding: 0; + font-size: 85%; + font-family: verdana, sans-serif; + color: #666666; + font-style: italic; + } + +#index { + margin: 1em 0 0 3em; + } + +/* Header styles */ +#header .content { + padding: 1em 3em; + } + +h1 { + font-size: 1.25em; +} + +h2.stats { + margin-top: .5em; + font-size: 1em; +} +.stats span { + border: 1px solid; + padding: .1em .25em; + margin: 0 .1em; + cursor: pointer; + border-color: #999 #ccc #ccc #999; +} +.stats span.hide_run, .stats span.hide_exc, +.stats span.hide_mis, .stats span.hide_par, +.stats span.par.hide_run.hide_par { + border-color: #ccc #999 #999 #ccc; +} +.stats span.par.hide_run { + border-color: #999 #ccc #ccc #999; +} + +.stats span.run { + background: #ddffdd; +} +.stats span.exc { + background: #eeeeee; +} +.stats span.mis { + background: #ffdddd; +} +.stats span.hide_run { + background: #eeffee; +} +.stats span.hide_exc { + background: #f5f5f5; +} +.stats span.hide_mis { + background: #ffeeee; +} +.stats span.par { + background: #ffffaa; +} +.stats span.hide_par { + background: #ffffcc; +} + +/* Help panel */ +#keyboard_icon { + float: right; + cursor: pointer; +} + +.help_panel { + position: absolute; + background: #ffc; + padding: .5em; + border: 1px solid #883; + display: none; +} + +#indexfile .help_panel { + width: 20em; height: 4em; +} + +#pyfile .help_panel { + width: 
16em; height: 8em; +} + +.help_panel .legend { + font-style: italic; + margin-bottom: 1em; +} + +#panel_icon { + float: right; + cursor: pointer; +} + +.keyhelp { + margin: .75em; +} + +.keyhelp .key { + border: 1px solid black; + border-color: #888 #333 #333 #888; + padding: .1em .35em; + font-family: monospace; + font-weight: bold; + background: #eee; +} + +/* Source file styles */ +.linenos p { + text-align: right; + margin: 0; + padding: 0 .5em; + color: #999999; + font-family: verdana, sans-serif; + font-size: .625em; /* 10/16 */ + line-height: 1.6em; /* 16/10 */ + } +.linenos p.highlight { + background: #ffdd00; + } +.linenos p a { + text-decoration: none; + color: #999999; + } +.linenos p a:hover { + text-decoration: underline; + color: #999999; + } + +td.text { + width: 100%; + } +.text p { + margin: 0; + padding: 0 0 0 .5em; + border-left: 2px solid #ffffff; + white-space: nowrap; + } + +.text p.mis { + background: #ffdddd; + border-left: 2px solid #ff0000; + } +.text p.run, .text p.run.hide_par { + background: #ddffdd; + border-left: 2px solid #00ff00; + } +.text p.exc { + background: #eeeeee; + border-left: 2px solid #808080; + } +.text p.par, .text p.par.hide_run { + background: #ffffaa; + border-left: 2px solid #eeee99; + } +.text p.hide_run, .text p.hide_exc, .text p.hide_mis, .text p.hide_par, +.text p.hide_run.hide_par { + background: inherit; + } + +.text span.annotate { + font-family: georgia; + font-style: italic; + color: #666; + float: right; + padding-right: .5em; + } +.text p.hide_par span.annotate { + display: none; + } + +/* Syntax coloring */ +.text .com { + color: green; + font-style: italic; + line-height: 1px; + } +.text .key { + font-weight: bold; + line-height: 1px; + } +.text .str { + color: #000080; + } + +/* index styles */ +#index td, #index th { + text-align: right; + width: 5em; + padding: .25em .5em; + border-bottom: 1px solid #eee; + } +#index th { + font-style: italic; + color: #333; + border-bottom: 1px solid #ccc; + cursor: pointer; + } +#index th:hover { + background: #eee; + border-bottom: 1px solid #999; + } +#index td.left, #index th.left { + padding-left: 0; + } +#index td.right, #index th.right { + padding-right: 0; + } +#index th.headerSortDown, #index th.headerSortUp { + border-bottom: 1px solid #000; + } +#index td.name, #index th.name { + text-align: left; + width: auto; + } +#index td.name a { + text-decoration: none; + color: #000; + } +#index td.name a:hover { + text-decoration: underline; + color: #000; + } +#index tr.total { + } +#index tr.total td { + font-weight: bold; + border-top: 1px solid #ccc; + border-bottom: none; + } +#index tr.file:hover { + background: #eeeeee; + } diff --git a/test_coverage/youtube_dl.html b/test_coverage/youtube_dl.html new file mode 100644 index 000000000..229d7027b --- /dev/null +++ b/test_coverage/youtube_dl.html @@ -0,0 +1,1066 @@ + + + + + + + + Coverage for youtube_dl: 8% + + + + + + + + + + + + +
[report chrome: keyboard help panel (r/m/x/p toggle line displays, j/k jump to the next/previous highlighted chunk, 0 for the top of the page, 1 for the first highlighted chunk)]
[line-number gutter for source lines 1-492]
[annotated source listing; each line below is rendered with run/missing highlighting in the report. The embedded source of youtube_dl/__init__.py:]

#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import with_statement
from __future__ import absolute_import

__authors__  = (
    'Ricardo Garcia Gonzalez',
    'Danny Colligan',
    'Benjamin Johnson',
    'Vasyl\' Vavrychuk',
    'Witold Baryluk',
    'Paweł Paprota',
    'Gergely Imreh',
    'Rogério Brito',
    'Philipp Hagemeister',
    'Sören Schulze',
    'Kevin Ngo',
    'Ori Avtalion',
    'shizeeg',
    'Filippo Valsorda',
    'Christian Albrecht',
    'Dave Vasilevsky',
    'Jaime Marquínez Ferrándiz',
    )

__license__ = 'Public Domain'

import getpass
import optparse
import os
import re
import shlex
import socket
import subprocess
import sys
import warnings
import platform

from .utils import *
from .update import update_self
from .version import __version__
from .FileDownloader import *
from .InfoExtractors import gen_extractors
from .PostProcessor import *

def parseOpts():
    def _readOptions(filename_bytes):
        try:
            optionf = open(filename_bytes)
        except IOError:
            return [] # silently skip if file is not present
        try:
            res = []
            for l in optionf:
                res += shlex.split(l, comments=True)
        finally:
            optionf.close()
        return res

    def _format_option_string(option):
        ''' ('-o', '--option') -> -o, --format METAVAR'''

        opts = []

        if option._short_opts:
            opts.append(option._short_opts[0])
        if option._long_opts:
            opts.append(option._long_opts[0])
        if len(opts) > 1:
            opts.insert(1, ', ')

        if option.takes_value(): opts.append(' %s' % option.metavar)

        return "".join(opts)

    def _find_term_columns():
        columns = os.environ.get('COLUMNS', None)
        if columns:
            return int(columns)

        try:
            sp = subprocess.Popen(['stty', 'size'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out,err = sp.communicate()
            return int(out.split()[1])
        except:
            pass
        return None

    max_width = 80
    max_help_position = 80

    # No need to wrap help messages if we're on a wide console
    columns = _find_term_columns()
    if columns: max_width = columns

    fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
    fmt.format_option_strings = _format_option_string

    kw = {
        'version'   : __version__,
        'formatter' : fmt,
        'usage' : '%prog [options] url [url...]',
        'conflict_handler' : 'resolve',
    }

    parser = optparse.OptionParser(**kw)

    # option groups
    general        = optparse.OptionGroup(parser, 'General Options')
    selection      = optparse.OptionGroup(parser, 'Video Selection')
    authentication = optparse.OptionGroup(parser, 'Authentication Options')
    video_format   = optparse.OptionGroup(parser, 'Video Format Options')
    postproc       = optparse.OptionGroup(parser, 'Post-processing Options')
    filesystem     = optparse.OptionGroup(parser, 'Filesystem Options')
    verbosity      = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')

    general.add_option('-h', '--help',
            action='help', help='print this help text and exit')
    general.add_option('-v', '--version',
            action='version', help='print program version and exit')
    general.add_option('-U', '--update',
            action='store_true', dest='update_self', help='update this program to latest version')
    general.add_option('-i', '--ignore-errors',
            action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
    general.add_option('-r', '--rate-limit',
            dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
    general.add_option('-R', '--retries',
            dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
    general.add_option('--buffer-size',
            dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16k) (default is %default)', default="1024")
    general.add_option('--no-resize-buffer',
            action='store_true', dest='noresizebuffer',
            help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
    general.add_option('--dump-user-agent',
            action='store_true', dest='dump_user_agent',
            help='display the current browser identification', default=False)
    general.add_option('--user-agent',
            dest='user_agent', help='specify a custom user agent', metavar='UA')
    general.add_option('--list-extractors',
            action='store_true', dest='list_extractors',
            help='List all supported extractors and the URLs they would handle', default=False)
    general.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)

    selection.add_option('--playlist-start',
            dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is %default)', default=1)
    selection.add_option('--playlist-end',
            dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
    selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
    selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
    selection.add_option('--max-downloads', metavar='NUMBER', dest='max_downloads', help='Abort after downloading NUMBER files', default=None)

    authentication.add_option('-u', '--username',
            dest='username', metavar='USERNAME', help='account username')
    authentication.add_option('-p', '--password',
            dest='password', metavar='PASSWORD', help='account password')
    authentication.add_option('-n', '--netrc',
            action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)


    video_format.add_option('-f', '--format',
            action='store', dest='format', metavar='FORMAT', help='video format code')
    video_format.add_option('--all-formats',
            action='store_const', dest='format', help='download all available video formats', const='all')
    video_format.add_option('--prefer-free-formats',
            action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
    video_format.add_option('--max-quality',
            action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
    video_format.add_option('-F', '--list-formats',
            action='store_true', dest='listformats', help='list all available formats (currently youtube only)')
    video_format.add_option('--write-srt',
            action='store_true', dest='writesubtitles',
            help='write video closed captions to a .srt file (currently youtube only)', default=False)
    video_format.add_option('--srt-lang',
            action='store', dest='subtitleslang', metavar='LANG',
            help='language of the closed captions to download (optional) use IETF language tags like \'en\'')


    verbosity.add_option('-q', '--quiet',
            action='store_true', dest='quiet', help='activates quiet mode', default=False)
    verbosity.add_option('-s', '--simulate',
            action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
    verbosity.add_option('--skip-download',
            action='store_true', dest='skip_download', help='do not download the video', default=False)
    verbosity.add_option('-g', '--get-url',
            action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
    verbosity.add_option('-e', '--get-title',
            action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
    verbosity.add_option('--get-thumbnail',
            action='store_true', dest='getthumbnail',
            help='simulate, quiet but print thumbnail URL', default=False)
    verbosity.add_option('--get-description',
            action='store_true', dest='getdescription',
            help='simulate, quiet but print video description', default=False)
    verbosity.add_option('--get-filename',
            action='store_true', dest='getfilename',
            help='simulate, quiet but print output filename', default=False)
    verbosity.add_option('--get-format',
            action='store_true', dest='getformat',
            help='simulate, quiet but print output format', default=False)
    verbosity.add_option('--no-progress',
            action='store_true', dest='noprogress', help='do not print progress bar', default=False)
    verbosity.add_option('--console-title',
            action='store_true', dest='consoletitle',
            help='display progress in console titlebar', default=False)
    verbosity.add_option('-v', '--verbose',
            action='store_true', dest='verbose', help='print various debugging information', default=False)


    filesystem.add_option('-t', '--title',
            action='store_true', dest='usetitle', help='use title in file name', default=False)
    filesystem.add_option('--id',
            action='store_true', dest='useid', help='use video ID in file name', default=False)
    filesystem.add_option('-l', '--literal',
            action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
    filesystem.add_option('-A', '--auto-number',
            action='store_true', dest='autonumber',
            help='number downloaded files starting from 00000', default=False)
    filesystem.add_option('-o', '--output',
            dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id and %% for a literal percent. Use - to output to stdout. Can also be used to download to a different directory, for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .')
    filesystem.add_option('--restrict-filenames',
            action='store_true', dest='restrictfilenames',
            help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
    filesystem.add_option('-a', '--batch-file',
            dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
    filesystem.add_option('-w', '--no-overwrites',
            action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
    filesystem.add_option('-c', '--continue',
            action='store_true', dest='continue_dl', help='resume partially downloaded files', default=True)
    filesystem.add_option('--no-continue',
            action='store_false', dest='continue_dl',
            help='do not resume partially downloaded files (restart from beginning)')
    filesystem.add_option('--cookies',
            dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
    filesystem.add_option('--no-part',
            action='store_true', dest='nopart', help='do not use .part files', default=False)
    filesystem.add_option('--no-mtime',
            action='store_false', dest='updatetime',
            help='do not use the Last-modified header to set the file modification time', default=True)
    filesystem.add_option('--write-description',
            action='store_true', dest='writedescription',
            help='write video description to a .description file', default=False)
    filesystem.add_option('--write-info-json',
            action='store_true', dest='writeinfojson',
            help='write video metadata to a .info.json file', default=False)


    postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
            help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
    postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
            help='"best", "aac", "vorbis", "mp3", "m4a", or "wav"; best by default')
    postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
            help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
    postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
            help='keeps the video file on disk after the post-processing; the video is erased by default')
    postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
            help='do not overwrite post-processed files; the post-processed files are overwritten by default')


    parser.add_option_group(general)
    parser.add_option_group(selection)
    parser.add_option_group(filesystem)
    parser.add_option_group(verbosity)
    parser.add_option_group(video_format)
    parser.add_option_group(authentication)
    parser.add_option_group(postproc)

    xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
    if xdg_config_home:
        userConf = os.path.join(xdg_config_home, 'youtube-dl.conf')
    else:
        userConf = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
    argv = _readOptions('/etc/youtube-dl.conf') + _readOptions(userConf) + sys.argv[1:]
    opts, args = parser.parse_args(argv)

    return parser, opts, args

def _real_main():
    parser, opts, args = parseOpts()

    # Open appropriate CookieJar
    if opts.cookiefile is None:
        jar = compat_cookiejar.CookieJar()
    else:
        try:
            jar = compat_cookiejar.MozillaCookieJar(opts.cookiefile)
            if os.path.isfile(opts.cookiefile) and os.access(opts.cookiefile, os.R_OK):
                jar.load()
        except (IOError, OSError) as err:
            sys.exit(u'ERROR: unable to open cookie file')
    # Set user agent
    if opts.user_agent is not None:
        std_headers['User-Agent'] = opts.user_agent

    # Dump user agent
    if opts.dump_user_agent:
        print(std_headers['User-Agent'])
        sys.exit(0)

    # Batch file verification
    batchurls = []
    if opts.batchfile is not None:
        try:
            if opts.batchfile == '-':
                batchfd = sys.stdin
            else:
                batchfd = open(opts.batchfile, 'r')
            batchurls = batchfd.readlines()
            batchurls = [x.strip() for x in batchurls]
            batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
        except IOError:
            sys.exit(u'ERROR: batch file could not be read')
    all_urls = batchurls + args
    all_urls = [url.strip() for url in all_urls]

    # General configuration
    cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
    proxy_handler = compat_urllib_request.ProxyHandler()
    opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
    compat_urllib_request.install_opener(opener)
    socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)

    extractors = gen_extractors()

    if opts.list_extractors:
        for ie in extractors:
            print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
            matchedUrls = [url for url in all_urls if ie.suitable(url)]
            all_urls = [url for url in all_urls if url not in matchedUrls]
            for mu in matchedUrls:
                print(u'  ' + mu)
        sys.exit(0)

    # Conflicting, missing and erroneous options
    if opts.usenetrc and (opts.username is not None or opts.password is not None):
        parser.error(u'using .netrc conflicts with giving username/password')
    if opts.password is not None and opts.username is None:
        parser.error(u'account username missing')
    if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
        parser.error(u'using output template conflicts with using title, video ID or auto number')
    if opts.usetitle and opts.useid:
        parser.error(u'using title conflicts with using video ID')
    if opts.username is not None and opts.password is None:
        opts.password = getpass.getpass(u'Type account password and press return:')
    if opts.ratelimit is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
        if numeric_limit is None:
            parser.error(u'invalid rate limit specified')
        opts.ratelimit = numeric_limit
    if opts.retries is not None:
        try:
            opts.retries = int(opts.retries)
        except (TypeError, ValueError) as err:
            parser.error(u'invalid retry count specified')
    if opts.buffersize is not None:
        numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
        if numeric_buffersize is None:
            parser.error(u'invalid buffer size specified')
        opts.buffersize = numeric_buffersize
    try:
        opts.playliststart = int(opts.playliststart)
        if opts.playliststart <= 0:
            raise ValueError(u'Playlist start must be positive')
    except (TypeError, ValueError) as err:
        parser.error(u'invalid playlist start number specified')
    try:
        opts.playlistend = int(opts.playlistend)
        if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
            raise ValueError(u'Playlist end must be greater than playlist start')
    except (TypeError, ValueError) as err:
        parser.error(u'invalid playlist end number specified')
    if opts.extractaudio:
        if opts.audioformat not in ['best', 'aac', 'mp3', 'vorbis', 'm4a', 'wav']:
            parser.error(u'invalid audio format specified')
    if opts.audioquality:
        opts.audioquality = opts.audioquality.strip('k').strip('K')
        if not opts.audioquality.isdigit():
            parser.error(u'invalid audio quality specified')

    if sys.version_info < (3,):
        # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
        if opts.outtmpl is not None:
            opts.outtmpl = opts.outtmpl.decode(preferredencoding())
    outtmpl =((opts.outtmpl is not None and opts.outtmpl)
            or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
            or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
            or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
            or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
            or (opts.useid and u'%(id)s.%(ext)s')
            or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
            or u'%(id)s.%(ext)s')
    # File downloader
    fd = FileDownloader({
        'usenetrc': opts.usenetrc,
        'username': opts.username,
        'password': opts.password,
        'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
        'forceurl': opts.geturl,
        'forcetitle': opts.gettitle,
        'forcethumbnail': opts.getthumbnail,
        'forcedescription': opts.getdescription,
        'forcefilename': opts.getfilename,
        'forceformat': opts.getformat,
        'simulate': opts.simulate,
        'skip_download': (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat),
        'format': opts.format,
        'format_limit': opts.format_limit,
        'listformats': opts.listformats,
        'outtmpl': outtmpl,
        'restrictfilenames': opts.restrictfilenames,
        'ignoreerrors': opts.ignoreerrors,
        'ratelimit': opts.ratelimit,
        'nooverwrites': opts.nooverwrites,
        'retries': opts.retries,
        'buffersize': opts.buffersize,
        'noresizebuffer': opts.noresizebuffer,
        'continuedl': opts.continue_dl,
        'noprogress': opts.noprogress,
        'playliststart': opts.playliststart,
        'playlistend': opts.playlistend,
        'logtostderr': opts.outtmpl == '-',
        'consoletitle': opts.consoletitle,
        'nopart': opts.nopart,
        'updatetime': opts.updatetime,
        'writedescription': opts.writedescription,
        'writeinfojson': opts.writeinfojson,
        'writesubtitles': opts.writesubtitles,
        'subtitleslang': opts.subtitleslang,
        'matchtitle': opts.matchtitle,
        'rejecttitle': opts.rejecttitle,
        'max_downloads': opts.max_downloads,
        'prefer_free_formats': opts.prefer_free_formats,
        'verbose': opts.verbose,
        'test': opts.test,
        })

    if opts.verbose:
        fd.to_screen(u'[debug] youtube-dl version ' + __version__)
        try:
            sp = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                  cwd=os.path.dirname(os.path.abspath(__file__)))
            out, err = sp.communicate()
            out = out.decode().strip()
            if re.match('[0-9a-f]+', out):
                fd.to_screen(u'[debug] Git HEAD: ' + out)
        except:
            pass
        fd.to_screen(u'[debug] Python version %s - %s' %(platform.python_version(), platform.platform()))
        fd.to_screen(u'[debug] Proxy map: ' + str(proxy_handler.proxies))

    for extractor in extractors:
        fd.add_info_extractor(extractor)

    # PostProcessors
    if opts.extractaudio:
        fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec=opts.audioformat, preferredquality=opts.audioquality, keepvideo=opts.keepvideo, nopostoverwrites=opts.nopostoverwrites))

    # Update version
    if opts.update_self:
        update_self(fd.to_screen, opts.verbose, sys.argv[0])

    # Maybe do nothing
    if len(all_urls) < 1:
        if not opts.update_self:
            parser.error(u'you must provide at least one URL')
        else:
            sys.exit()

    try:
        retcode = fd.download(all_urls)
    except MaxDownloadsReached:
        fd.to_screen(u'--max-download limit reached, aborting.')
        retcode = 101

    # Dump cookie jar if requested
    if opts.cookiefile is not None:
        try:
            jar.save()
        except (IOError, OSError) as err:
            sys.exit(u'ERROR: unable to save cookie jar')

    sys.exit(retcode)

def main():
    try:
        _real_main()
    except DownloadError:
        sys.exit(1)
    except SameFileError:
        sys.exit(u'ERROR: fixed output name but more than one file to download')
    except KeyboardInterrupt:
        sys.exit(u'\nERROR: Interrupted by user')
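For readers of this report, the wiring `_real_main` performs above is thin enough to reproduce by hand. Below is a minimal sketch of driving the same objects programmatically, assuming the package layout shown in the imports above; the options are a small illustrative subset of the full dict `_real_main` builds, and the URL is a hypothetical placeholder:

from youtube_dl.FileDownloader import FileDownloader
from youtube_dl.InfoExtractors import gen_extractors

# Same construction order as _real_main: build the downloader,
# register every generated extractor, then hand over the URL list.
fd = FileDownloader({
    'outtmpl': u'%(title)s-%(id)s.%(ext)s',  # one of the templates _real_main falls back to
    'retries': 10,                           # default of the --retries option above
})
for extractor in gen_extractors():
    fd.add_info_extractor(extractor)
retcode = fd.download([u'http://www.youtube.com/watch?v=EXAMPLE'])  # hypothetical URL

As in `_real_main`, fd.download returns the code that is ultimately passed to sys.exit, so a caller can propagate it the same way.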
diff --git a/test_coverage/youtube_dl_FileDownloader.html b/test_coverage/youtube_dl_FileDownloader.html
new file mode 100644
index 000000000..039f21456
--- /dev/null
+++ b/test_coverage/youtube_dl_FileDownloader.html
@@ -0,0 +1,1542 @@
[coverage.py HTML report for youtube_dl.FileDownloader (57% covered)]
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import

import math
import io
import os
import re
import socket
import subprocess
import sys
import time
import traceback

if os.name == 'nt':
    import ctypes

from .utils import *


class FileDownloader(object):
    """File Downloader class.

    File downloader objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Given a video URL, the downloader doesn't know how to
    extract all the needed information itself (that is the task of the
    InfoExtractors), so it has to pass the URL to one of them.

    For this, file downloader objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the file downloader hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    asks the FileDownloader to process the video information, possibly
    downloading the video.

    File downloaders accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The FileDownloader also
    registers itself as the downloader in charge for the InfoExtractors
    that are added to it, so this is a "mutual registration".

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    usenetrc:          Use netrc for authentication instead.
    quiet:             Do not print messages to stdout.
    forceurl:          Force printing final URL.
    forcetitle:        Force printing title.
    forcethumbnail:    Force printing thumbnail URL.
    forcedescription:  Force printing description.
    forcefilename:     Force printing final filename.
    simulate:          Do not download the video files.
    format:            Video format code.
    format_limit:      Highest quality format to try.
    outtmpl:           Template for output names.
    restrictfilenames: Do not allow "&" and spaces in file names
    ignoreerrors:      Do not stop on download errors.
    ratelimit:         Download speed limit, in bytes/sec.
    nooverwrites:      Prevent overwriting files.
    retries:           Number of times to retry for HTTP error 5xx
    buffersize:        Size of download buffer in bytes.
    noresizebuffer:    Do not automatically resize the download buffer.
    continuedl:        Try to continue downloads if possible.
    noprogress:        Do not print the progress bar.
    playliststart:     Playlist item to start at.
    playlistend:       Playlist item to end at.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logtostderr:       Log messages to stderr instead of stdout.
    consoletitle:      Display progress in console window's titlebar.
    nopart:            Do not use temporary .part files.
    updatetime:        Use the Last-modified header to set output file timestamps.
    writedescription:  Write the video description to a .description file
    writeinfojson:     Write the video description to a .info.json file
    writesubtitles:    Write the video subtitles to a .srt file
    subtitleslang:     Language of the subtitles to download
    test:              Download only first bytes to test the downloader.
    """

    params = None
    _ies = []
    _pps = []
    _download_retcode = None
    _num_downloads = None
    _screen_file = None

    def __init__(self, params):
        """Create a FileDownloader object with the given options."""
        self._ies = []
        self._pps = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self.params = params

        if '%(stitle)s' in self.params['outtmpl']:
            self.to_stderr(u'WARNING: %(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag (which also secures %(uploader)s et al) instead.')
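
    # Illustrative params example (an assumption, not part of the original
    # module): 'outtmpl' is the only key __init__ reads unconditionally;
    # everything else is looked up lazily through self.params.get(), so a
    # minimal dict such as
    #
    #   FileDownloader({'outtmpl': u'%(title)s-%(id)s.%(ext)s'})
    #
    # is already enough to construct a working downloader.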

    @staticmethod
    def format_bytes(bytes):
        if bytes is None:
            return 'N/A'
        if type(bytes) is str:
            bytes = float(bytes)
        if bytes == 0.0:
            exponent = 0
        else:
            exponent = int(math.log(bytes, 1024.0))
        suffix = 'bkMGTPEZY'[exponent]
        converted = float(bytes) / float(1024 ** exponent)
        return '%.2f%s' % (converted, suffix)

    @staticmethod
    def calc_percent(byte_counter, data_len):
        if data_len is None:
            return '---.-%'
        return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0))

    @staticmethod
    def calc_eta(start, now, total, current):
        if total is None:
            return '--:--'
        dif = now - start
        if current == 0 or dif < 0.001: # One millisecond
            return '--:--'
        rate = float(current) / dif
        eta = int((float(total) - float(current)) / rate)
        (eta_mins, eta_secs) = divmod(eta, 60)
        if eta_mins > 99:
            return '--:--'
        return '%02d:%02d' % (eta_mins, eta_secs)

    @staticmethod
    def calc_speed(start, now, bytes):
        dif = now - start
        if bytes == 0 or dif < 0.001: # One millisecond
            return '%10s' % '---b/s'
        return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif))

    @staticmethod
    def best_block_size(elapsed_time, bytes):
        new_min = max(bytes / 2.0, 1.0)
        new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
        if elapsed_time < 0.001:
            return int(new_max)
        rate = bytes / elapsed_time
        if rate > new_max:
            return int(new_max)
        if rate < new_min:
            return int(new_min)
        return int(rate)

    @staticmethod
    def parse_bytes(bytestr):
        """Parse a string indicating a byte quantity into an integer."""
        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
        if matchobj is None:
            return None
        number = float(matchobj.group(1))
        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
        return int(round(number * multiplier))
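
    # Worked example for the two helpers above (illustrative, not present in
    # the original module): parse_bytes() and format_bytes() round-trip
    # through the same 1024-based suffix table:
    #
    #   FileDownloader.parse_bytes('50k')  == 51200    # 50 * 1024
    #   FileDownloader.format_bytes(51200) == '50.00k'
    #   FileDownloader.parse_bytes('1.5M') == 1572864  # 1.5 * 1024 ** 2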

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        self._ies.append(ie)
        ie.set_downloader(self)

    def add_post_processor(self, pp):
        """Add a PostProcessor object to the end of the chain."""
        self._pps.append(pp)
        pp.set_downloader(self)

    def to_screen(self, message, skip_eol=False):
        """Print message to stdout if not in quiet mode."""
        assert type(message) == type(u'')
        if not self.params.get('quiet', False):
            terminator = [u'\n', u''][skip_eol]
            output = message + terminator
            if 'b' in getattr(self._screen_file, 'mode', '') or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr
                output = output.encode(preferredencoding(), 'ignore')
            self._screen_file.write(output)
            self._screen_file.flush()

    def to_stderr(self, message):
        """Print message to stderr."""
        assert type(message) == type(u'')
        output = message + u'\n'
        if 'b' in getattr(self._screen_file, 'mode', '') or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr
            output = output.encode(preferredencoding())
        sys.stderr.write(output)

    def to_cons_title(self, message):
        """Set console/terminal window title to message."""
        if not self.params.get('consoletitle', False):
            return
        if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
            # c_wchar_p() might not be necessary if `message` is
            # already of type unicode()
            ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        elif 'TERM' in os.environ:
            sys.stderr.write('\033]0;%s\007' % message.encode(preferredencoding()))

    def fixed_template(self):
        """Checks if the output template is fixed."""
        return (re.search(u'(?u)%\\(.+?\\)s', self.params['outtmpl']) is None)

    def trouble(self, message=None, tb=None):
        """Determine action to take when a download problem appears.

        Depending on whether the downloader has been configured to ignore
        download errors or not, this method may raise an exception or not
        when errors are found, after printing the message.
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                tb = u''.join(traceback.format_list(traceback.extract_stack()))
            self.to_stderr(tb)
        if not self.params.get('ignoreerrors', False):
            raise DownloadError(message)
        self._download_retcode = 1

    def slow_down(self, start_time, byte_counter):
        """Sleep if the download speed is over the rate limit."""
        rate_limit = self.params.get('ratelimit', None)
        if rate_limit is None or byte_counter == 0:
            return
        now = time.time()
        elapsed = now - start_time
        if elapsed <= 0.0:
            return
        speed = float(byte_counter) / elapsed
        if speed > rate_limit:
            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)

    def temp_name(self, filename):
        """Returns a temporary filename for the given filename."""
        if self.params.get('nopart', False) or filename == u'-' or \
                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
            return filename
        return filename + u'.part'

    def undo_temp_name(self, filename):
        if filename.endswith(u'.part'):
            return filename[:-len(u'.part')]
        return filename

    def try_rename(self, old_filename, new_filename):
        try:
            if old_filename == new_filename:
                return
            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
        except (IOError, OSError) as err:
            self.trouble(u'ERROR: unable to rename file')

    def try_utime(self, filename, last_modified_hdr):
        """Try to set the last-modified time of the given file."""
        if last_modified_hdr is None:
            return
        if not os.path.isfile(encodeFilename(filename)):
            return
        timestr = last_modified_hdr
        if timestr is None:
            return
        filetime = timeconvert(timestr)
        if filetime is None:
            return filetime
        try:
            os.utime(filename, (time.time(), filetime))
        except:
            pass
        return filetime

    def report_writedescription(self, descfn):
        """ Report that the description file is being written """
        self.to_screen(u'[info] Writing video description to: ' + descfn)

    def report_writesubtitles(self, srtfn):
        """ Report that the subtitles file is being written """
        self.to_screen(u'[info] Writing video subtitles to: ' + srtfn)

    def report_writeinfojson(self, infofn):
        """ Report that the metadata file has been written """
        self.to_screen(u'[info] Video description metadata as JSON to: ' + infofn)

    def report_destination(self, filename):
        """Report destination filename."""
        self.to_screen(u'[download] Destination: ' + filename)

    def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
        """Report download progress."""
        if self.params.get('noprogress', False):
            return
        self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
                (percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
        self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
                (percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))

    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)

    def report_retry(self, count, retries):
        """Report retry in case of HTTP error 5xx"""
        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen(u'[download] %s has already been downloaded' % file_name)
        except (UnicodeEncodeError) as err:
            self.to_screen(u'[download] The file has already been downloaded')

    def report_unable_to_resume(self):
        """Report it was impossible to resume download."""
        self.to_screen(u'[download] Unable to resume')

    def report_finish(self):
        """Report download finished."""
        if self.params.get('noprogress', False):
            self.to_screen(u'[download] Download completed')
        else:
            self.to_screen(u'')

    def increment_downloads(self):
        """Increment the ordinal that assigns a number to each file."""
        self._num_downloads += 1

    def prepare_filename(self, info_dict):
        """Generate the output filename."""
        try:
            template_dict = dict(info_dict)

            template_dict['epoch'] = int(time.time())
            template_dict['autonumber'] = u'%05d' % self._num_downloads

            sanitize = lambda k,v: sanitize_filename(
                u'NA' if v is None else compat_str(v),
                restricted=self.params.get('restrictfilenames'),
                is_id=(k==u'id'))
            template_dict = dict((k, sanitize(k, v)) for k,v in template_dict.items())

            filename = self.params['outtmpl'] % template_dict
            return filename
        except (ValueError, KeyError) as err:
            self.trouble(u'ERROR: invalid system charset or erroneous output template')
            return None

    def _match_entry(self, info_dict):
        """ Returns None iff the file should be downloaded """

        title = info_dict['title']
        matchtitle = self.params.get('matchtitle', False)
        if matchtitle:
            matchtitle = matchtitle.decode('utf8')
            if not re.search(matchtitle, title, re.IGNORECASE):
                return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"'
        rejecttitle = self.params.get('rejecttitle', False)
        if rejecttitle:
            rejecttitle = rejecttitle.decode('utf8')
            if re.search(rejecttitle, title, re.IGNORECASE):
                return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
        return None

    def process_info(self, info_dict):
        """Process a single dictionary returned by an InfoExtractor."""

        # Keep for backwards compatibility
        info_dict['stitle'] = info_dict['title']

        if not 'format' in info_dict:
            info_dict['format'] = info_dict['ext']

        reason = self._match_entry(info_dict)
        if reason is not None:
            self.to_screen(u'[download] ' + reason)
            return

        max_downloads = self.params.get('max_downloads')
        if max_downloads is not None:
            if self._num_downloads > int(max_downloads):
                raise MaxDownloadsReached()

        filename = self.prepare_filename(info_dict)

        # Forced printings
        if self.params.get('forcetitle', False):
            compat_print(info_dict['title'])
        if self.params.get('forceurl', False):
            compat_print(info_dict['url'])
        if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
            compat_print(info_dict['thumbnail'])
        if self.params.get('forcedescription', False) and 'description' in info_dict:
            compat_print(info_dict['description'])
        if self.params.get('forcefilename', False) and filename is not None:
            compat_print(filename)
        if self.params.get('forceformat', False):
            compat_print(info_dict['format'])

        # Do nothing else if in simulate mode
        if self.params.get('simulate', False):
            return

        if filename is None:
            return

        try:
            dn = os.path.dirname(encodeFilename(filename))
            if dn != '' and not os.path.exists(dn): # dn is already encoded
                os.makedirs(dn)
        except (OSError, IOError) as err:
            self.trouble(u'ERROR: unable to create directory ' + compat_str(err))
            return

        if self.params.get('writedescription', False):
            try:
                descfn = filename + u'.description'
                self.report_writedescription(descfn)
                with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(info_dict['description'])
            except (OSError, IOError):
                self.trouble(u'ERROR: Cannot write description file ' + descfn)
                return

        if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            try:
                srtfn = filename.rsplit('.', 1)[0] + u'.srt'
                self.report_writesubtitles(srtfn)
                with io.open(encodeFilename(srtfn), 'w', encoding='utf-8') as srtfile:
                    srtfile.write(info_dict['subtitles'])
            except (OSError, IOError):
                self.trouble(u'ERROR: Cannot write subtitles file ' + srtfn)
                return

        if self.params.get('writeinfojson', False):
            infofn = filename + u'.info.json'
            self.report_writeinfojson(infofn)
            try:
                json_info_dict = dict((k, v) for k,v in info_dict.items() if not k in ['urlhandle'])
                write_json_file(json_info_dict, encodeFilename(infofn))
            except (OSError, IOError):
                self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn)
                return

        if not self.params.get('skip_download', False):
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
                success = True
            else:
                try:
                    success = self._do_download(filename, info_dict)
                except (OSError, IOError) as err:
                    raise UnavailableVideoError()
                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                    self.trouble(u'ERROR: unable to download video data: %s' % str(err))
                    return
                except (ContentTooShortError, ) as err:
                    self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                    return

            if success:
                try:
                    self.post_process(filename, info_dict)
                except (PostProcessingError) as err:
                    self.trouble(u'ERROR: postprocessing: %s' % str(err))
                    return

    def download(self, url_list):
        """Download a given list of URLs."""
        if len(url_list) > 1 and self.fixed_template():
            raise SameFileError(self.params['outtmpl'])

        for url in url_list:
            suitable_found = False
            for ie in self._ies:
                # Go to next InfoExtractor if not suitable
                if not ie.suitable(url):
                    continue

                # Warn if the _WORKING attribute is False
                if not ie.working():
                    self.to_stderr(u'WARNING: the program functionality for this site has been marked as broken, '
                                   u'and will probably not work. If you want to go on, use the -i option.')

                # Suitable InfoExtractor found
                suitable_found = True

                # Extract information from URL and process it
                try:
                    videos = ie.extract(url)
                except ExtractorError as de: # An error we somewhat expected
                    self.trouble(u'ERROR: ' + compat_str(de), compat_str(u''.join(traceback.format_tb(de.traceback))))
                    break
                except Exception as e:
                    if self.params.get('ignoreerrors', False):
                        self.trouble(u'ERROR: ' + compat_str(e), tb=compat_str(traceback.format_exc()))
                        break
                    else:
                        raise

                if len(videos or []) > 1 and self.fixed_template():
                    raise SameFileError(self.params['outtmpl'])

                for video in videos or []:
                    video['extractor'] = ie.IE_NAME
                    try:
                        self.increment_downloads()
                        self.process_info(video)
                    except UnavailableVideoError:
                        self.trouble(u'\nERROR: unable to download video')

                # Suitable InfoExtractor had been found; go to next URL
                break

            if not suitable_found:
                self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)

        return self._download_retcode

    def post_process(self, filename, ie_info):
        """Run the postprocessing chain on the given file."""
        info = dict(ie_info)
        info['filepath'] = filename
        for pp in self._pps:
            info = pp.run(info)
            if info is None:
                break

    def _download_with_rtmpdump(self, filename, url, player_url):
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)

        # Check for rtmpdump first
        try:
            subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
        except (OSError, IOError):
            self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
            return False

        # Download using rtmpdump. rtmpdump returns exit code 2 when
        # the connection was interrupted and resuming appears to be
        # possible. This is part of rtmpdump's normal usage, AFAIK.
        basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', tmpfilename]
        args = basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)]
        if self.params.get('verbose', False):
            try:
                import pipes
                shell_quote = lambda args: ' '.join(map(pipes.quote, args))
            except ImportError:
                shell_quote = repr
            self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(args))
        retval = subprocess.call(args)
        while retval == 2 or retval == 1:
            prevsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
            time.sleep(5.0) # This seems to be needed
            retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
            cursize = os.path.getsize(encodeFilename(tmpfilename))
            if prevsize == cursize and retval == 1:
                break
            # Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those
            if prevsize == cursize and retval == 2 and cursize > 1024:
                self.to_screen(u'\r[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
                retval = 0
                break
        if retval == 0:
            self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(encodeFilename(tmpfilename)))
            self.try_rename(tmpfilename, filename)
            return True
        else:
            self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
            return False

    def _do_download(self, filename, info_dict):
        url = info_dict['url']
        player_url = info_dict.get('player_url', None)

        # Check file already present
        if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
            self.report_file_already_downloaded(filename)
            return True

        # Attempt to download using rtmpdump
        if url.startswith('rtmp'):
            return self._download_with_rtmpdump(filename, url, player_url)

        tmpfilename = self.temp_name(filename)
        stream = None

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        basic_request = compat_urllib_request.Request(url, None, headers)
        request = compat_urllib_request.Request(url, None, headers)

        if self.params.get('test', False):
            request.add_header('Range','bytes=0-10240')

        # Establish possible resume length
        if os.path.isfile(encodeFilename(tmpfilename)):
            resume_len = os.path.getsize(encodeFilename(tmpfilename))
        else:
            resume_len = 0

        open_mode = 'wb'
        if resume_len != 0:
            if self.params.get('continuedl', False):
                self.report_resuming_byte(resume_len)
                request.add_header('Range','bytes=%d-' % resume_len)
                open_mode = 'ab'
            else:
                resume_len = 0

        count = 0
        retries = self.params.get('retries', 0)
        while count <= retries:
            # Establish connection
            try:
                if count == 0 and 'urlhandle' in info_dict:
                    data = info_dict['urlhandle']
                else:
                    data = compat_urllib_request.urlopen(request)
                break
            except (compat_urllib_error.HTTPError, ) as err:
                if (err.code < 500 or err.code >= 600) and err.code != 416:
                    # Unexpected HTTP error
                    raise
                elif err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        data = compat_urllib_request.urlopen(basic_request)
                        content_length = data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                                (resume_len - 100 < int(content_length) < resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation to the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than 100 bytes from
                            # the one in the hard drive.
                            self.report_file_already_downloaded(filename)
                            self.try_rename(tmpfilename, filename)
                            return True
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            open_mode = 'wb'
                            break
            # Retry
            count += 1
            if count <= retries:
                self.report_retry(count, retries)

        if count > retries:
            self.trouble(u'ERROR: giving up after %s retries' % retries)
            return False

        data_len = data.info().get('Content-length', None)
        if data_len is not None:
            data_len = int(data_len) + resume_len
        data_len_str = self.format_bytes(data_len)
        byte_counter = 0 + resume_len
        block_size = self.params.get('buffersize', 1024)
        start = time.time()
        while True:
            # Download and write
            before = time.time()
            data_block = data.read(block_size)
            after = time.time()
            if len(data_block) == 0:
                break
            byte_counter += len(data_block)

            # Open file just in time
            if stream is None:
                try:
                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                    assert stream is not None
                    filename = self.undo_temp_name(tmpfilename)
                    self.report_destination(filename)
                except (OSError, IOError) as err:
                    self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
                    return False
            try:
                stream.write(data_block)
            except (IOError, OSError) as err:
                self.trouble(u'\nERROR: unable to write data: %s' % str(err))
                return False
            if not self.params.get('noresizebuffer', False):
                block_size = self.best_block_size(after - before, len(data_block))

            # Progress message
            speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
            if data_len is None:
                self.report_progress('Unknown %', data_len_str, speed_str, 'Unknown ETA')
            else:
                percent_str = self.calc_percent(byte_counter, data_len)
                eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
                self.report_progress(percent_str, data_len_str, speed_str, eta_str)

            # Apply rate limit
            self.slow_down(start, byte_counter - resume_len)

        if stream is None:
            self.trouble(u'\nERROR: Did not get any data blocks')
            return False
        stream.close()
        self.report_finish()
        if data_len is not None and byte_counter != data_len:
            raise ContentTooShortError(byte_counter, int(data_len))
        self.try_rename(tmpfilename, filename)

        # Update file modification time
        if self.params.get('updatetime', True):
            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))

        return True
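
A brief usage sketch of the class above, mirroring the "mutual registration"
flow its docstring describes: build a FileDownloader from a params dict,
register an InfoExtractor, then hand download() a list of URLs. The YoutubeIE
import is an assumption based on the youtube_dl.InfoExtractors module covered
below; the test URL is the one used by the project's test suite.

    from youtube_dl.InfoExtractors import YoutubeIE

    fd = FileDownloader({
        'outtmpl': u'%(title)s-%(id)s.%(ext)s',          # read unconditionally by __init__
        'ratelimit': FileDownloader.parse_bytes('50k'),  # 51200 bytes/sec
        'retries': 10,
        'continuedl': True,
    })
    fd.add_info_extractor(YoutubeIE())  # mutual registration: ie.set_downloader(fd)
    retcode = fd.download([u'http://www.youtube.com/watch?v=BaW_jenozKc'])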

diff --git a/test_coverage/youtube_dl_InfoExtractors.html b/test_coverage/youtube_dl_InfoExtractors.html
new file mode 100644
index 000000000..3122ac66f
--- /dev/null
+++ b/test_coverage/youtube_dl_InfoExtractors.html

Coverage for youtube_dl.InfoExtractors: 44%

1

+

2

+

3

+

4

+

5

+

6

+

7

+

8

+

9

+

10

+

11

+

12

+

13

+

14

+

15

+

16

+

17

+

18

+

19

+

20

+

21

+

22

+

23

+

24

+

25

+

26

+

27

+

28

+

29

+

30

+

31

+

32

+

33

+

34

+

35

+

36

+

37

+

38

+

39

+

40

+

41

+

42

+

43

+

44

+

45

+

46

+

47

+

48

+

49

+

50

+

51

+

52

+

53

+

54

+

55

+

56

+

57

+

58

+

59

+

60

+

61

+

62

+

63

+

64

+

65

+

66

+

67

+

68

+

69

+

70

+

71

+

72

+

73

+

74

+

75

+

76

+

77

+

78

+

79

+

80

+

81

+

82

+

83

+

84

+

85

+

86

+

87

+

88

+

89

+

90

+

91

+

92

+

93

+

94

+

95

+

96

+

97

+

98

+

99

+

100

+

101

+

102

+

103

+

104

+

105

+

106

+

107

+

108

+

109

+

110

+

111

+

112

+

113

+

114

+

115

+

116

+

117

+

118

+

119

+

120

+

121

+

122

+

123

+

124

+

125

+

126

+

127

+

128

+

129

+

130

+

131

+

132

+

133

+

134

+

135

+

136

+

137

+

138

+

139

+

140

+

141

+

142

+

143

+

144

+

145

+

146

+

147

+

148

+

149

+

150

+

151

+

152

+

153

+

154

+

155

+

156

+

157

+

158

+

159

+

160

+

161

+

162

+

163

+

164

+

165

+

166

+

167

+

168

+

169

+

170

+

171

+

172

+

173

+

174

+

175

+

176

+

177

+

178

+

179

+

180

+

181

+

182

+

183

+

184

+

185

+

186

+

187

+

188

+

189

+

190

+

191

+

192

+

193

+

194

+

195

+

196

+

197

+

198

+

199

+

200

+

201

+

202

+

203

+

204

+

205

+

206

+

207

+

208

+

209

+

210

+

211

+

212

+

213

+

214

+

215

+

216

+

217

+

218

+

219

+

220

+

221

+

222

+

223

+

224

+

225

+

226

+

227

+

228

+

229

+

230

+

231

+

232

+

233

+

234

+

235

+

236

+

237

+

238

+

239

+

240

+

241

+

242

+

243

+

244

+

245

+

246

+

247

+

248

+

249

+

250

+

251

+

252

+

253

+

254

+

255

+

256

+

257

+

258

+

259

+

260

+

261

+

262

+

263

+

264

+

265

+

266

+

267

+

268

+

269

+

270

+

271

+

272

+

273

+

274

+

275

+

276

+

277

+

278

+

279

+

280

+

281

+

282

+

283

+

284

+

285

+

286

+

287

+

288

+

289

+

290

+

291

+

292

+

293

+

294

+

295

+

296

+

297

+

298

+

299

+

300

+

301

+

302

+

303

+

304

+

305

+

306

+

307

+

308

+

309

+

310

+

311

+

312

+

313

+

314

+

315

+

316

+

317

+

318

+

319

+

320

+

321

+

322

+

323

+

324

+

325

+

326

+

327

+

328

+

329

+

330

+

331

+

332

+

333

+

334

+

335

+

336

+

337

+

338

+

339

+

340

+

341

+

342

+

343

+

344

+

345

+

346

+

347

+

348

+

349

+

350

+

351

+

352

+

353

+

354

+

355

+

356

+

357

+

358

+

359

+

360

+

361

+

362

+

363

+

364

+

365

+

366

+

367

+

368

+

369

+

370

+

371

+

372

+

373

+

374

+

375

+

376

+

377

+

378

+

379

+

380

+

381

+

382

+

383

+

384

+

385

+

386

+

387

+

388

+

389

+

390

+

391

+

392

+

393

+

394

+

395

+

396

+

397

+

398

+

399

+

400

+

401

+

402

+

403

+

404

+

405

+

406

+

407

+

408

+

409

+

410

+

411

+

412

+

413

+

414

+

415

+

416

+

417

+

418

+

419

+

420

+

421

+

422

+

423

+

424

+

425

+

426

+

427

+

428

+

429

+

430

+

431

+

432

+

433

+

434

+

435

+

436

+

437

+

438

+

439

+

440

+

441

+

442

+

443

+

444

+

445

+

446

+

447

+

448

+

449

+

450

+

451

+

452

+

453

+

454

+

455

+

456

+

457

+

458

+

459

+

460

+

461

+

462

+

463

+

464

+

465

+

466

+

467

+

468

+

469

+

470

+

471

+

472

+

473

+

474

+

475

+

476

+

477

+

478

+

479

+

480

+

481

+

482

+

483

+

484

+

485

+

486

+

487

+

488

+

489

+

490

+

491

+

492

+

493

+

494

+

495

+

496

+

497

+

498

+

499

+

500

+

501

+

502

+

503

+

504

+

505

+

506

+

507

+

508

+

509

+

510

+

511

+

512

+

513

+

514

+

515

+

516

+

517

+

518

+

519

+

520

+

521

+

522

+

523

+

524

+

525

+

526

+

527

+

528

+

529

+

530

+

531

+

532

+

533

+

534

+

535

+

536

+

537

+

538

+

539

+

540

+

541

+

542

+

543

+

544

+

545

+

546

+

547

+

548

+

549

+

550

+

551

+

552

+

553

+

554

+

555

+

556

+

557

+

558

+

559

+

560

+

561

+

562

+

563

+

564

+

565

+

566

+

567

+

568

+

569

+

570

+

571

+

572

+

573

+

574

+

575

+

576

+

577

+

578

+

579

+

580

+

581

+

582

+

583

+

584

+

585

+

586

+

587

+

588

+

589

+

590

+

591

+

592

+

593

+

594

+

595

+

596

+

597

+

598

+

599

+

600

+

601

+

602

+

603

+

604

+

605

+

606

+

607

+

608

+

609

+

610

+

611

+

612

+

613

+

614

+

615

+

616

+

617

+

618

+

619

+

620

+

621

+

622

+

623

+

624

+

625

+

626

+

627

+

628

+

629

+

630

+

631

+

632

+

633

+

634

+

635

+

636

+

637

+

638

+

639

+

640

+

641

+

642

+

643

+

644

+

645

+

646

+

647

+

648

+

649

+

650

+

651

+

652

+

653

+

654

+

655

+

656

+

657

+

658

+

659

+

660

+

661

+

662

+

663

+

664

+

665

+

666

+

667

+

668

+

669

+

670

+

671

+

672

+

673

+

674

+

675

+

676

+

677

+

678

+

679

+

680

+

681

+

682

+

683

+

684

+

685

+

686

+

687

+

688

+

689

+

690

+

691

+

692

+

693

+

694

+

695

+

696

+

697

+

698

+

699

+

700

+

701

+

702

+

703

+

704

+

705

+

706

+

707

+

708

+

709

+

710

+

711

+

712

+

713

+

714

+

715

+

716

+

717

+

718

+

719

+

720

+

721

+

722

+

723

+

724

+

725

+

726

+

727

+

728

+

729

+

730

+

731

+

732

+

733

+

734

+

735

+

736

+

737

+

738

+

739

+

740

+

741

+

742

+

743

+

744

+

745

+

746

+

747

+

748

+

749

+

750

+

751

+

752

+

753

+

754

+

755

+

756

+

757

+

758

+

759

+

760

+

761

+

762

+

763

+

764

+

765

+

766

+

767

+

768

+

769

+

770

+

771

+

772

+

773

+

774

+

775

+

776

+

777

+

778

+

779

+

780

+

781

+

782

+

783

+

784

+

785

+

786

+

787

+

788

+

789

+

790

+

791

+

792

+

793

+

794

+

795

+

796

+

797

+

798

+

799

+

800

+

801

+

802

+

803

+

804

+

805

+

806

+

807

+

808

+

809

+

810

+

811

+

812

+

813

+

814

+

815

+

816

+

817

+

818

+

819

+

820

+

821

+

822

+

823

+

824

+

825

+

826

+

827

+

828

+

829

+

830

+

831

+

832

+

833

+

834

+

835

+

836

+

837

+

838

+

839

+

840

+

841

+

842

+

843

+

844

+

845

+

846

+

847

+

848

+

849

+

850

+

851

+

852

+

853

+

854

+

855

+

856

+

857

+

858

+

859

+

860

+

861

+

862

+

863

+

864

+

865

+

866

+

867

+

868

+

869

+

870

+

871

+

872

+

873

+

874

+

875

+

876

+

877

+

878

+

879

+

880

+

881

+

882

+

883

+

884

+

885

+

886

+

887

+

888

+

889

+

890

+

891

+

892

+

893

+

894

+

895

+

896

+

897

+

898

+

899

+

900

+

901

+

902

+

903

+

904

+

905

+

906

+

907

+

908

+

909

+

910

+

911

+

912

+

913

+

914

+

915

+

916

+

917

+

918

+

919

+

920

+

921

+

922

+

923

+

924

+

925

+

926

+

927

+

928

+

929

+

930

+

931

+

932

+

933

+

934

+

935

+

936

+

937

+

938

+

939

+

940

+

941

+

942

+

943

+

944

+

945

+

946

+

947

+

948

+

949

+

950

+

951

+

952

+

953

+

954

+

955

+

956

+

957

+

958

+

959

+

960

+

961

+

962

+

963

+

964

+

965

+

966

+

967

+

968

+

969

+

970

+

971

+

972

+

973

+

974

+

975

+

976

+

977

+

978

+

979

+

980

+

981

+

982

+

983

+

984

+

985

+

986

+

987

+

988

+

989

+

990

+

991

+

992

+

993

+

994

+

995

+

996

+

997

+

998

+

999

+

1000

+

1001

+

1002

+

1003

+

1004

+

1005

+

1006

+

1007

+

1008

+

1009

+

1010

+

1011

+

1012

+

1013

+

1014

+

1015

+

1016

+

1017

+

1018

+

1019

+

1020

+

1021

+

1022

+

1023

+

1024

+

1025

+

1026

+

1027

+

1028

+

1029

+

1030

+

1031

+

1032

+

1033

+

1034

+

1035

+

1036

+

1037

+

1038

+

1039

+

1040

+

1041

+

1042

+

1043

+

1044

+

1045

+

1046

+

1047

+

1048

+

1049

+

1050

+

1051

+

1052

+

1053

+

1054

+

1055

+

1056

+

1057

+

1058

+

1059

+

1060

+

1061

+

1062

+

1063

+

1064

+

1065

+

1066

+

1067

+

1068

+

1069

+

1070

+

1071

+

1072

+

1073

+

1074

+

1075

+

1076

+

1077

+

1078

+

1079

+

1080

+

1081

+

1082

+

1083

+

1084

+

1085

+

1086

+

1087

+

1088

+

1089

+

1090

+

1091

+

1092

+

1093

+

1094

+

1095

+

1096

+

1097

+

1098

+

1099

+

1100

+

1101

+

1102

+

1103

+

1104

+

1105

+

1106

+

1107

+

1108

+

1109

+

1110

+

1111

+

1112

+

1113

+

1114

+

1115

+

1116

+

1117

+

1118

+

1119

+

1120

+

1121

+

1122

+

1123

+

1124

+

1125

+

1126

+

1127

+

1128

+

1129

+

1130

+

1131

+

1132

+

1133

+

1134

+

1135

+

1136

+

1137

+

1138

+

1139

+

1140

+

1141

+

1142

+

1143

+

1144

+

1145

+

1146

+

1147

+

1148

+

1149

+

1150

+

1151

+

1152

+

1153

+

1154

+

1155

+

1156

+

1157

+

1158

+

1159

+

1160

+

1161

+

1162

+

1163

+

1164

+

1165

+

1166

+

1167

+

1168

+

1169

+

1170

+

1171

+

1172

+

1173

+

1174

+

1175

+

1176

+

1177

+

1178

+

1179

+

1180

+

1181

+

1182

+

1183

+

1184

+

1185

+

1186

+

1187

+

1188

+

1189

+

1190

+

1191

+

1192

+

1193

+

1194

+

1195

+

1196

+

1197

+

1198

+

1199

+

1200

+

1201

+

1202

+

1203

+

1204

+

1205

+

1206

+

1207

+

1208

+

1209

+

1210

+

1211

+

1212

+

1213

+

1214

+

1215

+

1216

+

1217

+

1218

+

1219

+

1220

+

1221

+

1222

+

1223

+

1224

+

1225

+

1226

+

1227

+

1228

+

1229

+

1230

+

1231

+

1232

+

1233

+

1234

+

1235

+

1236

+

1237

+

1238

+

1239

+

1240

+

1241

+

1242

+

1243

+

1244

+

1245

+

1246

+

1247

+

1248

+

1249

+

1250

+

1251

+

1252

+

1253

+

1254

+

1255

+

1256

+

1257

+

1258

+

1259

+

1260

+

1261

+

1262

+

1263

+

1264

+

1265

+

1266

+

1267

+

1268

+

1269

+

1270

+

1271

+

1272

+

1273

+

1274

+

1275

+

1276

+

1277

+

1278

+

1279

+

1280

+

1281

+

1282

+

1283

+

1284

+

1285

+

1286

+

1287

+

1288

+

1289

+

1290

+

1291

+

1292

+

1293

+

1294

+

1295

+

1296

+

1297

+

1298

+

1299

+

1300

+

1301

+

1302

+

1303

+

1304

+

1305

+

1306

+

1307

+

1308

+

1309

+

1310

+

1311

+

1312

+

1313

+

1314

+

1315

+

1316

+

1317

+

1318

+

1319

+

1320

+

1321

+

1322

+

1323

+

1324

+

1325

+

1326

+

1327

+

1328

+

1329

+

1330

+

1331

+

1332

+

1333

+

1334

+

1335

+

1336

+

1337

+

1338

+

1339

+

1340

+

1341

+

1342

+

1343

+

1344

+

1345

+

1346

+

1347

+

1348

+

1349

+

1350

+

1351

+

1352

+

1353

+

1354

+

1355

+

1356

+

1357

+

1358

+

1359

+

1360

+

1361

+

1362

+

1363

+

1364

+

1365

+

1366

+

1367

+

1368

+

1369

+

1370

+

1371

+

1372

+

1373

+

1374

+

1375

+

1376

+

1377

+

1378

+

1379

+

1380

+

1381

+

1382

+

1383

+

1384

+

1385

+

1386

+

1387

+

1388

+

1389

+

1390

+

1391

+

1392

+

1393

+

1394

+

1395

+

1396

+

1397

+

1398

+

1399

+

1400

+

1401

+

1402

+

1403

+

1404

+

1405

+

1406

+

1407

+

1408

+

1409

+

1410

+

1411

+

1412

+

1413

+

1414

+

1415

+

1416

+

1417

+

1418

+

1419

+

1420

+

1421

+

1422

+

1423

+

1424

+

1425

+

1426

+

1427

+

1428

+

1429

+

1430

+

1431

+

1432

+

1433

+

1434

+

1435

+

1436

+

1437

+

1438

+

1439

+

1440

+

1441

+

1442

+

1443

+

1444

+

1445

+

1446

+

1447

+

1448

+

1449

+

1450

+

1451

+

1452

+

1453

+

1454

+

1455

+

1456

+

1457

+

1458

+

1459

+

1460

+

1461

+

1462

+

1463

+

1464

+

1465

+

1466

+

1467

+

1468

+

1469

+

1470

+

1471

+

1472

+

1473

+

1474

+

1475

+

1476

+

1477

+

1478

+

1479

+

1480

+

1481

+

1482

+

1483

+

1484

+

1485

+

1486

+

1487

+

1488

+

1489

+

1490

+

1491

+

1492

+

1493

+

1494

+

1495

+

1496

+

1497

+

1498

+

1499

+

1500

+

1501

+

1502

+

1503

+

1504

+

1505

+

1506

+

1507

+

1508

+

1509

+

1510

+

1511

+

1512

+

1513

+

1514

+

1515

+

1516

+

1517

+

1518

+

1519

+

1520

+

1521

+

1522

+

1523

+

1524

+

1525

+

1526

+

1527

+

1528

+

1529

+

1530

+

1531

+

1532

+

1533

+

1534

+

1535

+

1536

+

1537

+

1538

+

1539

+

1540

+

1541

+

1542

+

1543

+

1544

+

1545

+

1546

+

1547

+

1548

+

1549

+

1550

+

1551

+

1552

+

1553

+

1554

+

1555

+

1556

+

1557

+

1558

+

1559

+

1560

+

1561

+

1562

+

1563

+

1564

+

1565

+

1566

+

1567

+

1568

+

1569

+

1570

+

1571

+

1572

+

1573

+

1574

+

1575

+

1576

+

1577

+

1578

+

1579

+

1580

+

1581

+

1582

+

1583

+

1584

+

1585

+

1586

+

1587

+

1588

+

1589

+

1590

+

1591

+

1592

+

1593

+

1594

+

1595

+

1596

+

1597

+

1598

+

1599

+

1600

+

1601

+

1602

+

1603

+

1604

+

1605

+

1606

+

1607

+

1608

+

1609

+

1610

+

1611

+

1612

+

1613

+

1614

+

1615

+

1616

+

1617

+

1618

+

1619

+

1620

+[generated line-number gutter of the coverage report (lines 1621-3782 of test_coverage/youtube_dl_InfoExtractors.html) omitted]

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from __future__ import absolute_import
+
+import base64
+import datetime
+import netrc
+import os
+import re
+import socket
+import time
+import email.utils
+import xml.etree.ElementTree
+import random
+import math
+
+from .utils import *
+
+
+class InfoExtractor(object):
+    """Information Extractor class.
+
+    Information extractors are the classes that, given a URL, extract
+    information about the video (or videos) the URL refers to. This
+    information includes the real video URL, the video title, author and
+    others. The information is stored in a dictionary which is then
+    passed to the FileDownloader. The FileDownloader processes this
+    information possibly downloading the video to the file system, among
+    other possible outcomes.
+
+    The dictionaries must include the following fields:
+
+    id:             Video identifier.
+    url:            Final video URL.
+    title:          Video title, unescaped.
+    ext:            Video filename extension.
+    uploader:       Full name of the video uploader.
+    upload_date:    Video upload date (YYYYMMDD).
+
+    The following fields are optional:
+
+    format:         The video format, defaults to ext (used for --get-format)
+    thumbnail:      Full URL to a video thumbnail image.
+    description:    One-line video description.
+    uploader_id:    Nickname or id of the video uploader.
+    player_url:     SWF Player URL (used for rtmpdump).
+    subtitles:      The .srt file contents.
+    urlhandle:      [internal] The urlHandle to be used to download the file,
+                    like returned by urllib.request.urlopen
+
+    The fields should all be Unicode strings.
+
+    Subclasses of this one should re-define the _real_initialize() and
+    _real_extract() methods and define a _VALID_URL regexp.
+    Probably, they should also be added to the list of extractors.
+
+    _real_extract() must return a *list* of information dictionaries as
+    described above.
+
+    Finally, the _WORKING attribute should be set to False for broken IEs
+    in order to warn the users and skip the tests.
+    """
+
+    _ready = False
+    _downloader = None
+    _WORKING = True
+
+    def __init__(self, downloader=None):
+        """Constructor. Receives an optional downloader."""
+        self._ready = False
+        self.set_downloader(downloader)
+
+    def suitable(self, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(self._VALID_URL, url) is not None
+
+    def working(self):
+        """Getter method for _WORKING."""
+        return self._WORKING
+
+    def initialize(self):
+        """Initializes an instance (authentication, etc)."""
+        if not self._ready:
+            self._real_initialize()
+            self._ready = True
+
+    def extract(self, url):
+        """Extracts URL information and returns it in list of dicts."""
+        self.initialize()
+        return self._real_extract(url)
+
+    def set_downloader(self, downloader):
+        """Sets the downloader for this IE."""
+        self._downloader = downloader
+
+    def _real_initialize(self):
+        """Real initialization process. Redefine in subclasses."""
+        pass
+
+    def _real_extract(self, url):
+        """Real extraction process. Redefine in subclasses."""
+        pass
+
+    @property
+    def IE_NAME(self):
+        return type(self).__name__[:-2]
+
+    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
+        if note is None:
+            note = u'Downloading video webpage'
+        self._downloader.to_screen(u'[%s] %s: %s' % (self.IE_NAME, video_id, note))
+        try:
+            urlh = compat_urllib_request.urlopen(url_or_request)
+            webpage_bytes = urlh.read()
+            return webpage_bytes.decode('utf-8', 'replace')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            if errnote is None:
+                errnote = u'Unable to download webpage'
+            raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)))
+
+
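For illustration only (not part of the patch): a minimal extractor following the contract documented in the InfoExtractor docstring above. The class name, URL pattern, and all returned values are invented; a real subclass would download and parse the page instead of synthesizing the fields.

class ExampleIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?(?:www\.)?example\.com/video/([0-9A-Za-z_-]+)'

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group(1)
        # All six mandatory fields, as Unicode strings, in a *list* of dicts:
        return [{
            'id':           video_id,
            'url':          u'http://example.com/media/%s.mp4' % video_id,
            'title':        u'Example video %s' % video_id,
            'ext':          u'mp4',
            'uploader':     u'example-uploader',
            'upload_date':  u'20130102',
        }]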

+class YoutubeIE(InfoExtractor):
+    """Information extractor for youtube.com."""
+
+    _VALID_URL = r"""^
+                     (
+                         (?:https?://)?                                       # http(s):// (optional)
+                         (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/|
+                            tube\.majestyc\.net/)                             # the various hostnames, with wildcard subdomains
+                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls
+                         (?!view_play_list|my_playlists|artist|playlist)      # ignore playlist URLs
+                         (?:                                                  # the various things that can precede the ID:
+                             (?:(?:v|embed|e)/)                               # v/ or embed/ or e/
+                             |(?:                                             # or the v= param in all its forms
+                                 (?:watch(?:_popup)?(?:\.php)?)?              # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
+                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #!
+                                 (?:.*?&)?                                    # any other preceding param (like /?s=tuff&v=xxxx)
+                                 v=
+                             )
+                         )?                                                   # optional -> youtube.com/xxxx is OK
+                     )?                                                       # all until now is optional -> you can pass the naked ID
+                     ([0-9A-Za-z_-]+)                                         # here it is! the YouTube video ID
+                     (?(1).+)?                                                # if we found the ID, everything can follow
+                     $"""
+    _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
+    _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
+    _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
+    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
+    _NETRC_MACHINE = 'youtube'
+    # Listed in order of quality
+    _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13']
+    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13']
+    _video_extensions = {
+        '13': '3gp',
+        '17': 'mp4',
+        '18': 'mp4',
+        '22': 'mp4',
+        '37': 'mp4',
+        '38': 'video', # You actually don't know if this will be MOV, AVI or whatever
+        '43': 'webm',
+        '44': 'webm',
+        '45': 'webm',
+        '46': 'webm',
+    }
+    _video_dimensions = {
+        '5': '240x400',
+        '6': '???',
+        '13': '???',
+        '17': '144x176',
+        '18': '360x640',
+        '22': '720x1280',
+        '34': '360x640',
+        '35': '480x854',
+        '37': '1080x1920',
+        '38': '3072x4096',
+        '43': '360x640',
+        '44': '480x854',
+        '45': '720x1280',
+        '46': '1080x1920',
+    }
+    IE_NAME = u'youtube'
+
+    def suitable(self, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        return re.match(self._VALID_URL, url, re.VERBOSE) is not None
+
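# Illustration only (not part of the patch): a few of the URL shapes the
# verbose _VALID_URL above is written to accept, per its own comments. The
# video ID 'BaW_jenozKc' is an arbitrary example.
urls = [
    u'http://www.youtube.com/watch?v=BaW_jenozKc',
    u'https://youtube.com/watch?v=BaW_jenozKc',
    u'http://youtu.be/BaW_jenozKc',
    u'http://www.youtube.com/embed/BaW_jenozKc',
    u'BaW_jenozKc',  # the naked ID is accepted too
]
ie = YoutubeIE()
assert all(ie.suitable(u) for u in urls)
# Playlist URLs are rejected by the (?!view_play_list|...|playlist) lookahead:
assert not ie.suitable(u'http://www.youtube.com/playlist?list=PL0123456789')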

+    def report_lang(self):
+        """Report attempt to set language."""
+        self._downloader.to_screen(u'[youtube] Setting language')
+
+    def report_login(self):
+        """Report attempt to log in."""
+        self._downloader.to_screen(u'[youtube] Logging in')
+
+    def report_age_confirmation(self):
+        """Report attempt to confirm age."""
+        self._downloader.to_screen(u'[youtube] Confirming age')
+
+    def report_video_webpage_download(self, video_id):
+        """Report attempt to download video webpage."""
+        self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id)
+
+    def report_video_info_webpage_download(self, video_id):
+        """Report attempt to download video info webpage."""
+        self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id)
+
+    def report_video_subtitles_download(self, video_id):
+        """Report attempt to download video subtitles."""
+        self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles' % video_id)
+
+    def report_information_extraction(self, video_id):
+        """Report attempt to extract video information."""
+        self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id)
+
+    def report_unavailable_format(self, video_id, format):
+        """Report that the requested format is not available."""
+        self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format))
+
+    def report_rtmp_download(self):
+        """Indicate the download will use the RTMP protocol."""
+        self._downloader.to_screen(u'[youtube] RTMP download detected')
+
+    def _closed_captions_xml_to_srt(self, xml_string):
+        srt = ''
+        texts = re.findall(r'<text start="([\d\.]+)"( dur="([\d\.]+)")?>([^<]+)</text>', xml_string, re.MULTILINE)
+        # TODO parse xml instead of regex
+        for n, (start, dur_tag, dur, caption) in enumerate(texts):
+            if not dur: dur = '4'
+            start = float(start)
+            end = start + float(dur)
+            start = "%02i:%02i:%02i,%03i" % (start/(60*60), start/60%60, start%60, start%1*1000)
+            end = "%02i:%02i:%02i,%03i" % (end/(60*60), end/60%60, end%60, end%1*1000)
+            caption = unescapeHTML(caption)
+            caption = unescapeHTML(caption) # double cycle, intentional
+            srt += str(n+1) + '\n'
+            srt += start + ' --> ' + end + '\n'
+            srt += caption + '\n\n'
+        return srt
+
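# Illustration only (not part of the patch): how the "%02i:%02i:%02i,%03i"
# pattern in _closed_captions_xml_to_srt above turns a float offset in seconds
# into an SRT timestamp; %i truncates each float to an integer. The offset is
# an arbitrary example.
start = 3723.5  # 1 hour, 2 minutes, 3.5 seconds
stamp = "%02i:%02i:%02i,%03i" % (start / (60 * 60),  # hours        -> 01
                                 start / 60 % 60,    # minutes      -> 02
                                 start % 60,         # seconds      -> 03
                                 start % 1 * 1000)   # milliseconds -> 500
assert stamp == "01:02:03,500"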

+    def _extract_subtitles(self, video_id):
+        self.report_video_subtitles_download(video_id)
+        request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
+        try:
+            srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+        srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
+        srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
+        if not srt_lang_list:
+            return (u'WARNING: video has no closed captions', None)
+        if self._downloader.params.get('subtitleslang', False):
+            srt_lang = self._downloader.params.get('subtitleslang')
+        elif 'en' in srt_lang_list:
+            srt_lang = 'en'
+        else:
+            srt_lang = list(srt_lang_list.keys())[0]
+        if not srt_lang in srt_lang_list:
+            return (u'WARNING: no closed captions found in the specified language', None)
+        request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
+        try:
+            srt_xml = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+        if not srt_xml:
+            return (u'WARNING: unable to download video subtitles', None)
+        return (None, self._closed_captions_xml_to_srt(srt_xml))
+
+    def _print_formats(self, formats):
+        print('Available formats:')
+        for x in formats:
+            print('%s\t:\t%s\t[%s]' % (x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???')))
+
+    def _real_initialize(self):
+        if self._downloader is None:
+            return
+
+        username = None
+        password = None
+        downloader_params = self._downloader.params
+
+        # Attempt to use provided username and password or .netrc data
+        if downloader_params.get('username', None) is not None:
+            username = downloader_params['username']
+            password = downloader_params['password']
+        elif downloader_params.get('usenetrc', False):
+            try:
+                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
+                if info is not None:
+                    username = info[0]
+                    password = info[2]
+                else:
+                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
+            except (IOError, netrc.NetrcParseError) as err:
+                self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
+                return
+
+        # Set language
+        request = compat_urllib_request.Request(self._LANG_URL)
+        try:
+            self.report_lang()
+            compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.to_stderr(u'WARNING: unable to set language: %s' % compat_str(err))
+            return
+
+        # No authentication to be performed
+        if username is None:
+            return
+
+        # Log in
+        login_form = {
+                'current_form': 'loginForm',
+                'next':     '/',
+                'action_login': 'Log In',
+                'username': username,
+                'password': password,
+                }
+        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+        try:
+            self.report_login()
+            login_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
+            if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
+                self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
+                return
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
+            return
+
+        # Confirm age
+        age_form = {
+                'next_url':     '/',
+                'action_confirm':   'Confirm',
+                }
+        request = compat_urllib_request.Request(self._AGE_URL, compat_urllib_parse.urlencode(age_form))
+        try:
+            self.report_age_confirmation()
+            age_results = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
+            return
+
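# Illustration only (not part of the patch): the 'usenetrc' branch above reads
# credentials from ~/.netrc under the machine name in _NETRC_MACHINE;
# authenticators() returns a (login, account, password) triple, which is why
# the code takes info[0] and info[2]. A matching ~/.netrc entry would look
# like this (credentials invented):
#
#     machine youtube login user@example.com password hunter2
#
import netrc
try:
    info = netrc.netrc().authenticators('youtube')
except (IOError, netrc.NetrcParseError):
    info = None
if info is not None:
    username, password = info[0], info[2]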

+    def _extract_id(self, url):
+        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+        video_id = mobj.group(2)
+        return video_id
+
+    def _real_extract(self, url):
+        # Extract original video URL from URL with redirection, like age verification, using next_url parameter
+        mobj = re.search(self._NEXT_URL_RE, url)
+        if mobj:
+            url = 'http://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/')
+        video_id = self._extract_id(url)
+
+        # Get video webpage
+        self.report_video_webpage_download(video_id)
+        url = 'http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id
+        request = compat_urllib_request.Request(url)
+        try:
+            video_webpage_bytes = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
+            return
+
+        video_webpage = video_webpage_bytes.decode('utf-8', 'ignore')
+
+        # Attempt to extract SWF player URL
+        mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
+        if mobj is not None:
+            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
+        else:
+            player_url = None
+
+        # Get video info
+        self.report_video_info_webpage_download(video_id)
+        for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
+            video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
+                    % (video_id, el_type))
+            request = compat_urllib_request.Request(video_info_url)
+            try:
+                video_info_webpage_bytes = compat_urllib_request.urlopen(request).read()
+                video_info_webpage = video_info_webpage_bytes.decode('utf-8', 'ignore')
+                video_info = compat_parse_qs(video_info_webpage)
+                if 'token' in video_info:
+                    break
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
+                return
+        if 'token' not in video_info:
+            if 'reason' in video_info:
+                self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0])
+            else:
+                self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
+            return
+
+        # Check for "rental" videos
+        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
+            self._downloader.trouble(u'ERROR: "rental" videos not supported')
+            return
+
+        # Start extracting information
+        self.report_information_extraction(video_id)
+
+        # uploader
+        if 'author' not in video_info:
+            self._downloader.trouble(u'ERROR: unable to extract uploader name')
+            return
+        video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
+
+        # uploader_id
+        video_uploader_id = None
+        mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
+        if mobj is not None:
+            video_uploader_id = mobj.group(1)
+        else:
+            self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+
+        # title
+        if 'title' not in video_info:
+            self._downloader.trouble(u'ERROR: unable to extract video title')
+            return
+        video_title = compat_urllib_parse.unquote_plus(video_info['title'][0])
+
+        # thumbnail image
+        if 'thumbnail_url' not in video_info:
+            self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
+            video_thumbnail = ''
+        else:   # don't panic if we can't find it
+            video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0])
+
+        # upload date
+        upload_date = None
+        mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL)
+        if mobj is not None:
+            upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split())
+            format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y']
+            for expression in format_expressions:
+                try:
+                    upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d')
+                except:
+                    pass
+
+        # description
+        video_description = get_element_by_id("eow-description", video_webpage)
+        if video_description:
+            video_description = clean_html(video_description)
+        else:
+            video_description = ''
+
+        # closed captions
+        video_subtitles = None
+        if self._downloader.params.get('writesubtitles', False):
+            (srt_error, video_subtitles) = self._extract_subtitles(video_id)
+            if srt_error:
+                self._downloader.trouble(srt_error)
+
+        if 'length_seconds' not in video_info:
+            self._downloader.trouble(u'WARNING: unable to extract video duration')
+            video_duration = ''
+        else:
+            video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])
+
+        # token
+        video_token = compat_urllib_parse.unquote_plus(video_info['token'][0])
+
+        # Decide which formats to download
+        req_format = self._downloader.params.get('format', None)
+
+        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
+            self.report_rtmp_download()
+            video_url_list = [(None, video_info['conn'][0])]
+        elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
+            url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
+            url_data = [compat_parse_qs(uds) for uds in url_data_strs]
+            url_data = [ud for ud in url_data if 'itag' in ud and 'url' in ud]
+            url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)
+
+            format_limit = self._downloader.params.get('format_limit', None)
+            available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
+            if format_limit is not None and format_limit in available_formats:
+                format_list = available_formats[available_formats.index(format_limit):]
+            else:
+                format_list = available_formats
+            existing_formats = [x for x in format_list if x in url_map]
+            if len(existing_formats) == 0:
+                self._downloader.trouble(u'ERROR: no known formats available for video')
+                return
+            if self._downloader.params.get('listformats', None):
+                self._print_formats(existing_formats)
+                return
+            if req_format is None or req_format == 'best':
+                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
+            elif req_format == 'worst':
+                video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality
+            elif req_format in ('-1', 'all'):
+                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
+            else:
+                # Specific formats. We pick the first in a slash-delimited sequence.
+                # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
+                req_formats = req_format.split('/')
+                video_url_list = None
+                for rf in req_formats:
+                    if rf in url_map:
+                        video_url_list = [(rf, url_map[rf])]
+                        break
+                if video_url_list is None:
+                    self._downloader.trouble(u'ERROR: requested format not available')
+                    return
+        else:
+            self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info')
+            return
+
+        results = []
+        for format_param, video_real_url in video_url_list:
+            # Extension
+            video_extension = self._video_extensions.get(format_param, 'flv')
+
+            video_format = '{0} - {1}'.format(format_param if format_param else video_extension,
+                                              self._video_dimensions.get(format_param, '???'))
+
+            results.append({
+                'id':       video_id,
+                'url':      video_real_url,
+                'uploader': video_uploader,
+                'uploader_id': video_uploader_id,
+                'upload_date':  upload_date,
+                'title':    video_title,
+                'ext':      video_extension,
+                'format':   video_format,
+                'thumbnail':    video_thumbnail,
+                'description':  video_description,
+                'player_url':   player_url,
+                'subtitles':    video_subtitles,
+                'duration':     video_duration
+            })
+        return results
+
+
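For illustration only (not part of the patch): the slash-delimited format rule from the branch above, in isolation. The itags and URLs below are invented.

url_map = {'2': u'http://example.invalid/fmt2',
           '4': u'http://example.invalid/fmt4'}  # itags the video actually offers

def pick_format(req_format, url_map):
    # The first requested itag that is actually available wins.
    for rf in req_format.split('/'):
        if rf in url_map:
            return rf
    return None  # caller reports 'requested format not available'

assert pick_format('1/2/3/4', url_map) == '2'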

+class MetacafeIE(InfoExtractor):
+    """Information Extractor for metacafe.com."""
+
+    _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
+    _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
+    _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
+    IE_NAME = u'metacafe'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_disclaimer(self):
+        """Report disclaimer retrieval."""
+        self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')
+
+    def report_age_confirmation(self):
+        """Report attempt to confirm age."""
+        self._downloader.to_screen(u'[metacafe] Confirming age')
+
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id)
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id)
+
+    def _real_initialize(self):
+        # Retrieve disclaimer
+        request = compat_urllib_request.Request(self._DISCLAIMER)
+        try:
+            self.report_disclaimer()
+            disclaimer = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err))
+            return
+
+        # Confirm age
+        disclaimer_form = {
+            'filters': '0',
+            'submit': "Continue - I'm over 18",
+            }
+        request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
+        try:
+            self.report_age_confirmation()
+            disclaimer = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
+            return
+
+    def _real_extract(self, url):
+        # Extract id and simplified title from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group(1)
+
+        # Check if video comes from YouTube
+        mobj2 = re.match(r'^yt-(.*)$', video_id)
+        if mobj2 is not None:
+            self._downloader.download(['http://www.youtube.com/watch?v=%s' % mobj2.group(1)])
+            return
+
+        # Retrieve video webpage to extract further information
+        request = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id)
+        try:
+            self.report_download_webpage(video_id)
+            webpage = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % compat_str(err))
+            return
+
+        # Extract URL, uploader and title from webpage
+        self.report_extraction(video_id)
+        mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
+        if mobj is not None:
+            mediaURL = compat_urllib_parse.unquote(mobj.group(1))
+            video_extension = mediaURL[-3:]
+
+            # Extract gdaKey if available
+            mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
+            if mobj is None:
+                video_url = mediaURL
+            else:
+                gdaKey = mobj.group(1)
+                video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
+        else:
+            mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
+            if mobj is None:
+                self._downloader.trouble(u'ERROR: unable to extract media URL')
+                return
+            vardict = compat_parse_qs(mobj.group(1))
+            if 'mediaData' not in vardict:
+                self._downloader.trouble(u'ERROR: unable to extract media URL')
+                return
+            mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
+            if mobj is None:
+                self._downloader.trouble(u'ERROR: unable to extract media URL')
+                return
+            mediaURL = mobj.group(1).replace('\\/', '/')
+            video_extension = mediaURL[-3:]
+            video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2))
+
+        mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract title')
+            return
+        video_title = mobj.group(1).decode('utf-8')
+
+        mobj = re.search(r'submitter=(.*?);', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+            return
+        video_uploader = mobj.group(1)
+
+        return [{
+            'id':       video_id.decode('utf-8'),
+            'url':      video_url.decode('utf-8'),
+            'uploader': video_uploader.decode('utf-8'),
+            'upload_date':  None,
+            'title':    video_title,
+            'ext':      video_extension.decode('utf-8'),
+        }]
+
+

+class DailymotionIE(InfoExtractor):
+    """Information Extractor for Dailymotion"""
+
+    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
+    IE_NAME = u'dailymotion'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)
+
+    def _real_extract(self, url):
+        # Extract id and simplified title from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group(1).split('_')[0].split('?')[0]
+
+        video_extension = 'mp4'
+
+        # Retrieve video webpage to extract further information
+        request = compat_urllib_request.Request(url)
+        request.add_header('Cookie', 'family_filter=off')
+        webpage = self._download_webpage(request, video_id)
+
+        # Extract URL, uploader and title from webpage
+        self.report_extraction(video_id)
+        mobj = re.search(r'\s*var flashvars = (.*)', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract media URL')
+            return
+        flashvars = compat_urllib_parse.unquote(mobj.group(1))
+
+        for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']:
+            if key in flashvars:
+                max_quality = key
+                self._downloader.to_screen(u'[dailymotion] Using %s' % key)
+                break
+        else:
+            self._downloader.trouble(u'ERROR: unable to extract video URL')
+            return
+
+        mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video URL')
+            return
+
+        video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/')
+
+        # TODO: support choosing qualities
+
+        mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract title')
+            return
+        video_title = unescapeHTML(mobj.group('title'))
+
+        video_uploader = None
+        mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>', webpage)
+        if mobj is None:
+            # looking for official user
+            mobj_official = re.search(r'<span rel="author"[^>]+?>([^<]+?)</span>', webpage)
+            if mobj_official is None:
+                self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+            else:
+                video_uploader = mobj_official.group(1)
+        else:
+            video_uploader = mobj.group(1)
+
+        video_upload_date = None
+        mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
+        if mobj is not None:
+            video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'uploader': video_uploader,
+            'upload_date':  video_upload_date,
+            'title':    video_title,
+            'ext':      video_extension,
+        }]
+
+
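For illustration only (not part of the patch): the for/else idiom used in the Dailymotion quality loop above. The else suite runs only when the loop finishes without hitting break, i.e. when none of the preferred keys occurs in flashvars. The payload below is invented.

flashvars = u'"sdURL":"http:\\/\\/example.invalid\\/video.mp4"'  # invented payload
for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']:
    if key in flashvars:
        max_quality = key  # first hit wins: the list is ordered best-first
        break
else:
    max_quality = None  # reached only when the loop never breaks
assert max_quality == 'sdURL'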

+class PhotobucketIE(InfoExtractor):
+    """Information extractor for photobucket.com."""
+
+    _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
+    IE_NAME = u'photobucket'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)
+
+    def _real_extract(self, url):
+        # Extract id from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group(1)
+
+        video_extension = 'flv'
+
+        # Retrieve video webpage to extract further information
+        request = compat_urllib_request.Request(url)
+        try:
+            self.report_download_webpage(video_id)
+            webpage = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            return
+
+        # Extract URL, uploader, and title from webpage
+        self.report_extraction(video_id)
+        mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract media URL')
+            return
+        mediaURL = compat_urllib_parse.unquote(mobj.group(1))
+
+        video_url = mediaURL
+
+        mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract title')
+            return
+        video_title = mobj.group(1).decode('utf-8')
+
+        video_uploader = mobj.group(2).decode('utf-8')
+
+        return [{
+            'id':       video_id.decode('utf-8'),
+            'url':      video_url.decode('utf-8'),
+            'uploader': video_uploader,
+            'upload_date':  None,
+            'title':    video_title,
+            'ext':      video_extension.decode('utf-8'),
+        }]
+
+

class YahooIE(InfoExtractor): 

+

    """Information extractor for video.yahoo.com.""" 

+

 

+

    _WORKING = False 

+

    # _VALID_URL matches all Yahoo! Video URLs 

+

    # _VPAGE_URL matches only the extractable '/watch/' URLs 

+

    _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?' 

+

    _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?' 

+

    IE_NAME = u'video.yahoo' 

+

 

+

    def __init__(self, downloader=None): 

+

        InfoExtractor.__init__(self, downloader) 

+

 

+

    def report_download_webpage(self, video_id): 

+

        """Report webpage download.""" 

+

        self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id) 

+

 

+

    def report_extraction(self, video_id): 

+

        """Report information extraction.""" 

+

        self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id) 

+

 

+

    def _real_extract(self, url, new_video=True): 

+

        # Extract ID from URL 

+

        mobj = re.match(self._VALID_URL, url) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) 

+

            return 

+

 

+

        video_id = mobj.group(2) 

+

        video_extension = 'flv' 

+

 

+

        # Rewrite valid but non-extractable URLs as 

+

        # extractable English language /watch/ URLs 

+

        if re.match(self._VPAGE_URL, url) is None: 

+

            request = compat_urllib_request.Request(url) 

+

            try: 

+

                webpage = compat_urllib_request.urlopen(request).read() 

+

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

                self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) 

+

                return 

+

 

+

            mobj = re.search(r'\("id", "([0-9]+)"\);', webpage) 

+

            if mobj is None: 

+

                self._downloader.trouble(u'ERROR: Unable to extract id field') 

+

                return 

+

            yahoo_id = mobj.group(1) 

+

 

+

            mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage) 

+

            if mobj is None: 

+

                self._downloader.trouble(u'ERROR: Unable to extract vid field') 

+

                return 

+

            yahoo_vid = mobj.group(1) 

+

 

+

            url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id) 

+

            return self._real_extract(url, new_video=False) 

+

 

+

        # Retrieve video webpage to extract further information 

+

        request = compat_urllib_request.Request(url) 

+

        try: 

+

            self.report_download_webpage(video_id) 

+

            webpage = compat_urllib_request.urlopen(request).read() 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) 

+

            return 

+

 

+

        # Extract uploader and title from webpage 

+

        self.report_extraction(video_id) 

+

        mobj = re.search(r'<meta name="title" content="(.*)" />', webpage) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: unable to extract video title') 

+

            return 

+

        video_title = mobj.group(1).decode('utf-8') 

+

 

+

        mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: unable to extract video uploader') 

+

            return 

+        video_uploader = mobj.group(2).decode('utf-8')  # group(1) only captures "people"/"profile"
+
+        # Extract video thumbnail
+        mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+            return
+        video_thumbnail = mobj.group(1).decode('utf-8')
+
+        # Extract video description
+        mobj = re.search(r'<meta name="description" content="(.*)" />', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video description')
+            return
+        video_description = mobj.group(1).decode('utf-8')
+        if not video_description:
+            video_description = 'No description available.'
+
+        # Extract video height and width
+        mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video height')
+            return
+        yv_video_height = mobj.group(1)
+
+        mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video width')
+            return
+        yv_video_width = mobj.group(1)
+
+        # Retrieve video playlist to extract media URL
+        # I'm not completely sure what all these options are, but we
+        # seem to need most of them, otherwise the server sends a 401.
+        yv_lg = 'R0xx6idZnW2zlrKP8xxAIR'  # not sure what this represents
+        yv_bitrate = '700'  # according to Wikipedia this is hard-coded
+        request = compat_urllib_request.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
+                '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
+                '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
+        try:
+            self.report_download_webpage(video_id)
+            webpage = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            return
+
+        # Extract media URL from playlist XML
+        mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: Unable to extract media URL')
+            return
+        video_url = compat_urllib_parse.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8')
+        video_url = unescapeHTML(video_url)
+
+        return [{
+            'id':       video_id.decode('utf-8'),
+            'url':      video_url,
+            'uploader': video_uploader,
+            'upload_date':  None,
+            'title':    video_title,
+            'ext':      video_extension.decode('utf-8'),
+            'thumbnail':    video_thumbnail.decode('utf-8'),
+            'description':  video_description,
+        }]
+
+
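+# VimeoIE reads the embedded "config" JSON blob out of the watch page and
+# picks a codec/quality pair from config["video"]["files"], preferring hd,
+# then sd, then whatever else is listed.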
+class VimeoIE(InfoExtractor):
+    """Information extractor for vimeo.com."""
+
+    # _VALID_URL matches Vimeo URLs
+    _VALID_URL = r'(?:https?://)?(?:(?:www|player).)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?:videos?/)?([0-9]+)'
+    IE_NAME = u'vimeo'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[vimeo] %s: Downloading webpage' % video_id)
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[vimeo] %s: Extracting information' % video_id)
+
+    def _real_extract(self, url, new_video=True):
+        # Extract ID from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+
+        video_id = mobj.group(1)
+
+        # Retrieve video webpage to extract further information
+        request = compat_urllib_request.Request(url, None, std_headers)
+        try:
+            self.report_download_webpage(video_id)
+            webpage_bytes = compat_urllib_request.urlopen(request).read()
+            webpage = webpage_bytes.decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            return
+
+        # Now we begin extracting as much information as we can from what we
+        # retrieved. First we extract the information common to all extractors,
+        # and later we extract those that are Vimeo-specific.
+        self.report_extraction(video_id)
+
+        # Extract the config JSON
+        try:
+            config = webpage.split(' = {config:')[1].split(',assets:')[0]
+            config = json.loads(config)
+        except Exception:
+            self._downloader.trouble(u'ERROR: unable to extract info section')
+            return
+
+        # Extract title
+        video_title = config["video"]["title"]
+
+        # Extract uploader and uploader_id
+        video_uploader = config["video"]["owner"]["name"]
+        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1]
+
+        # Extract video thumbnail
+        video_thumbnail = config["video"]["thumbnail"]
+
+        # Extract video description
+        video_description = get_element_by_attribute("itemprop", "description", webpage)
+        if video_description: video_description = clean_html(video_description)
+        else: video_description = ''
+
+        # Extract upload date
+        video_upload_date = None
+        mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
+        if mobj is not None:
+            video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
+
+        # Vimeo specific: extract request signature and timestamp
+        sig = config['request']['signature']
+        timestamp = config['request']['timestamp']
+
+        # Vimeo specific: extract video codec and quality information
+        # First consider quality, then codecs, then take everything
+        # TODO bind to format param
+        codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
+        files = { 'hd': [], 'sd': [], 'other': []}
+        for codec_name, codec_extension in codecs:
+            if codec_name in config["video"]["files"]:
+                if 'hd' in config["video"]["files"][codec_name]:
+                    files['hd'].append((codec_name, codec_extension, 'hd'))
+                elif 'sd' in config["video"]["files"][codec_name]:
+                    files['sd'].append((codec_name, codec_extension, 'sd'))
+                else:
+                    files['other'].append((codec_name, codec_extension, config["video"]["files"][codec_name][0]))
+
+        for quality in ('hd', 'sd', 'other'):
+            if len(files[quality]) > 0:
+                video_quality = files[quality][0][2]
+                video_codec = files[quality][0][0]
+                video_extension = files[quality][0][1]
+                self._downloader.to_screen(u'[vimeo] %s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
+                break
+        else:
+            self._downloader.trouble(u'ERROR: no known codec found')
+            return
+
+        video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
+                    %(video_id, sig, timestamp, video_quality, video_codec.upper())
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'uploader': video_uploader,
+            'uploader_id': video_uploader_id,
+            'upload_date':  video_upload_date,
+            'title':    video_title,
+            'ext':      video_extension,
+            'thumbnail':    video_thumbnail,
+            'description':  video_description,
+        }]
+
+
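+# ArteTvIE funnels all scraping through grep_webpage(), a helper that fetches
+# a page and maps numbered regex groups onto named info fields.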
+class ArteTvIE(InfoExtractor):
+    """arte.tv information extractor."""
+
+    _VALID_URL = r'(?:http://)?videos\.arte\.tv/(?:fr|de)/videos/.*'
+    _LIVE_URL = r'index-[0-9]+\.html$'
+
+    IE_NAME = u'arte.tv'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[arte.tv] %s: Downloading webpage' % video_id)
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[arte.tv] %s: Extracting information' % video_id)
+
+    def fetch_webpage(self, url):
+        request = compat_urllib_request.Request(url)
+        try:
+            self.report_download_webpage(url)
+            webpage = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            return
+        except ValueError as err:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+        return webpage
+
+    def grep_webpage(self, url, regex, regexFlags, matchTuples):
+        page = self.fetch_webpage(url)
+        mobj = re.search(regex, page, regexFlags)
+        info = {}
+
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+
+        for (i, key, err) in matchTuples:
+            if mobj.group(i) is None:
+                self._downloader.trouble(err)
+                return
+            else:
+                info[key] = mobj.group(i)
+
+        return info
+
+    def extractLiveStream(self, url):
+        video_lang = url.split('/')[-4]
+        info = self.grep_webpage(
+            url,
+            r'src="(.*?/videothek_js.*?\.js)',
+            0,
+            [
+                (1, 'url', u'ERROR: Invalid URL: %s' % url)
+            ]
+        )
+        http_host = url.split('/')[2]
+        next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
+        info = self.grep_webpage(
+            next_url,
+            r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
+                '(http://.*?\.swf).*?' +
+                '(rtmp://.*?)\'',
+            re.DOTALL,
+            [
+                (1, 'path',   u'ERROR: could not extract video path: %s' % url),
+                (2, 'player', u'ERROR: could not extract video player: %s' % url),
+                (3, 'url',    u'ERROR: could not extract video url: %s' % url)
+            ]
+        )
+        video_url = u'%s/%s' % (info.get('url'), info.get('path'))
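+        # NOTE: video_url is computed but nothing is returned here yet, so
+        # live URLs are recognized but not actually downloadable.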
+
+    def extractPlus7Stream(self, url):
+        video_lang = url.split('/')[-3]
+        info = self.grep_webpage(
+            url,
+            r'param name="movie".*?videorefFileUrl=(http[^\'"&]*)',
+            0,
+            [
+                (1, 'url', u'ERROR: Invalid URL: %s' % url)
+            ]
+        )
+        next_url = compat_urllib_parse.unquote(info.get('url'))
+        info = self.grep_webpage(
+            next_url,
+            r'<video lang="%s" ref="(http[^\'"&]*)' % video_lang,
+            0,
+            [
+                (1, 'url', u'ERROR: Could not find <video> tag: %s' % url)
+            ]
+        )
+        next_url = compat_urllib_parse.unquote(info.get('url'))
+
+        info = self.grep_webpage(
+            next_url,
+            r'<video id="(.*?)".*?>.*?' +
+                '<name>(.*?)</name>.*?' +
+                '<dateVideo>(.*?)</dateVideo>.*?' +
+                '<url quality="hd">(.*?)</url>',
+            re.DOTALL,
+            [
+                (1, 'id',    u'ERROR: could not extract video id: %s' % url),
+                (2, 'title', u'ERROR: could not extract video title: %s' % url),
+                (3, 'date',  u'ERROR: could not extract video date: %s' % url),
+                (4, 'url',   u'ERROR: could not extract video url: %s' % url)
+            ]
+        )
+
+        return {
+            'id':           info.get('id'),
+            'url':          compat_urllib_parse.unquote(info.get('url')),
+            'uploader':     u'arte.tv',
+            'upload_date':  info.get('date'),
+            'title':        info.get('title').decode('utf-8'),
+            'ext':          u'mp4',
+            'format':       u'NA',
+            'player_url':   None,
+        }
+
+    def _real_extract(self, url):
+        video_id = url.split('/')[-1]
+        self.report_extraction(video_id)
+
+        if re.search(self._LIVE_URL, video_id) is not None:
+            self.extractLiveStream(url)
+            return
+        else:
+            info = self.extractPlus7Stream(url)
+
+        return [info]
+
+
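+# GenericIE is the catch-all: it first follows HEAD redirects (so shortened
+# URLs restart the extractor chain on the real target), then falls back to
+# scraping a file=/source= media URL out of the raw page.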
+class GenericIE(InfoExtractor):
+    """Generic last-resort information extractor."""
+
+    _VALID_URL = r'.*'
+    IE_NAME = u'generic'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
+        self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id)
+
+    def report_following_redirect(self, new_url):
+        """Report redirect."""
+        self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)
+
+    def _test_redirect(self, url):
+        """Check whether the URL is a redirect (e.g. a URL shortener); if so, restart the extraction chain."""
+        class HeadRequest(compat_urllib_request.Request):
+            def get_method(self):
+                return "HEAD"
+
+        class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
+            """
+            Subclass the HTTPRedirectHandler to make it use our
+            HeadRequest also on the redirected URL
+            """
+            def redirect_request(self, req, fp, code, msg, headers, newurl):
+                if code in (301, 302, 303, 307):
+                    newurl = newurl.replace(' ', '%20')
+                    newheaders = dict((k,v) for k,v in req.headers.items()
+                                      if k.lower() not in ("content-length", "content-type"))
+                    return HeadRequest(newurl,
+                                       headers=newheaders,
+                                       origin_req_host=req.get_origin_req_host(),
+                                       unverifiable=True)
+                else:
+                    raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)
+
+        class HTTPMethodFallback(compat_urllib_request.BaseHandler):
+            """
+            Fallback to GET if HEAD is not allowed (405 HTTP error)
+            """
+            def http_error_405(self, req, fp, code, msg, headers):
+                fp.read()
+                fp.close()
+
+                newheaders = dict((k,v) for k,v in req.headers.items()
+                                  if k.lower() not in ("content-length", "content-type"))
+                return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
+                                                 headers=newheaders,
+                                                 origin_req_host=req.get_origin_req_host(),
+                                                 unverifiable=True))
+
+        # Build our opener
+        opener = compat_urllib_request.OpenerDirector()
+        for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
+                        HTTPMethodFallback, HEADRedirectHandler,
+                        compat_urllib_error.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
+            opener.add_handler(handler())
+
+        response = opener.open(HeadRequest(url))
+        new_url = response.geturl()
+
+        if url == new_url:
+            return False
+
+        self.report_following_redirect(new_url)
+        self._downloader.download([new_url])
+        return True
+
+    def _real_extract(self, url):
+        if self._test_redirect(url): return
+
+        video_id = url.split('/')[-1]
+        request = compat_urllib_request.Request(url)
+        try:
+            self.report_download_webpage(video_id)
+            webpage = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            return
+        except ValueError as err:
+            # since this is the last-resort InfoExtractor, if
+            # this error is thrown, it'll be thrown here
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+
+        self.report_extraction(video_id)
+        # Start with something easy: JW Player in SWFObject
+        mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
+        if mobj is None:
+            # Broaden the search a little bit
+            mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+
+        # It's possible that one of the regexes
+        # matched, but returned an empty group:
+        if mobj.group(1) is None:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+
+        video_url = compat_urllib_parse.unquote(mobj.group(1))
+        video_id = os.path.basename(video_url)
+
+        # here's a fun little line of code for you:
+        video_extension = os.path.splitext(video_id)[1][1:]
+        video_id = os.path.splitext(video_id)[0]
+
+        # it's tempting to parse this further, but you would
+        # have to take into account all the variations like
+        #   Video Title - Site Name
+        #   Site Name | Video Title
+        #   Video Title - Tagline | Site Name
+        # and so on and so forth; it's just not practical
+        mobj = re.search(r'<title>(.*)</title>', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract title')
+            return
+        video_title = mobj.group(1)
+
+        # video uploader is domain name
+        mobj = re.match(r'(?:https?://)?([^/]*)/.*', url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract uploader')
+            return
+        video_uploader = mobj.group(1)
+
+        return [{
+            'id':       video_id,
+            'url':      video_url,
+            'uploader': video_uploader,
+            'upload_date':  None,
+            'title':    video_title,
+            'ext':      video_extension,
+        }]
+
+
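+# The three search extractors below share one query grammar:
+#   ytsearch:QUERY / gvsearch:QUERY / yvsearch:QUERY  -> first result,
+#   <prefix>N:QUERY                                   -> first N results,
+#   <prefix>all:QUERY                                 -> up to the site limit.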
+class YoutubeSearchIE(InfoExtractor):
+    """Information Extractor for YouTube search queries."""
+    _VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+'
+    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc'
+    _max_youtube_results = 1000
+    IE_NAME = u'youtube:search'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_page(self, query, pagenum):
+        """Report attempt to download search page with given number."""
+        query = query.decode(preferredencoding())
+        self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
+
+    def _real_extract(self, query):
+        mobj = re.match(self._VALID_URL, query)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+            return
+
+        prefix, query = query.split(':')
+        prefix = prefix[8:]
+        query = query.encode('utf-8')
+        if prefix == '':
+            self._download_n_results(query, 1)
+            return
+        elif prefix == 'all':
+            self._download_n_results(query, self._max_youtube_results)
+            return
+        else:
+            try:
+                n = int(prefix)
+                if n <= 0:
+                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                    return
+                elif n > self._max_youtube_results:
+                    self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n))
+                    n = self._max_youtube_results
+                self._download_n_results(query, n)
+                return
+            except ValueError: # parsing prefix as integer fails
+                self._download_n_results(query, 1)
+                return
+
+    def _download_n_results(self, query, n):
+        """Downloads a specified number of results for a query"""
+
+        video_ids = []
+        pagenum = 0
+        limit = n
+
+        while (50 * pagenum) < limit:
+            self.report_download_page(query, pagenum+1)
+            result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50*pagenum)+1)
+            request = compat_urllib_request.Request(result_url)
+            try:
+                data = compat_urllib_request.urlopen(request).read()
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err))
+                return
+            api_response = json.loads(data)['data']
+
+            new_ids = list(video['id'] for video in api_response['items'])
+            video_ids += new_ids
+
+            limit = min(n, api_response['totalItems'])
+            pagenum += 1
+
+        if len(video_ids) > n:
+            video_ids = video_ids[:n]
+        for id in video_ids:
+            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
+        return
+
+
+class GoogleSearchIE(InfoExtractor):
+    """Information Extractor for Google Video search queries."""
+    _VALID_URL = r'gvsearch(\d+|all)?:[\s\S]+'
+    _TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en'
+    _VIDEO_INDICATOR = r'<a href="http://video\.google\.com/videoplay\?docid=([^"\&]+)'
+    _MORE_PAGES_INDICATOR = r'class="pn" id="pnnext"'
+    _max_google_results = 1000
+    IE_NAME = u'video.google:search'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_page(self, query, pagenum):
+        """Report attempt to download search page with given number."""
+        query = query.decode(preferredencoding())
+        self._downloader.to_screen(u'[video.google] query "%s": Downloading page %s' % (query, pagenum))
+
+    def _real_extract(self, query):
+        mobj = re.match(self._VALID_URL, query)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+            return
+
+        prefix, query = query.split(':')
+        prefix = prefix[8:]
+        query = query.encode('utf-8')
+        if prefix == '':
+            self._download_n_results(query, 1)
+            return
+        elif prefix == 'all':
+            self._download_n_results(query, self._max_google_results)
+            return
+        else:
+            try:
+                n = int(prefix)
+                if n <= 0:
+                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                    return
+                elif n > self._max_google_results:
+                    self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n))
+                    n = self._max_google_results
+                self._download_n_results(query, n)
+                return
+            except ValueError: # parsing prefix as integer fails
+                self._download_n_results(query, 1)
+                return
+
+    def _download_n_results(self, query, n):
+        """Downloads a specified number of results for a query"""
+
+        video_ids = []
+        pagenum = 0
+
+        while True:
+            self.report_download_page(query, pagenum)
+            result_url = self._TEMPLATE_URL % (compat_urllib_parse.quote_plus(query), pagenum*10)
+            request = compat_urllib_request.Request(result_url)
+            try:
+                page = compat_urllib_request.urlopen(request).read()
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                return
+
+            # Extract video identifiers
+            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+                video_id = mobj.group(1)
+                if video_id not in video_ids:
+                    video_ids.append(video_id)
+                    if len(video_ids) == n:
+                        # Specified n videos reached
+                        for id in video_ids:
+                            self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id])
+                        return
+
+            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+                for id in video_ids:
+                    self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id])
+                return
+
+            pagenum = pagenum + 1
+
+
+class YahooSearchIE(InfoExtractor):
+    """Information Extractor for Yahoo! Video search queries."""
+
+    _WORKING = False
+    _VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+'
+    _TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s'
+    _VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"'
+    _MORE_PAGES_INDICATOR = r'\s*Next'
+    _max_yahoo_results = 1000
+    IE_NAME = u'video.yahoo:search'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_page(self, query, pagenum):
+        """Report attempt to download search page with given number."""
+        query = query.decode(preferredencoding())
+        self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))
+
+    def _real_extract(self, query):
+        mobj = re.match(self._VALID_URL, query)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query)
+            return
+
+        prefix, query = query.split(':')
+        prefix = prefix[8:]
+        query = query.encode('utf-8')
+        if prefix == '':
+            self._download_n_results(query, 1)
+            return
+        elif prefix == 'all':
+            self._download_n_results(query, self._max_yahoo_results)
+            return
+        else:
+            try:
+                n = int(prefix)
+                if n <= 0:
+                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query))
+                    return
+                elif n > self._max_yahoo_results:
+                    self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n))
+                    n = self._max_yahoo_results
+                self._download_n_results(query, n)
+                return
+            except ValueError: # parsing prefix as integer fails
+                self._download_n_results(query, 1)
+                return
+
+    def _download_n_results(self, query, n):
+        """Downloads a specified number of results for a query"""
+
+        video_ids = []
+        already_seen = set()
+        pagenum = 1
+
+        while True:
+            self.report_download_page(query, pagenum)
+            result_url = self._TEMPLATE_URL % (compat_urllib_parse.quote_plus(query), pagenum)
+            request = compat_urllib_request.Request(result_url)
+            try:
+                page = compat_urllib_request.urlopen(request).read()
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                return
+
+            # Extract video identifiers
+            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+                video_id = mobj.group(1)
+                if video_id not in already_seen:
+                    video_ids.append(video_id)
+                    already_seen.add(video_id)
+                    if len(video_ids) == n:
+                        # Specified n videos reached
+                        for id in video_ids:
+                            self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
+                        return
+
+            if re.search(self._MORE_PAGES_INDICATOR, page) is None:
+                for id in video_ids:
+                    self._downloader.download(['http://video.yahoo.com/watch/%s' % id])
+                return
+
+            pagenum = pagenum + 1
+
+
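+# Several of the list extractors below honour --playlist-start/--playlist-end
+# by slicing the collected id list before handing watch URLs back to the
+# downloader.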
+class YoutubePlaylistIE(InfoExtractor):
+    """Information Extractor for YouTube playlists."""
+
+    _VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]{10,})(?:/.*?/([0-9A-Za-z_-]+))?.*'
+    _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
+    _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&amp;([^&"]+&amp;)*list=.*?%s'
+    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
+    IE_NAME = u'youtube:playlist'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_page(self, playlist_id, pagenum):
+        """Report attempt to download playlist page with given number."""
+        self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
+
+    def _real_extract(self, url):
+        # Extract playlist id
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            return
+
+        # Single video case
+        if mobj.group(3) is not None:
+            self._downloader.download([mobj.group(3)])
+            return
+
+        # Download playlist pages
+        # prefix is 'p' as default for playlists but there are other types that need extra care
+        playlist_prefix = mobj.group(1)
+        if playlist_prefix == 'a':
+            playlist_access = 'artist'
+        else:
+            playlist_prefix = 'p'
+            playlist_access = 'view_play_list'
+        playlist_id = mobj.group(2)
+        video_ids = []
+        pagenum = 1
+
+        while True:
+            self.report_download_page(playlist_id, pagenum)
+            url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum)
+            request = compat_urllib_request.Request(url)
+            try:
+                page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                return
+
+            # Extract video identifiers
+            ids_in_page = []
+            for mobj in re.finditer(self._VIDEO_INDICATOR_TEMPLATE % playlist_id, page):
+                if mobj.group(1) not in ids_in_page:
+                    ids_in_page.append(mobj.group(1))
+            video_ids.extend(ids_in_page)
+
+            if self._MORE_PAGES_INDICATOR not in page:
+                break
+            pagenum = pagenum + 1
+
+        total = len(video_ids)
+
+        playliststart = self._downloader.params.get('playliststart', 1) - 1
+        playlistend = self._downloader.params.get('playlistend', -1)
+        if playlistend == -1:
+            video_ids = video_ids[playliststart:]
+        else:
+            video_ids = video_ids[playliststart:playlistend]
+
+        if len(video_ids) == total:
+            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total))
+        else:
+            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(video_ids)))
+
+        for id in video_ids:
+            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
+        return
+
+
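+# YoutubeChannelIE walks the channel's paginated /videos list view and stops
+# paging once the "Next »" marker disappears from the page.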
+class YoutubeChannelIE(InfoExtractor):
+    """Information Extractor for YouTube channels."""
+
+    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)(?:/.*)?$"
+    _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en'
+    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
+    IE_NAME = u'youtube:channel'
+
+    def report_download_page(self, channel_id, pagenum):
+        """Report attempt to download channel page with given number."""
+        self._downloader.to_screen(u'[youtube] Channel %s: Downloading page #%s' % (channel_id, pagenum))
+
+    def _real_extract(self, url):
+        # Extract channel id
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            return
+
+        # Download channel pages
+        channel_id = mobj.group(1)
+        video_ids = []
+        pagenum = 1
+
+        while True:
+            self.report_download_page(channel_id, pagenum)
+            url = self._TEMPLATE_URL % (channel_id, pagenum)
+            request = compat_urllib_request.Request(url)
+            try:
+                page = compat_urllib_request.urlopen(request).read().decode('utf8')
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                return
+
+            # Extract video identifiers
+            ids_in_page = []
+            for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&', page):
+                if mobj.group(1) not in ids_in_page:
+                    ids_in_page.append(mobj.group(1))
+            video_ids.extend(ids_in_page)
+
+            if self._MORE_PAGES_INDICATOR not in page:
+                break
+            pagenum = pagenum + 1
+
+        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids)))
+
+        for id in video_ids:
+            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id])
+        return
+
+
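+# YoutubeUserIE pages through the GData uploads feed 50 ids at a time; a page
+# with fewer than 50 ids is taken as the last one.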
+class YoutubeUserIE(InfoExtractor):
+    """Information Extractor for YouTube users."""
+
+    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
+    _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
+    _GDATA_PAGE_SIZE = 50
+    _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
+    _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]'
+    IE_NAME = u'youtube:user'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_page(self, username, start_index):
+        """Report attempt to download user page."""
+        self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
+                (username, start_index, start_index + self._GDATA_PAGE_SIZE))
+
+    def _real_extract(self, url):
+        # Extract username
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            return
+
+        username = mobj.group(1)
+
+        # Download video ids using YouTube Data API. Result size per
+        # query is limited (currently to 50 videos) so we need to query
+        # page by page until there are no video ids - it means we got
+        # all of them.
+
+        video_ids = []
+        pagenum = 0
+
+        while True:
+            start_index = pagenum * self._GDATA_PAGE_SIZE + 1
+            self.report_download_page(username, start_index)
+
+            request = compat_urllib_request.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index))
+
+            try:
+                page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                return
+
+            # Extract video identifiers
+            ids_in_page = []
+
+            for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+                if mobj.group(1) not in ids_in_page:
+                    ids_in_page.append(mobj.group(1))
+
+            video_ids.extend(ids_in_page)
+
+            # A little optimization - if current page is not
+            # "full", ie. does not contain PAGE_SIZE video ids then
+            # we can assume that this page is the last one - there
+            # are no more ids on further pages - no need to query
+            # again.
+
+            if len(ids_in_page) < self._GDATA_PAGE_SIZE:
+                break
+
+            pagenum += 1
+
+        all_ids_count = len(video_ids)
+        playliststart = self._downloader.params.get('playliststart', 1) - 1
+        playlistend = self._downloader.params.get('playlistend', -1)
+
+        if playlistend == -1:
+            video_ids = video_ids[playliststart:]
+        else:
+            video_ids = video_ids[playliststart:playlistend]
+
+        self._downloader.to_screen(u"[youtube] user %s: Collected %d video ids (downloading %d of them)" %
+                (username, all_ids_count, len(video_ids)))
+
+        for video_id in video_ids:
+            self._downloader.download(['http://www.youtube.com/watch?v=%s' % video_id])
+
+
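+# BlipTVUserIE first resolves the numeric users_id from the profile page and
+# then pages through the mobile episode-list endpoint 12 items at a time.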
+class BlipTVUserIE(InfoExtractor):
+    """Information Extractor for blip.tv users."""
+
+    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
+    _PAGE_SIZE = 12
+    IE_NAME = u'blip.tv:user'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_page(self, username, pagenum):
+        """Report attempt to download user page."""
+        self._downloader.to_screen(u'[%s] user %s: Downloading video ids from page %d' %
+                (self.IE_NAME, username, pagenum))
+
+    def _real_extract(self, url):
+        # Extract username
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid url: %s' % url)
+            return
+
+        username = mobj.group(1)
+
+        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
+
+        request = compat_urllib_request.Request(url)
+
+        try:
+            page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+            mobj = re.search(r'data-users-id="([^"]+)"', page)
+            page_base = page_base % mobj.group(1)
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+            return
+
+
+        # Download video ids using BlipTV Ajax calls. Result size per
+        # query is limited (currently to 12 videos) so we need to query
+        # page by page until there are no video ids - it means we got
+        # all of them.
+
+        video_ids = []
+        pagenum = 1
+
+        while True:
+            self.report_download_page(username, pagenum)
+
+            request = compat_urllib_request.Request( page_base + "&page=" + str(pagenum) )
+
+            try:
+                page = compat_urllib_request.urlopen(request).read().decode('utf-8')
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
+                return
+
+            # Extract video identifiers
+            ids_in_page = []
+
+            for mobj in re.finditer(r'href="/([^"]+)"', page):
+                if mobj.group(1) not in ids_in_page:
+                    ids_in_page.append(unescapeHTML(mobj.group(1)))
+
+            video_ids.extend(ids_in_page)
+
+            # A little optimization - if current page is not
+            # "full", ie. does not contain PAGE_SIZE video ids then
+            # we can assume that this page is the last one - there
+            # are no more ids on further pages - no need to query
+            # again.
+
+            if len(ids_in_page) < self._PAGE_SIZE:
+                break
+
+            pagenum += 1
+
+        all_ids_count = len(video_ids)
+        playliststart = self._downloader.params.get('playliststart', 1) - 1
+        playlistend = self._downloader.params.get('playlistend', -1)
+
+        if playlistend == -1:
+            video_ids = video_ids[playliststart:]
+        else:
+            video_ids = video_ids[playliststart:playlistend]
+
+        self._downloader.to_screen(u"[%s] user %s: Collected %d video ids (downloading %d of them)" %
+                (self.IE_NAME, username, all_ids_count, len(video_ids)))
+
+        for video_id in video_ids:
+            self._downloader.download([u'http://blip.tv/'+video_id])
+
+
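+# DepositFilesIE simulates pressing the "Free download" button by POSTing
+# gateway_result=1, then scrapes the real fileshare URL from the response.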
+class DepositFilesIE(InfoExtractor):
+    """Information extractor for depositfiles.com"""
+
+    _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'
+
+    def report_download_webpage(self, file_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id)
+
+    def report_extraction(self, file_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id)
+
+    def _real_extract(self, url):
+        file_id = url.split('/')[-1]
+        # Rebuild url in english locale
+        url = 'http://depositfiles.com/en/files/' + file_id
+
+        # Retrieve file webpage with 'Free download' button pressed
+        free_download_indication = { 'gateway_result' : '1' }
+        request = compat_urllib_request.Request(url, compat_urllib_parse.urlencode(free_download_indication))
+        try:
+            self.report_download_webpage(file_id)
+            webpage = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % compat_str(err))
+            return
+
+        # Search for the real file URL
+        mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
+        if (mobj is None) or (mobj.group(1) is None):
+            # Try to figure out reason of the error.
+            mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
+            if (mobj is not None) and (mobj.group(1) is not None):
+                restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
+                self._downloader.trouble(u'ERROR: %s' % restriction_message)
+            else:
+                self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url)
+            return
+
+        file_url = mobj.group(1)
+        file_extension = os.path.splitext(file_url)[1][1:]
+
+        # Search for file title
+        mobj = re.search(r'<b title="(.*?)">', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract title')
+            return
+        file_title = mobj.group(1).decode('utf-8')
+
+        return [{
+            'id':       file_id.decode('utf-8'),
+            'url':      file_url.decode('utf-8'),
+            'uploader': None,
+            'upload_date':  None,
+            'title':    file_title,
+            'ext':      file_extension.decode('utf-8'),
+        }]
+
+
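+# FacebookIE logs in through the mobile login form (credentials taken from
+# --username/--password or a .netrc entry) before fetching the video page,
+# since most videos are presumably not visible to anonymous users.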
+class FacebookIE(InfoExtractor):
+    """Information Extractor for Facebook"""
+
+    _WORKING = False
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
+    _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
+    _NETRC_MACHINE = 'facebook'
+    _available_formats = ['video', 'highqual', 'lowqual']
+    _video_extensions = {
+        'video': 'mp4',
+        'highqual': 'mp4',
+        'lowqual': 'mp4',
+    }
+    IE_NAME = u'facebook'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def _reporter(self, message):
+        """Add header and report message."""
+        self._downloader.to_screen(u'[facebook] %s' % message)
+
+    def report_login(self):
+        """Report attempt to log in."""
+        self._reporter(u'Logging in')
+
+    def report_video_webpage_download(self, video_id):
+        """Report attempt to download video webpage."""
+        self._reporter(u'%s: Downloading video webpage' % video_id)
+
+    def report_information_extraction(self, video_id):
+        """Report attempt to extract video information."""
+        self._reporter(u'%s: Extracting video information' % video_id)
+
+    def _parse_page(self, video_webpage):
+        """Extract video information from page"""
+        # General data
+        data = {'title': r'\("video_title", "(.*?)"\)',
+            'description': r'<div class="datawrap">(.*?)</div>',
+            'owner': r'\("video_owner_name", "(.*?)"\)',
+            'thumbnail':  r'\("thumb_url", "(?P<THUMB>.*?)"\)',
+            }
+        video_info = {}
+        for piece in data.keys():
+            mobj = re.search(data[piece], video_webpage)
+            if mobj is not None:
+                video_info[piece] = compat_urllib_parse.unquote_plus(mobj.group(1).decode("unicode_escape"))
+
+        # Video urls
+        video_urls = {}
+        for fmt in self._available_formats:
+            mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage)
+            if mobj is not None:
+                # URL is in a Javascript segment inside an escaped Unicode format within
+                # the generally utf-8 page
+                video_urls[fmt] = compat_urllib_parse.unquote_plus(mobj.group(1).decode("unicode_escape"))
+        video_info['video_urls'] = video_urls
+
+        return video_info
+
+    def _real_initialize(self):
+        if self._downloader is None:
+            return
+
+        useremail = None
+        password = None
+        downloader_params = self._downloader.params
+
+        # Attempt to use provided username and password or .netrc data
+        if downloader_params.get('username', None) is not None:
+            useremail = downloader_params['username']
+            password = downloader_params['password']
+        elif downloader_params.get('usenetrc', False):
+            try:
+                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
+                if info is not None:
+                    useremail = info[0]
+                    password = info[2]
+                else:
+                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
+            except (IOError, netrc.NetrcParseError) as err:
+                self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
+                return
+
+        if useremail is None:
+            return
+
+        # Log in
+        login_form = {
+            'email': useremail,
+            'pass': password,
+            'login': 'Log+In'
+            }
+        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+        try:
+            self.report_login()
+            login_results = compat_urllib_request.urlopen(request).read()
+            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
+                self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
+

                return 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err)) 

+

            return 

+

 

+

    def _real_extract(self, url): 

+

        mobj = re.match(self._VALID_URL, url) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

+

            return 

+

        video_id = mobj.group('ID') 

+

 

+

        # Get video webpage 

+

        self.report_video_webpage_download(video_id) 

+

        request = compat_urllib_request.Request('https://www.facebook.com/video/video.php?v=%s' % video_id) 

+

        try: 

+

            page = compat_urllib_request.urlopen(request) 

+

            video_webpage = page.read() 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err)) 

+

            return 

+

 

+

        # Start extracting information 

+

        self.report_information_extraction(video_id) 

+

 

+

        # Extract information 

+

        video_info = self._parse_page(video_webpage) 

+

 

+

        # uploader 

+

        if 'owner' not in video_info: 

+

            self._downloader.trouble(u'ERROR: unable to extract uploader nickname') 

+

            return 

+

        video_uploader = video_info['owner'] 

+

 

+

        # title 

+

        if 'title' not in video_info: 

+

            self._downloader.trouble(u'ERROR: unable to extract video title') 

+

            return 

+

        video_title = video_info['title'] 

+

        video_title = video_title.decode('utf-8') 

+

 

+

        # thumbnail image 

+

        if 'thumbnail' not in video_info: 

+

            self._downloader.trouble(u'WARNING: unable to extract video thumbnail') 

+

            video_thumbnail = '' 

+

        else: 

+

            video_thumbnail = video_info['thumbnail'] 

+

 

+

        # upload date 

+

        upload_date = None 

+

        if 'upload_date' in video_info: 

+

            upload_time = video_info['upload_date'] 

+

            timetuple = email.utils.parsedate_tz(upload_time) 

+

            if timetuple is not None: 

+

                try: 

+

                    upload_date = time.strftime('%Y%m%d', timetuple[0:9]) 

+

                except Exception:

+

                    pass 

+

 

+

        # description 

+

        video_description = video_info.get('description', 'No description available.') 

+

 

+

        url_map = video_info['video_urls'] 

+

        if url_map: 

+

            # Decide which formats to download 

+

            req_format = self._downloader.params.get('format', None) 

+

            format_limit = self._downloader.params.get('format_limit', None) 

+

 

+

            if format_limit is not None and format_limit in self._available_formats: 

+

                format_list = self._available_formats[self._available_formats.index(format_limit):] 

+

            else: 

+

                format_list = self._available_formats 

+

            existing_formats = [x for x in format_list if x in url_map] 

+

            if len(existing_formats) == 0: 

+

                self._downloader.trouble(u'ERROR: no known formats available for video') 

+

                return 

+

            if req_format is None: 

+

                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality 

+

            elif req_format == 'worst': 

+

                video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])] # worst quality

+

            elif req_format == '-1': 

+

                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats 

+

            else: 

+

                # Specific format 

+

                if req_format not in url_map: 

+

                    self._downloader.trouble(u'ERROR: requested format not available') 

+

                    return 

+

                video_url_list = [(req_format, url_map[req_format])] # Specific format 

+

 

+

        results = [] 

+

        for format_param, video_real_url in video_url_list: 

+

            # Extension 

+

            video_extension = self._video_extensions.get(format_param, 'mp4') 

+

 

+

            results.append({ 

+

                'id':       video_id.decode('utf-8'), 

+

                'url':      video_real_url.decode('utf-8'), 

+

                'uploader': video_uploader.decode('utf-8'), 

+

                'upload_date':  upload_date, 

+

                'title':    video_title, 

+

                'ext':      video_extension.decode('utf-8'), 

+

                'format':   (u'NA' if format_param is None else format_param.decode('utf-8')),

+

                'thumbnail':    video_thumbnail.decode('utf-8'), 

+

                'description':  video_description.decode('utf-8'), 

+

            }) 

+

        return results 

+

 

+

class BlipTVIE(InfoExtractor): 

+

    """Information extractor for blip.tv""" 

+

 

+

    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$' 

+

    _URL_EXT = r'^.*\.([a-z0-9]+)$' 

+

    IE_NAME = u'blip.tv' 

+

 

+

    def report_extraction(self, file_id): 

+

        """Report information extraction.""" 

+

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id)) 

+

 

+

    def report_direct_download(self, title): 

+

        """Report information extraction.""" 

+

        self._downloader.to_screen(u'[%s] %s: Direct download detected' % (self.IE_NAME, title)) 

+

 

+

    def _real_extract(self, url): 

+

        mobj = re.match(self._VALID_URL, url) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

+

            return 

+

 

+

        if '?' in url: 

+

            cchar = '&' 

+

        else: 

+

            cchar = '?' 

+

        json_url = url + cchar + 'skin=json&version=2&no_wrap=1' 
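        # e.g. 'http://blip.tv/play/x' becomes 'http://blip.tv/play/x?skin=json&version=2&no_wrap=1'
        # (hypothetical path; skin=json asks blip.tv to return the post metadata as JSON)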

+

        request = compat_urllib_request.Request(json_url) 

+

        self.report_extraction(mobj.group(1)) 

+

        info = None 

+

        try: 

+

            urlh = compat_urllib_request.urlopen(request) 

+

            if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download 

+

                basename = url.split('/')[-1] 

+

                title, ext = os.path.splitext(basename)

+

                title = title.decode('UTF-8') 

+

                ext = ext.replace('.', '') 

+

                self.report_direct_download(title) 

+

                info = { 

+

                    'id': title, 

+

                    'url': url, 

+

                    'uploader': None, 

+

                    'upload_date': None, 

+

                    'title': title, 

+

                    'ext': ext, 

+

                    'urlhandle': urlh 

+

                } 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err)) 

+

            return 

+

        if info is None: # Regular URL 

+

            try: 

+

                json_code_bytes = urlh.read() 

+

                json_code = json_code_bytes.decode('utf-8') 

+

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

                self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err)) 

+

                return 

+

 

+

            try: 

+

                json_data = json.loads(json_code) 

+

                if 'Post' in json_data: 

+

                    data = json_data['Post'] 

+

                else: 

+

                    data = json_data 

+

 

+

                upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d') 

+

                video_url = data['media']['url'] 

+

                umobj = re.match(self._URL_EXT, video_url) 

+

                if umobj is None: 

+

                    raise ValueError('Cannot determine filename extension')

+

                ext = umobj.group(1) 

+

 

+

                info = { 

+

                    'id': data['item_id'], 

+

                    'url': video_url, 

+

                    'uploader': data['display_name'], 

+

                    'upload_date': upload_date, 

+

                    'title': data['title'], 

+

                    'ext': ext, 

+

                    'format': data['media']['mimeType'], 

+

                    'thumbnail': data['thumbnailUrl'], 

+

                    'description': data['description'], 

+

                    'player_url': data['embedUrl'] 

+

                } 

+

            except (ValueError, KeyError) as err:

+

                self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err)) 

+

                return 

+

 

+

        std_headers['User-Agent'] = 'iTunes/10.6.1' 

+

        return [info] 

+

 

+

 

+

class MyVideoIE(InfoExtractor): 

+

    """Information Extractor for myvideo.de.""" 

+

 

+

    _VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*' 

+

    IE_NAME = u'myvideo' 

+

 

+

    def __init__(self, downloader=None): 

+

        InfoExtractor.__init__(self, downloader) 

+

 

+

    def report_extraction(self, video_id): 

+

        """Report information extraction.""" 

+

        self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id) 

+

 

+

    def _real_extract(self,url): 

+

        mobj = re.match(self._VALID_URL, url) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)

+

            return 

+

 

+

        video_id = mobj.group(1) 

+

 

+

        # Get video webpage 

+

        webpage_url = 'http://www.myvideo.de/watch/%s' % video_id 

+

        webpage = self._download_webpage(webpage_url, video_id) 

+

 

+

        self.report_extraction(video_id) 

+

        mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />', 

+

                 webpage) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: unable to extract media URL') 

+

            return 

+

        video_url = mobj.group(1) + ('/%s.flv' % video_id) 

+

 

+

        mobj = re.search('<title>([^<]+)</title>', webpage) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: unable to extract title') 

+

            return 

+

 

+

        video_title = mobj.group(1) 

+

 

+

        return [{ 

+

            'id':       video_id, 

+

            'url':      video_url, 

+

            'uploader': None, 

+

            'upload_date':  None, 

+

            'title':    video_title, 

+

            'ext':      u'flv', 

+

        }] 

+

 

+

class ComedyCentralIE(InfoExtractor): 

+

    """Information extractor for The Daily Show and Colbert Report """ 

+

 

+

    # urls can be abbreviations like :thedailyshow or :colbert 

+

    # urls for episodes like: 

+

    # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day 

+

    #                     or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news 

+

    #                     or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524 

+

    _VALID_URL = r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport) 

+

                      |(https?://)?(www\.)? 

+

                          (?P<showname>thedailyshow|colbertnation)\.com/ 

+

                         (full-episodes/(?P<episode>.*)| 

+

                          (?P<clip> 

+

                              (the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?)) 

+

                              |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))))) 

+

                     $""" 

+

    IE_NAME = u'comedycentral' 

+

 

+

    _available_formats = ['3500', '2200', '1700', '1200', '750', '400'] 

+

 

+

    _video_extensions = { 

+

        '3500': 'mp4', 

+

        '2200': 'mp4', 

+

        '1700': 'mp4', 

+

        '1200': 'mp4', 

+

        '750': 'mp4', 

+

        '400': 'mp4', 

+

    } 

+

    _video_dimensions = { 

+

        '3500': '1280x720', 

+

        '2200': '960x540', 

+

        '1700': '768x432', 

+

        '1200': '640x360', 

+

        '750': '512x288', 

+

        '400': '384x216', 

+

    } 

+

 

+

    def suitable(self, url): 

+

        """Receives a URL and returns True if suitable for this IE.""" 

+

        return re.match(self._VALID_URL, url, re.VERBOSE) is not None 

+

 

+

    def report_extraction(self, episode_id): 

+

        self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id) 

+

 

+

    def report_config_download(self, episode_id): 

+

        self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id) 

+

 

+

    def report_index_download(self, episode_id): 

+

        self._downloader.to_screen(u'[comedycentral] %s: Downloading show index' % episode_id) 

+

 

+

    def report_player_url(self, episode_id): 

+

        self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id) 

+

 

+

 

+

    def _print_formats(self, formats): 

+

        print('Available formats:') 

+

        for x in formats: 

+

            print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'mp4'), self._video_dimensions.get(x, '???'))) 

+

 

+

 

+

    def _real_extract(self, url): 

+

        mobj = re.match(self._VALID_URL, url, re.VERBOSE) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

+

            return 

+

 

+

        if mobj.group('shortname'): 

+

            if mobj.group('shortname') in ('tds', 'thedailyshow'): 

+

                url = u'http://www.thedailyshow.com/full-episodes/' 

+

            else: 

+

                url = u'http://www.colbertnation.com/full-episodes/' 

+

            mobj = re.match(self._VALID_URL, url, re.VERBOSE) 

+

            assert mobj is not None 

+

 

+

        if mobj.group('clip'): 

+

            if mobj.group('showname') == 'thedailyshow': 

+

                epTitle = mobj.group('tdstitle') 

+

            else: 

+

                epTitle = mobj.group('cntitle') 

+

            dlNewest = False 

+

        else: 

+

            dlNewest = not mobj.group('episode') 

+

            if dlNewest: 

+

                epTitle = mobj.group('showname') 

+

            else: 

+

                epTitle = mobj.group('episode') 

+

 

+

        req = compat_urllib_request.Request(url) 

+

        self.report_extraction(epTitle) 

+

        try: 

+

            htmlHandle = compat_urllib_request.urlopen(req) 

+

            html = htmlHandle.read() 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) 

+

            return 

+

        if dlNewest: 

+

            url = htmlHandle.geturl() 

+

            mobj = re.match(self._VALID_URL, url, re.VERBOSE) 

+

            if mobj is None: 

+

                self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url) 

+

                return 

+

            if mobj.group('episode') == '': 

+

                self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url) 

+

                return 

+

            epTitle = mobj.group('episode') 

+

 

+

        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', html) 

+

 

+

        if len(mMovieParams) == 0: 

+

            # The Colbert Report embeds the information in a data-mgid attribute without

+

            # a URL prefix; so extract the alternate reference 

+

            # and then add the URL prefix manually. 

+

 

+

            altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', html) 

+

            if len(altMovieParams) == 0: 

+

                self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url) 

+

                return 

+

            else: 

+

                mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])] 

+

 

+

        playerUrl_raw = mMovieParams[0][0] 

+

        self.report_player_url(epTitle) 

+

        try: 

+

            urlHandle = compat_urllib_request.urlopen(playerUrl_raw) 

+

            playerUrl = urlHandle.geturl() 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.trouble(u'ERROR: unable to find out player URL: ' + compat_str(err)) 

+

            return 

+

 

+

        uri = mMovieParams[0][1] 

+

        indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri}) 

+

        self.report_index_download(epTitle) 

+

        try: 

+

            indexXml = compat_urllib_request.urlopen(indexUrl).read() 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.trouble(u'ERROR: unable to download episode index: ' + compat_str(err)) 

+

            return 

+

 

+

        results = [] 

+

 

+

        idoc = xml.etree.ElementTree.fromstring(indexXml) 

+

        itemEls = idoc.findall('.//item') 

+

        for itemEl in itemEls: 

+

            mediaId = itemEl.findall('./guid')[0].text 

+

            shortMediaId = mediaId.split(':')[-1] 

+

            showId = mediaId.split(':')[-2].replace('.com', '') 

+

            officialTitle = itemEl.findall('./title')[0].text 

+

            officialDate = itemEl.findall('./pubDate')[0].text 

+

 

+

            configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' + 

+

                        compat_urllib_parse.urlencode({'uri': mediaId})) 

+

            configReq = compat_urllib_request.Request(configUrl) 

+

            self.report_config_download(epTitle) 

+

            try: 

+

                configXml = compat_urllib_request.urlopen(configReq).read() 

+

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) 

+

                return 

+

 

+

            cdoc = xml.etree.ElementTree.fromstring(configXml) 

+

            turls = [] 

+

            for rendition in cdoc.findall('.//rendition'): 

+

                finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text) 

+

                turls.append(finfo) 

+

 

+

            if len(turls) == 0: 

+

                self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found') 

+

                continue 

+

 

+

            if self._downloader.params.get('listformats', None): 

+

                self._print_formats([i[0] for i in turls]) 

+

                return 

+

 

+

            # For now, just pick the highest bitrate 

+

            format, video_url = turls[-1]
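            # turls is assumed to be ordered lowest-to-highest bitrate, so
            # turls[-1] picks the highest-quality rendition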

+

 

+

            # Get the format arg from the arg stream 

+

            req_format = self._downloader.params.get('format', None) 

+

 

+

            # Select format if we can find one 

+

            for f,v in turls: 

+

                if f == req_format: 

+

                    format, video_url = f, v 

+

                    break 

+

 

+

            # Patch to download from alternative CDN, which does not 

+

            # break on current RTMPDump builds 

+

            broken_cdn = "rtmpe://viacomccstrmfs.fplive.net/viacomccstrm/gsp.comedystor/" 

+

            better_cdn = "rtmpe://cp10740.edgefcs.net/ondemand/mtvnorigin/gsp.comedystor/" 

+

 

+

            if video_url.startswith(broken_cdn): 

+

                video_url = video_url.replace(broken_cdn, better_cdn) 
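            # e.g. rtmpe://viacomccstrmfs.fplive.net/viacomccstrm/gsp.comedystor/co/clip.mp4
            #  ->  rtmpe://cp10740.edgefcs.net/ondemand/mtvnorigin/gsp.comedystor/co/clip.mp4
            # (illustrative path; only the CDN prefix is swapped)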

+

 

+

            effTitle = showId + u'-' + epTitle 

+

            info = { 

+

                'id': shortMediaId, 

+

                'url': video_url, 

+

                'uploader': showId, 

+

                'upload_date': officialDate, 

+

                'title': effTitle, 

+

                'ext': 'mp4', 

+

                'format': format, 

+

                'thumbnail': None, 

+

                'description': officialTitle, 

+

                'player_url': None #playerUrl 

+

            } 

+

 

+

            results.append(info) 

+

 

+

        return results 

+

 

+

 

+

class EscapistIE(InfoExtractor): 

+

    """Information extractor for The Escapist """ 

+

 

+

    _VALID_URL = r'^(https?://)?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$' 

+

    IE_NAME = u'escapist' 

+

 

+

    def report_extraction(self, showName): 

+

        self._downloader.to_screen(u'[escapist] %s: Extracting information' % showName) 

+

 

+

    def report_config_download(self, showName): 

+

        self._downloader.to_screen(u'[escapist] %s: Downloading configuration' % showName) 

+

 

+

    def _real_extract(self, url): 

+

        mobj = re.match(self._VALID_URL, url) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

+

            return 

+

        showName = mobj.group('showname') 

+

        videoId = mobj.group('episode') 

+

 

+

        self.report_extraction(showName) 

+

        try: 

+

            webPage = compat_urllib_request.urlopen(url) 

+

            webPageBytes = webPage.read() 

+

            m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type']) 

+

            webPage = webPageBytes.decode(m.group(1) if m else 'utf-8') 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.trouble(u'ERROR: unable to download webpage: ' + compat_str(err)) 

+

            return 

+

 

+

        descMatch = re.search('<meta name="description" content="([^"]*)"', webPage) 

+

        description = unescapeHTML(descMatch.group(1)) 

+

        imgMatch = re.search('<meta property="og:image" content="([^"]*)"', webPage) 

+

        imgUrl = unescapeHTML(imgMatch.group(1)) 

+

        playerUrlMatch = re.search('<meta property="og:video" content="([^"]*)"', webPage) 

+

        playerUrl = unescapeHTML(playerUrlMatch.group(1)) 

+

        configUrlMatch = re.search('config=(.*)$', playerUrl) 

+

        configUrl = compat_urllib_parse.unquote(configUrlMatch.group(1)) 

+

 

+

        self.report_config_download(showName) 

+

        try: 

+

            configJSON = compat_urllib_request.urlopen(configUrl) 

+

            m = re.match(r'text/html; charset="?([^"]+)"?', configJSON.headers['Content-Type']) 

+

            configJSON = configJSON.read().decode(m.group(1) if m else 'utf-8') 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.trouble(u'ERROR: unable to download configuration: ' + compat_str(err)) 

+

            return 

+

 

+

        # Technically, it's JavaScript, not JSON 

+

        configJSON = configJSON.replace("'", '"') 
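        # e.g. {'playlist': [{'url': '...'}]} (single quotes, as served) becomes
        # {"playlist": [{"url": "..."}]}, which json.loads accepts
        # (a blunt fix: an apostrophe inside a string value would also be rewritten)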

+

 

+

        try: 

+

            config = json.loads(configJSON) 

+

        except (ValueError,) as err: 

+

            self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + compat_str(err)) 

+

            return 

+

 

+

        playlist = config['playlist'] 

+

        videoUrl = playlist[1]['url'] 

+

 

+

        info = { 

+

            'id': videoId, 

+

            'url': videoUrl, 

+

            'uploader': showName, 

+

            'upload_date': None, 

+

            'title': showName, 

+

            'ext': 'flv', 

+

            'thumbnail': imgUrl, 

+

            'description': description, 

+

            'player_url': playerUrl, 

+

        } 

+

 

+

        return [info] 

+

 

+

 

+

class CollegeHumorIE(InfoExtractor): 

+

    """Information extractor for collegehumor.com""" 

+

 

+

    _WORKING = False 

+

    _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/video/(?P<videoid>[0-9]+)/(?P<shorttitle>.*)$' 

+

    IE_NAME = u'collegehumor' 

+

 

+

    def report_manifest(self, video_id): 

+

        """Report information extraction.""" 

+

        self._downloader.to_screen(u'[%s] %s: Downloading XML manifest' % (self.IE_NAME, video_id)) 

+

 

+

    def report_extraction(self, video_id): 

+

        """Report information extraction.""" 

+

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) 

+

 

+

    def _real_extract(self, url): 

+

        mobj = re.match(self._VALID_URL, url) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

+

            return 

+

        video_id = mobj.group('videoid') 

+

 

+

        info = { 

+

            'id': video_id, 

+

            'uploader': None, 

+

            'upload_date': None, 

+

        } 

+

 

+

        self.report_extraction(video_id) 

+

        xmlUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id 

+

        try: 

+

            metaXml = compat_urllib_request.urlopen(xmlUrl).read() 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err)) 

+

            return 

+

 

+

        mdoc = xml.etree.ElementTree.fromstring(metaXml) 

+

        try: 

+

            videoNode = mdoc.findall('./video')[0] 

+

            info['description'] = videoNode.findall('./description')[0].text 

+

            info['title'] = videoNode.findall('./caption')[0].text 

+

            info['thumbnail'] = videoNode.findall('./thumbnail')[0].text 

+

            manifest_url = videoNode.findall('./file')[0].text 

+

        except IndexError: 

+

            self._downloader.trouble(u'\nERROR: Invalid metadata XML file') 

+

            return 

+

 

+

        manifest_url += '?hdcore=2.10.3' 

+

        self.report_manifest(video_id) 

+

        try: 

+

            manifestXml = compat_urllib_request.urlopen(manifest_url).read() 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err)) 

+

            return 

+

 

+

        adoc = xml.etree.ElementTree.fromstring(manifestXml) 

+

        try: 

+

            media_node = adoc.findall('./{http://ns.adobe.com/f4m/1.0}media')[0] 

+

            node_id = media_node.attrib['url'] 

+

            video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text 

+

        except IndexError as err: 

+

            self._downloader.trouble(u'\nERROR: Invalid manifest file') 

+

            return 

+

 

+

        url_pr = compat_urllib_parse_urlparse(manifest_url) 

+

        url = url_pr.scheme + '://' + url_pr.netloc + '/z' + video_id[:-2] + '/' + node_id + 'Seg1-Frag1' 
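        # Illustrative example (hypothetical values): with manifest host
        # 'cdn.example.com', video_id 'abc99xx' and node_id 'media1.mp4', this builds
        #   http://cdn.example.com/zabc99/media1.mp4Seg1-Frag1
        # i.e. the first fragment of the Adobe HDS stream (hence ext 'f4f' below)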

+

 

+

        info['url'] = url 

+

        info['ext'] = 'f4f' 

+

        return [info] 

+

 

+

 

+

class XVideosIE(InfoExtractor): 

+

    """Information extractor for xvideos.com""" 

+

 

+

    _VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)' 

+

    IE_NAME = u'xvideos' 

+

 

+

    def report_extraction(self, video_id): 

+

        """Report information extraction.""" 

+

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) 

+

 

+

    def _real_extract(self, url): 

+

        mobj = re.match(self._VALID_URL, url) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

+

            return 

+

        video_id = mobj.group(1) 

+

 

+

        webpage = self._download_webpage(url, video_id) 

+

 

+

        self.report_extraction(video_id) 

+

 

+

 

+

        # Extract video URL 

+

        mobj = re.search(r'flv_url=(.+?)&', webpage) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: unable to extract video url') 

+

            return 

+

        video_url = compat_urllib_parse.unquote(mobj.group(1)) 

+

 

+

 

+

        # Extract title 

+

        mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: unable to extract video title') 

+

            return 

+

        video_title = mobj.group(1) 

+

 

+

 

+

        # Extract video thumbnail 

+

        mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: unable to extract video thumbnail') 

+

            return 

+

        video_thumbnail = mobj.group(0) 

+

 

+

        info = { 

+

            'id': video_id, 

+

            'url': video_url, 

+

            'uploader': None, 

+

            'upload_date': None, 

+

            'title': video_title, 

+

            'ext': 'flv', 

+

            'thumbnail': video_thumbnail, 

+

            'description': None, 

+

        } 

+

 

+

        return [info] 

+

 

+

 

+

class SoundcloudIE(InfoExtractor): 

+

    """Information extractor for soundcloud.com 

+

       To access the media, the uid of the song and a stream token 

+

       must be extracted from the page source and the script must make 

+

       a request to media.soundcloud.com/crossdomain.xml. Then 

+

       the media can be grabbed by requesting a URL composed

+

       of the stream token and uid 

+

     """ 

+

 

+

    _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)' 

+

    IE_NAME = u'soundcloud' 

+

 

+

    def __init__(self, downloader=None): 

+

        InfoExtractor.__init__(self, downloader) 

+

 

+

    def report_resolve(self, video_id): 

+

        """Report information extraction.""" 

+

        self._downloader.to_screen(u'[%s] %s: Resolving id' % (self.IE_NAME, video_id)) 

+

 

+

    def report_extraction(self, video_id): 

+

        """Report information extraction.""" 

+

        self._downloader.to_screen(u'[%s] %s: Retrieving stream' % (self.IE_NAME, video_id)) 

+

 

+

    def _real_extract(self, url): 

+

        mobj = re.match(self._VALID_URL, url) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

+

            return 

+

 

+

        # extract uploader (which is in the url) 

+

        uploader = mobj.group(1) 

+

        # extract simple title (uploader + slug of song title) 

+

        slug_title = mobj.group(2)

+

        simple_title = uploader + u'-' + slug_title 

+

 

+

        self.report_resolve('%s/%s' % (uploader, slug_title)) 

+

 

+

        url = 'http://soundcloud.com/%s/%s' % (uploader, slug_title) 

+

        resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28' 

+

        request = compat_urllib_request.Request(resolv_url) 

+

        try: 

+

            info_json_bytes = compat_urllib_request.urlopen(request).read() 

+

            info_json = info_json_bytes.decode('utf-8') 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err)) 

+

            return 

+

 

+

        info = json.loads(info_json) 

+

        video_id = info['id'] 

+

        self.report_extraction('%s/%s' % (uploader, slug_title)) 

+

 

+

        streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28' 

+

        request = compat_urllib_request.Request(streams_url) 

+

        try: 

+

            stream_json_bytes = compat_urllib_request.urlopen(request).read() 

+

            stream_json = stream_json_bytes.decode('utf-8') 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.trouble(u'ERROR: unable to download stream definitions: %s' % compat_str(err)) 

+

            return 

+

 

+

        streams = json.loads(stream_json) 

+

        mediaURL = streams['http_mp3_128_url'] 

+

 

+

        return [{ 

+

            'id':       info['id'], 

+

            'url':      mediaURL, 

+

            'uploader': info['user']['username'], 

+

            'upload_date':  info['created_at'], 

+

            'title':    info['title'], 

+

            'ext':      u'mp3', 

+

            'description': info['description'], 

+

        }] 

+

 

+

 

+

class InfoQIE(InfoExtractor): 

+

    """Information extractor for infoq.com""" 

+

    _VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$' 

+

 

+

    def report_extraction(self, video_id): 

+

        """Report information extraction.""" 

+

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) 

+

 

+

    def _real_extract(self, url): 

+

        mobj = re.match(self._VALID_URL, url) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

+

            return 

+

 

+

        webpage = self._download_webpage(url, video_id=url) 

+

        self.report_extraction(url) 

+

 

+

        # Extract video URL 

+

        mobj = re.search(r"jsclassref='([^']*)'", webpage) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: unable to extract video url') 

+

            return 

+

        real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8')) 

+

        video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id 
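        # e.g. (illustrative) jsclassref='cGF0aC90by92aWRlby5tcDQ=' base64-decodes
        # to 'path/to/video.mp4' (URL-unquoting is a no-op here), giving
        #   rtmpe://video.infoq.com/cfx/st/path/to/video.mp4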

+

 

+

        # Extract title 

+

        mobj = re.search(r'contentTitle = "(.*?)";', webpage) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: unable to extract video title') 

+

            return 

+

        video_title = mobj.group(1) 

+

 

+

        # Extract description 

+

        video_description = u'No description available.' 

+

        mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', webpage) 

+

        if mobj is not None: 

+

            video_description = mobj.group(1) 

+

 

+

        video_filename = video_url.split('/')[-1] 

+

        video_id, extension = video_filename.split('.') 

+

 

+

        info = { 

+

            'id': video_id, 

+

            'url': video_url, 

+

            'uploader': None, 

+

            'upload_date': None, 

+

            'title': video_title, 

+

            'ext': extension, # Reported extension is always(?) mp4, but the actual stream seems to be flv

+

            'thumbnail': None, 

+

            'description': video_description, 

+

        } 

+

 

+

        return [info] 

+

 

+

class MixcloudIE(InfoExtractor): 

+

    """Information extractor for www.mixcloud.com""" 

+

 

+

    _WORKING = False # Disabled until ported to the new API, which looks usable: http://www.mixcloud.com/developers/documentation/

+

    _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([\w\d-]+)/([\w\d-]+)' 

+

    IE_NAME = u'mixcloud' 

+

 

+

    def __init__(self, downloader=None): 

+

        InfoExtractor.__init__(self, downloader) 

+

 

+

    def report_download_json(self, file_id): 

+

        """Report JSON download.""" 

+

        self._downloader.to_screen(u'[%s] Downloading json' % self.IE_NAME) 

+

 

+

    def report_extraction(self, file_id): 

+

        """Report information extraction.""" 

+

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id)) 

+

 

+

    def get_urls(self, jsonData, fmt, bitrate='best'): 

+

        """Get urls from 'audio_formats' section in json""" 

+

        file_url = None 

+

        try: 

+

            bitrate_list = jsonData[fmt] 

+

            if bitrate is None or bitrate == 'best' or bitrate not in bitrate_list: 

+

                bitrate = max(bitrate_list) # select highest 

+

 

+

            url_list = jsonData[fmt][bitrate] 

+

        except TypeError: # we have no bitrate info. 

+

            url_list = jsonData[fmt] 

+

        return url_list 
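    # Illustrative shape of the 'audio_formats' JSON this walks (hypothetical):
    #   {"mp3": {"128": ["http://a/x.mp3", "http://b/x.mp3"]},   # nested by bitrate
    #    "ogg": ["http://a/x.ogg"]}                              # no bitrate info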

+

 

+

    def check_urls(self, url_list): 

+

        """Returns 1st active url from list""" 

+

        for url in url_list: 

+

            try: 

+

                compat_urllib_request.urlopen(url) 

+

                return url 

+

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error):

+

                continue # this URL is dead; try the next one

+

 

+

        return None 

+

 

+

    def _print_formats(self, formats): 

+

        print('Available formats:') 

+

        for fmt in formats.keys(): 

+

            for b in formats[fmt]: 

+

                try: 

+

                    ext = formats[fmt][b][0] 

+

                    print('%s\t%s\t[%s]' % (fmt, b, ext.split('.')[-1])) 

+

                except TypeError: # we have no bitrate info 

+

                    ext = formats[fmt][0] 

+

                    print('%s\t%s\t[%s]' % (fmt, '??', ext.split('.')[-1])) 

+

                    break 

+

 

+

    def _real_extract(self, url): 

+

        mobj = re.match(self._VALID_URL, url) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

+

            return 

+

        # extract uploader & filename from url 

+

        uploader = mobj.group(1).decode('utf-8') 

+

        file_id = uploader + "-" + mobj.group(2).decode('utf-8') 

+

 

+

        # construct API request 

+

        file_url = 'http://www.mixcloud.com/api/1/cloudcast/' + '/'.join(url.split('/')[-3:-1]) + '.json' 

+

        # retrieve .json file with links to files 

+

        request = compat_urllib_request.Request(file_url) 

+

        try: 

+

            self.report_download_json(file_url) 

+

            jsonData = compat_urllib_request.urlopen(request).read() 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % compat_str(err)) 

+

            return 

+

 

+

        # parse JSON 

+

        json_data = json.loads(jsonData) 

+

        player_url = json_data['player_swf_url'] 

+

        formats = dict(json_data['audio_formats']) 

+

 

+

        req_format = self._downloader.params.get('format', None) 

+

        bitrate = None 

+

 

+

        if self._downloader.params.get('listformats', None): 

+

            self._print_formats(formats) 

+

            return 

+

 

+

        if req_format is None or req_format == 'best': 

+

            for format_param in formats.keys(): 

+

                url_list = self.get_urls(formats, format_param) 

+

                # check urls 

+

                file_url = self.check_urls(url_list) 

+

                if file_url is not None: 

+

                    break # got it! 

+

        else: 

+

            if req_format not in formats: 

+

                self._downloader.trouble(u'ERROR: format is not available') 

+

                return 

+

 

+

            url_list = self.get_urls(formats, req_format) 

+

            file_url = self.check_urls(url_list) 

+

            format_param = req_format 

+

 

+

        return [{ 

+

            'id': file_id.decode('utf-8'), 

+

            'url': file_url.decode('utf-8'), 

+

            'uploader': uploader.decode('utf-8'), 

+

            'upload_date': None, 

+

            'title': json_data['name'], 

+

            'ext': file_url.split('.')[-1].decode('utf-8'), 

+

            'format': (u'NA' if format_param is None else format_param.decode('utf-8')),

+

            'thumbnail': json_data['thumbnail_url'], 

+

            'description': json_data['description'], 

+

            'player_url': player_url.decode('utf-8'), 

+

        }] 

+

 

+

class StanfordOpenClassroomIE(InfoExtractor): 

+

    """Information extractor for Stanford's Open ClassRoom""" 

+

 

+

    _VALID_URL = r'^(?:https?://)?openclassroom.stanford.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$' 

+

    IE_NAME = u'stanfordoc' 

+

 

+

    def report_download_webpage(self, objid): 

+

        """Report information extraction.""" 

+

        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, objid)) 

+

 

+

    def report_extraction(self, video_id): 

+

        """Report information extraction.""" 

+

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) 

+

 

+

    def _real_extract(self, url): 

+

        mobj = re.match(self._VALID_URL, url) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

+

            return 

+

 

+

        if mobj.group('course') and mobj.group('video'): # A specific video 

+

            course = mobj.group('course') 

+

            video = mobj.group('video') 

+

            info = { 

+

                'id': course + '_' + video, 

+

                'uploader': None, 

+

                'upload_date': None, 

+

            } 

+

 

+

            self.report_extraction(info['id']) 

+

            baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/' 

+

            xmlUrl = baseUrl + video + '.xml' 

+

            try: 

+

                metaXml = compat_urllib_request.urlopen(xmlUrl).read() 

+

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

                self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err)) 

+

                return 

+

            mdoc = xml.etree.ElementTree.fromstring(metaXml) 

+

            try: 

+

                info['title'] = mdoc.findall('./title')[0].text 

+

                info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text 

+

            except IndexError: 

+

                self._downloader.trouble(u'\nERROR: Invalid metadata XML file') 

+

                return 

+

            info['ext'] = info['url'].rpartition('.')[2] 

+

            return [info] 

+

        elif mobj.group('course'): # A course page 

+

            course = mobj.group('course') 

+

            info = { 

+

                'id': course, 

+

                'type': 'playlist', 

+

                'uploader': None, 

+

                'upload_date': None, 

+

            } 

+

 

+

            self.report_download_webpage(info['id']) 

+

            try: 

+

                coursepage = compat_urllib_request.urlopen(url).read() 

+

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

                self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err)) 

+

                return 

+

 

+

            m = re.search('<h1>([^<]+)</h1>', coursepage) 

+

            if m: 

+

                info['title'] = unescapeHTML(m.group(1)) 

+

            else: 

+

                info['title'] = info['id'] 

+

 

+

            m = re.search('<description>([^<]+)</description>', coursepage) 

+

            if m: 

+

                info['description'] = unescapeHTML(m.group(1)) 

+

 

+

            links = orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage)) 

+

            info['list'] = [ 

+

                { 

+

                    'type': 'reference', 

+

                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(vpage), 

+

                } 

+

                    for vpage in links] 

+

            results = [] 

+

            for entry in info['list']: 

+

                assert entry['type'] == 'reference' 

+

                results += self.extract(entry['url']) 

+

            return results 

+

 

+

        else: # Root page 

+

            info = { 

+

                'id': 'Stanford OpenClassroom', 

+

                'type': 'playlist', 

+

                'uploader': None, 

+

                'upload_date': None, 

+

            } 

+

 

+

            self.report_download_webpage(info['id']) 

+

            rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php' 

+

            try: 

+

                rootpage = compat_urllib_request.urlopen(rootURL).read() 

+

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

                self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err)) 

+

                return 

+

 

+

            info['title'] = info['id'] 

+

 

+

            links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage)) 

+

            info['list'] = [ 

+

                { 

+

                    'type': 'reference', 

+

                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(cpage), 

+

                } 

+

                    for cpage in links] 

+

 

+

            results = [] 

+

            for entry in info['list']: 

+

                assert entry['type'] == 'reference' 

+

                results += self.extract(entry['url']) 

+

            return results 

+

 

+

class MTVIE(InfoExtractor): 

+

    """Information extractor for MTV.com""" 

+

 

+

    _VALID_URL = r'^(?P<proto>https?://)?(?:www\.)?mtv\.com/videos/[^/]+/(?P<videoid>[0-9]+)/[^/]+$' 

+

    IE_NAME = u'mtv' 

+

 

+

    def report_extraction(self, video_id): 

+

        """Report information extraction.""" 

+

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) 

+

 

+

    def _real_extract(self, url): 

+

        mobj = re.match(self._VALID_URL, url) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

+

            return 

+

        if not mobj.group('proto'): 

+

            url = 'http://' + url 

+

        video_id = mobj.group('videoid') 

+

 

+

        webpage = self._download_webpage(url, video_id) 

+

 

+

        mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: unable to extract song name') 

+

            return 

+

        song_name = unescapeHTML(mobj.group(1).decode('iso-8859-1')) 

+

        mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: unable to extract performer') 

+

            return 

+

        performer = unescapeHTML(mobj.group(1).decode('iso-8859-1')) 

+

        video_title = performer + ' - ' + song_name 

+

 

+

        mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: unable to extract mtvn_uri')

+

            return 

+

        mtvn_uri = mobj.group(1) 

+

 

+

        mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: unable to extract content id') 

+

            return 

+

        content_id = mobj.group(1) 

+

 

+

        videogen_url = 'http://www.mtv.com/player/includes/mediaGen.jhtml?uri=' + mtvn_uri + '&id=' + content_id + '&vid=' + video_id + '&ref=www.mtvn.com&viewUri=' + mtvn_uri 

+

        self.report_extraction(video_id) 

+

        request = compat_urllib_request.Request(videogen_url) 

+

        try: 

+

            metadataXml = compat_urllib_request.urlopen(request).read() 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % compat_str(err)) 

+

            return 

+

 

+

        mdoc = xml.etree.ElementTree.fromstring(metadataXml) 

+

        renditions = mdoc.findall('.//rendition') 

+

 

+

        # For now, always pick the highest quality. 

+

        rendition = renditions[-1] 

+

 

+

        try: 

+

            _, _, ext = rendition.attrib['type'].partition('/')

+

            format = ext + '-' + rendition.attrib['width'] + 'x' + rendition.attrib['height'] + '_' + rendition.attrib['bitrate'] 
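            # e.g. (illustrative) type 'video/mp4', width 640, height 360 and
            # bitrate 450 produce the format string 'mp4-640x360_450'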

+

            video_url = rendition.find('./src').text 

+

        except KeyError: 

+

            self._downloader.trouble(u'ERROR: Invalid rendition field.')

+

            return 

+

 

+

        info = { 

+

            'id': video_id, 

+

            'url': video_url, 

+

            'uploader': performer, 

+

            'upload_date': None, 

+

            'title': video_title, 

+

            'ext': ext, 

+

            'format': format, 

+

        } 

+

 

+

        return [info] 

+

 

+

 

+

class YoukuIE(InfoExtractor): 

+

    _VALID_URL =  r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html' 

+

 

+

    def report_download_webpage(self, file_id): 

+

        """Report webpage download.""" 

+

        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, file_id)) 

+

 

+

    def report_extraction(self, file_id): 

+

        """Report information extraction.""" 

+

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id)) 

+

 

+

    def _gen_sid(self): 

+

        nowTime = int(time.time() * 1000) 

+

        random1 = random.randint(1000,1998) 

+

        random2 = random.randint(1000,9999) 

+

 

+

        return "%d%d%d" %(nowTime,random1,random2) 

+

 

+

    def _get_file_ID_mix_string(self, seed): 

+

        mixed = [] 

+

        source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890") 

+

        seed = float(seed) 

+

        for i in range(len(source)): 

+

            seed = (seed * 211 + 30031) % 65536

+

            index = math.floor(seed / 65536 * len(source))

+

            mixed.append(source[int(index)]) 

+

            source.remove(source[int(index)]) 

+

        #return ''.join(mixed) 

+

        return mixed 
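    # The loop above is a small linear congruential generator (LCG),
    # seed' = (211 * seed + 30031) mod 65536, used to deterministically
    # shuffle the 68-character alphabet so that this client derives the
    # same permutation the server derived from the supplied seed.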

+

 

+

    def _get_file_id(self, fileId, seed): 

+

        mixed = self._get_file_ID_mix_string(seed) 

+

        ids = fileId.split('*') 

+

        realId = [] 

+

        for ch in ids: 

+

            if ch: 

+

                realId.append(mixed[int(ch)]) 

+

        return ''.join(realId) 

+

 

+

    def _real_extract(self, url): 

+

        mobj = re.match(self._VALID_URL, url) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

+

            return 

+

        video_id = mobj.group('ID') 

+

 

+

        info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id 

+

 

+

        request = compat_urllib_request.Request(info_url, None, std_headers) 

+

        try: 

+

            self.report_download_webpage(video_id) 

+

            jsondata = compat_urllib_request.urlopen(request).read() 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) 

+

            return 

+

 

+

        self.report_extraction(video_id) 

+

        try: 

+

            jsonstr = jsondata.decode('utf-8') 

+

            config = json.loads(jsonstr) 

+

 

+

            video_title = config['data'][0]['title']

+

            seed = config['data'][0]['seed'] 

+

 

+

            format = self._downloader.params.get('format', None) 

+

            supported_format = list(config['data'][0]['streamfileids'].keys()) 

+

 

+

            if format is None or format == 'best': 

+

                if 'hd2' in supported_format: 

+

                    format = 'hd2' 

+

                else: 

+

                    format = 'flv' 

+

                ext = u'flv' 

+

            elif format == 'worst': 

+

                format = 'mp4' 

+

                ext = u'mp4' 

+

            else: 

+

                format = 'flv' 

+

                ext = u'flv' 

+

 

+

 

+

            fileid = config['data'][0]['streamfileids'][format] 

+

            keys = [s['k'] for s in config['data'][0]['segs'][format]] 

+

        except (UnicodeDecodeError, ValueError, KeyError): 

+

            self._downloader.trouble(u'ERROR: unable to extract info section') 

+

            return 

+

 

+

        files_info = []

+

        sid = self._gen_sid() 

+

        fileid = self._get_file_id(fileid, seed) 

+

 

+

        # Columns 8 and 9 of fileid represent the segment number

+

        # i.e. fileid[8:10] must be rewritten for each segment
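        # e.g. (illustrative) with index 3 and key 'abcd', the two segment
        # characters become '03' and the request looks like
        #   http://f.youku.com/player/getFlvPath/sid/<sid>_03/st/flv/fileid/<fileid>?k=abcd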

+

        for index, key in enumerate(keys): 

+

 

+

            temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:]) 

+

            download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key) 

+

 

+

            info = { 

+

                'id': '%s_part%02d' % (video_id, index), 

+

                'url': download_url, 

+

                'uploader': None, 

+

                'upload_date': None, 

+

                'title': video_title, 

+

                'ext': ext, 

+

            } 

+

            files_info.append(info) 

+

 

+

        return files_info 

+

 

+

 

+

class XNXXIE(InfoExtractor): 

+

    """Information extractor for xnxx.com""" 

+

 

+

    _VALID_URL = r'^http://video\.xnxx\.com/video([0-9]+)/(.*)' 

+

    IE_NAME = u'xnxx' 

+

    VIDEO_URL_RE = r'flv_url=(.*?)&amp;' 

+

    VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM' 

+

    VIDEO_THUMB_RE = r'url_bigthumb=(.*?)&amp;' 

+

 

+

    def report_webpage(self, video_id): 

+

        """Report information extraction""" 

+

        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) 

+

 

+

    def report_extraction(self, video_id): 

+

        """Report information extraction""" 

+

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) 

+

 

+

    def _real_extract(self, url): 

+

        mobj = re.match(self._VALID_URL, url) 

+

        if mobj is None: 

+

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

+

            return 

+

        video_id = mobj.group(1) 

+

 

+

        self.report_webpage(video_id) 

+

 

+

        # Get webpage content 

+

        try: 

+

            webpage_bytes = compat_urllib_request.urlopen(url).read() 

+

            webpage = webpage_bytes.decode('utf-8') 

+

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

+

            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))

+

            return 

+

 

+

        result = re.search(self.VIDEO_URL_RE, webpage) 

+

        if result is None: 

+

            self._downloader.trouble(u'ERROR: unable to extract video url') 

+

            return 

+

        video_url = compat_urllib_parse.unquote(result.group(1)) 

+

 

+

        result = re.search(self.VIDEO_TITLE_RE, webpage) 

+

        if result is None: 

+

            self._downloader.trouble(u'ERROR: unable to extract video title') 

+

            return 

+

        video_title = result.group(1) 

+

 

+

        result = re.search(self.VIDEO_THUMB_RE, webpage) 

+

        if result is None: 

+

            self._downloader.trouble(u'ERROR: unable to extract video thumbnail') 

+

            return 

+

        video_thumbnail = result.group(1) 

+

 

+

        return [{ 

+

            'id': video_id, 

+

            'url': video_url, 

+

            'uploader': None, 

+

            'upload_date': None, 

+

            'title': video_title, 

+

            'ext': 'flv', 

+

            'thumbnail': video_thumbnail, 

+

            'description': None, 

+

        }] 

+

 

+

 

+

class GooglePlusIE(InfoExtractor):
    """Information extractor for plus.google.com."""

    _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)'
    IE_NAME = u'plus.google'

    def __init__(self, downloader=None):
        InfoExtractor.__init__(self, downloader)

    def report_extract_entry(self, url):
        """Report downloading entry"""
        self._downloader.to_screen(u'[plus.google] Downloading entry: %s' % url)

    def report_date(self, upload_date):
        """Report entry date"""
        self._downloader.to_screen(u'[plus.google] Entry date: %s' % upload_date)

    def report_uploader(self, uploader):
        """Report entry uploader"""
        self._downloader.to_screen(u'[plus.google] Uploader: %s' % uploader)

    def report_title(self, video_title):
        """Report entry title"""
        self._downloader.to_screen(u'[plus.google] Title: %s' % video_title)

    def report_extract_vid_page(self, video_page):
        """Report information extraction."""
        self._downloader.to_screen(u'[plus.google] Extracting video page: %s' % video_page)

    def _real_extract(self, url):
        # Extract id from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
            return

        post_url = mobj.group(0)
        video_id = mobj.group(1)

        video_extension = 'flv'

        # Step 1, Retrieve post webpage to extract further information
        self.report_extract_entry(post_url)
        request = compat_urllib_request.Request(post_url)
        try:
            webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % compat_str(err))
            return

        # Extract update date
        upload_date = None
        pattern = 'title="Timestamp">(.*?)</a>'
        mobj = re.search(pattern, webpage)
        if mobj:
            upload_date = mobj.group(1)
            # Convert timestring to a format suitable for filename
            upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
            upload_date = upload_date.strftime('%Y%m%d')
        self.report_date(upload_date)

        # Extract uploader
        uploader = None
        pattern = r'rel\="author".*?>(.*?)</a>'
        mobj = re.search(pattern, webpage)
        if mobj:
            uploader = mobj.group(1)
        self.report_uploader(uploader)

        # Extract title
        # Get the first line for title
        video_title = u'NA'
        pattern = r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]'
        mobj = re.search(pattern, webpage)
        if mobj:
            video_title = mobj.group(1)
        self.report_title(video_title)

        # Step 2, Simulate clicking the image box to launch video
        pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]'
        mobj = re.search(pattern, webpage)
        if mobj is None:
            self._downloader.trouble(u'ERROR: unable to extract video page URL')
            return

        video_page = mobj.group(1)
        request = compat_urllib_request.Request(video_page)
        try:
            webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
            return
        self.report_extract_vid_page(video_page)

        # Extract video links of all sizes on the video page
        pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
        mobj = re.findall(pattern, webpage)
        if len(mobj) == 0:
            self._downloader.trouble(u'ERROR: unable to extract video links')
            return

        # Sort by resolution
        links = sorted(mobj)

        # Take the last of the sorted (resolution, url) tuples, i.e. the highest resolution
        video_url = links[-1]
        # Only keep the url; the resolution part of the tuple is no longer needed
        video_url = video_url[-1]
        # Decode escaped \u0026-style hex sequences
        try:
            video_url = video_url.decode("unicode_escape")
        except AttributeError: # Python 3
            video_url = bytes(video_url, 'ascii').decode('unicode-escape')

        return [{
            'id':       video_id,
            'url':      video_url,
            'uploader': uploader,
            'upload_date':  upload_date,
            'title':    video_title,
            'ext':      video_extension,
        }]

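# --- Illustrative sketch, not part of the release ---
# Why sorted(...)[-1] above yields the highest resolution: re.findall returns
# (resolution, url) string tuples, and tuples sort by their first field
# (lexicographically, since the resolutions are strings here).
links = sorted([('360', 'http://example.invalid/a'),
                ('720', 'http://example.invalid/b'),
                ('480', 'http://example.invalid/c')])
assert links[-1] == ('720', 'http://example.invalid/b')
assert links[-1][-1] == 'http://example.invalid/b'    # keep only the URL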

class NBAIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:watch\.|www\.)?nba\.com/(?:nba/)?video(/[^?]*)(\?.*)?$'
    IE_NAME = u'nba'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        video_id = mobj.group(1)
        if video_id.endswith('/index.html'):
            video_id = video_id[:-len('/index.html')]

        webpage = self._download_webpage(url, video_id)

        video_url = u'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4'
        def _findProp(rexp, default=None):
            m = re.search(rexp, webpage)
            if m:
                return unescapeHTML(m.group(1))
            else:
                return default

        shortened_video_id = video_id.rpartition('/')[2]
        title = _findProp(r'<meta property="og:title" content="(.*?)"', shortened_video_id).replace('NBA.com: ', '')
        info = {
            'id': shortened_video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': title,
            'upload_date': _findProp(r'<b>Date:</b> (.*?)</div>'),
            'description': _findProp(r'<div class="description">(.*?)</h1>'),
        }
        return [info]


class JustinTVIE(InfoExtractor):
    """Information extractor for justin.tv and twitch.tv"""
    # TODO: One broadcast may be split into multiple videos. The key
    # 'broadcast_id' is the same for all parts, and 'broadcast_part'
    # starts at 1 and increases. Can we treat all parts as one video?

    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
        ([^/]+)(?:/b/([^/]+))?/?(?:\#.*)?$"""
    _JUSTIN_PAGE_LIMIT = 100
    IE_NAME = u'justin.tv'

    def report_extraction(self, file_id):
        """Report information extraction."""
        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id))

    def report_download_page(self, channel, offset):
        """Report attempt to download a single page of videos."""
        self._downloader.to_screen(u'[%s] %s: Downloading video information from %d to %d' %
                (self.IE_NAME, channel, offset, offset + self._JUSTIN_PAGE_LIMIT))

    # Return count of items, list of *valid* items
    def _parse_page(self, url):
        try:
            urlh = compat_urllib_request.urlopen(url)
            webpage_bytes = urlh.read()
            webpage = webpage_bytes.decode('utf-8', 'ignore')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.trouble(u'ERROR: unable to download video info JSON: %s' % compat_str(err))
            return

        response = json.loads(webpage)
        info = []
        for clip in response:
            video_url = clip['video_file_url']
            if video_url:
                video_extension = os.path.splitext(video_url)[1][1:]
                video_date = re.sub('-', '', clip['created_on'][:10])
                info.append({
                    'id': clip['id'],
                    'url': video_url,
                    'title': clip['title'],
                    'uploader': clip.get('user_id', clip.get('channel_id')),
                    'upload_date': video_date,
                    'ext': video_extension,
                })
        return (len(response), info)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        api = 'http://api.justin.tv'
        video_id = mobj.group(mobj.lastindex)
        paged = False
        if mobj.lastindex == 1:
            paged = True
            api += '/channel/archives/%s.json'
        else:
            api += '/clip/show/%s.json'
        api = api % (video_id,)

        self.report_extraction(video_id)

        info = []
        offset = 0
        limit = self._JUSTIN_PAGE_LIMIT
        while True:
            if paged:
                self.report_download_page(video_id, offset)
            page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
            page_count, page_info = self._parse_page(page_url)
            info.extend(page_info)
            if not paged or page_count != limit:
                break
            offset += limit
        return info

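# --- Illustrative sketch, not part of the release ---
# The termination rule of the paging loop above, in isolation: pages of size
# `limit` are fetched until a short page signals that no more data follows.
def fetch_all(fetch_page, limit=100):
    items, offset = [], 0
    while True:
        page = fetch_page(offset, limit)      # hypothetical page-fetching callback
        items.extend(page)
        if len(page) != limit:                # short page: this was the last one
            break
        offset += limit
    return items

assert fetch_all(lambda o, l: list(range(o, min(o + l, 250)))) == list(range(250))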

class FunnyOrDieIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:www\.)?funnyordie\.com/videos/(?P<id>[0-9a-f]+)/.*$'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)

        m = re.search(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"', webpage, re.DOTALL)
        if not m:
            self._downloader.trouble(u'ERROR: unable to find video information')
            return
        video_url = unescapeHTML(m.group('url'))

        m = re.search(r"class='player_page_h1'>\s+<a.*?>(?P<title>.*?)</a>", webpage)
        if not m:
            self._downloader.trouble(u'Cannot find video title')
            return
        title = unescapeHTML(m.group('title'))

        m = re.search(r'<meta property="og:description" content="(?P<desc>.*?)"', webpage)
        if m:
            desc = unescapeHTML(m.group('desc'))
        else:
            desc = None

        info = {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': title,
            'description': desc,
        }
        return [info]


class TweetReelIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:www\.)?tweetreel\.com/[?](?P<id>[0-9a-z]+)$'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
            return

        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)

        m = re.search(r'<div id="left" status_id="([0-9]+)">', webpage)
        if not m:
            self._downloader.trouble(u'ERROR: Cannot find status ID')
            return
        status_id = m.group(1)

        m = re.search(r'<div class="tweet_text">(.*?)</div>', webpage, flags=re.DOTALL)
        if not m:
            self._downloader.trouble(u'WARNING: Cannot find description')
        desc = unescapeHTML(re.sub('<a.*?</a>', '', m.group(1))).strip()

        m = re.search(r'<div class="tweet_info">.*?from <a target="_blank" href="https?://twitter.com/(?P<uploader_id>.+?)">(?P<uploader>.+?)</a>', webpage, flags=re.DOTALL)
        if not m:
            self._downloader.trouble(u'ERROR: Cannot find uploader')
            return
        uploader = unescapeHTML(m.group('uploader'))
        uploader_id = unescapeHTML(m.group('uploader_id'))

        m = re.search(r'<span unixtime="([0-9]+)"', webpage)
        if not m:
            self._downloader.trouble(u'ERROR: Cannot find upload date')
            return
        upload_date = datetime.datetime.fromtimestamp(int(m.group(1))).strftime('%Y%m%d')

        title = desc
        video_url = 'http://files.tweetreel.com/video/' + status_id + '.mov'

        info = {
            'id': video_id,
            'url': video_url,
            'ext': 'mov',
            'title': title,
            'description': desc,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'internal_id': status_id,
            'upload_date': upload_date
        }
        return [info]


class SteamIE(InfoExtractor):
    _VALID_URL = r"""http://store.steampowered.com/
                (?P<urltype>video|app)/ #If the page is only for videos or for a game
                (?P<gameID>\d+)/?
                (?P<videoID>\d*)(?P<extra>\??) #For urltype == video we sometimes get the videoID
                """

    def suitable(self, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(self._VALID_URL, url, re.VERBOSE) is not None

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url, re.VERBOSE)
        urlRE = r"'movie_(?P<videoID>\d+)': \{\s*FILENAME: \"(?P<videoURL>[\w:/\.\?=]+)\"(,\s*MOVIE_NAME: \"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\},"
        gameID = m.group('gameID')
        videourl = 'http://store.steampowered.com/video/%s/' % gameID
        webpage = self._download_webpage(videourl, gameID)
        mweb = re.finditer(urlRE, webpage)
        namesRE = r'<span class=\"title\">(?P<videoName>[\w:/\.\?=\+\s-]+)</span>'
        titles = list(re.finditer(namesRE, webpage))
        videos = []
        for vid, vtitle in zip(mweb, titles):
            video_id = vid.group('videoID')
            title = vtitle.group('videoName')
            video_url = vid.group('videoURL')
            if not video_url:
                self._downloader.trouble(u'ERROR: Cannot find video url for %s' % video_id)
            info = {
                'id': video_id,
                'url': video_url,
                'ext': 'flv',
                'title': title
            }
            videos.append(info)
        return videos


class UstreamIE(InfoExtractor):
    _VALID_URL = r'http://www.ustream.tv/recorded/(?P<videoID>\d+)'
    IE_NAME = u'ustream'

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('videoID')
        video_url = u'http://tcdn.ustream.tv/video/%s' % video_id
        webpage = self._download_webpage(url, video_id)
        m = re.search(r'data-title="(?P<title>.+)"', webpage)
        title = m.group('title')
        m = re.search(r'<a class="state" data-content-type="channel" data-content-id="(?P<uploader>\d+)"', webpage)
        uploader = m.group('uploader')
        info = {
            'id': video_id,
            'url': video_url,
            'ext': 'flv',
            'title': title,
            'uploader': uploader
        }
        return [info]


def gen_extractors():
    """ Return a list of an instance of every supported extractor.
    The order does matter; the first extractor matched is the one handling the URL.
    """
    return [
        YoutubePlaylistIE(),
        YoutubeChannelIE(),
        YoutubeUserIE(),
        YoutubeSearchIE(),
        YoutubeIE(),
        MetacafeIE(),
        DailymotionIE(),
        GoogleSearchIE(),
        PhotobucketIE(),
        YahooIE(),
        YahooSearchIE(),
        DepositFilesIE(),
        FacebookIE(),
        BlipTVUserIE(),
        BlipTVIE(),
        VimeoIE(),
        MyVideoIE(),
        ComedyCentralIE(),
        EscapistIE(),
        CollegeHumorIE(),
        XVideosIE(),
        SoundcloudIE(),
        InfoQIE(),
        MixcloudIE(),
        StanfordOpenClassroomIE(),
        MTVIE(),
        YoukuIE(),
        XNXXIE(),
        GooglePlusIE(),
        ArteTvIE(),
        NBAIE(),
        JustinTVIE(),
        FunnyOrDieIE(),
        TweetReelIE(),
        SteamIE(),
        UstreamIE(),
        GenericIE()
    ]

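# --- Illustrative sketch, not part of the release ---
# How a downloader consumes this list: the order matters because the first
# extractor whose suitable() accepts the URL handles it, which is why the
# specific extractors come before the catch-all GenericIE.
def pick_extractor(url, extractors):
    for ie in extractors:
        if ie.suitable(url):                  # most IEs match via their _VALID_URL regex
            return ie
    return None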
diff --git a/test_coverage/youtube_dl_PostProcessor.html b/test_coverage/youtube_dl_PostProcessor.html
new file mode 100644
index 000000000..421ce724e
--- /dev/null
+++ b/test_coverage/youtube_dl_PostProcessor.html
@@ -0,0 +1,490 @@
Coverage for youtube_dl.PostProcessor: 14%
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import

import os
import subprocess
import sys
import time

from .utils import *


class PostProcessor(object):
    """Post Processor class.

    PostProcessor objects can be added to downloaders with their
    add_post_processor() method. When the downloader has finished a
    successful download, it will take its internal chain of PostProcessors
    and start calling the run() method on each one of them, first with
    an initial argument and then with the returned value of the previous
    PostProcessor.

    The chain will be stopped if one of them ever returns None or the end
    of the chain is reached.

    PostProcessor objects follow a "mutual registration" process similar
    to InfoExtractor objects.
    """

    _downloader = None

    def __init__(self, downloader=None):
        self._downloader = downloader

    def set_downloader(self, downloader):
        """Sets the downloader for this PP."""
        self._downloader = downloader

    def run(self, information):
        """Run the PostProcessor.

        The "information" argument is a dictionary like the ones
        composed by InfoExtractors. The only difference is that this
        one has an extra field called "filepath" that points to the
        downloaded file.

        When this method returns None, the postprocessing chain is
        stopped. However, this method may return an information
        dictionary that will be passed to the next postprocessing
        object in the chain. It can be the one it received after
        changing some fields.

        In addition, this method may raise a PostProcessingError
        exception that will be taken into account by the downloader
        it was called from.
        """
        return information # by default, do nothing

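# --- Illustrative sketch, not part of the release ---
# The chain semantics described in the docstring above: each postprocessor
# receives the info dict returned by the previous one, and a None return
# stops the chain early.
def run_pp_chain(postprocessors, info):
    for pp in postprocessors:
        info = pp.run(info)
        if info is None:                      # chain stopped by this postprocessor
            break
    return info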

class AudioConversionError(BaseException):
    def __init__(self, message):
        self.message = message

class FFmpegExtractAudioPP(PostProcessor):
    def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, keepvideo=False, nopostoverwrites=False):
        PostProcessor.__init__(self, downloader)
        if preferredcodec is None:
            preferredcodec = 'best'
        self._preferredcodec = preferredcodec
        self._preferredquality = preferredquality
        self._keepvideo = keepvideo
        self._nopostoverwrites = nopostoverwrites
        self._exes = self.detect_executables()

    @staticmethod
    def detect_executables():
        def executable(exe):
            try:
                subprocess.Popen([exe, '-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
            except OSError:
                return False
            return exe
        programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
        return dict((program, executable(program)) for program in programs)

    def get_audio_codec(self, path):
        if not self._exes['ffprobe'] and not self._exes['avprobe']: return None
        try:
            cmd = [self._exes['avprobe'] or self._exes['ffprobe'], '-show_streams', '--', encodeFilename(path)]
            handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE)
            output = handle.communicate()[0]
            if handle.wait() != 0:
                return None
        except (IOError, OSError):
            return None
        audio_codec = None
        for line in output.decode('ascii', 'ignore').split('\n'):
            if line.startswith('codec_name='):
                audio_codec = line.split('=')[1].strip()
            elif line.strip() == 'codec_type=audio' and audio_codec is not None:
                return audio_codec
        return None

    def run_ffmpeg(self, path, out_path, codec, more_opts):
        if not self._exes['ffmpeg'] and not self._exes['avconv']:
            raise AudioConversionError('ffmpeg or avconv not found. Please install one.')
        if codec is None:
            acodec_opts = []
        else:
            acodec_opts = ['-acodec', codec]
        cmd = ([self._exes['avconv'] or self._exes['ffmpeg'], '-y', '-i', encodeFilename(path), '-vn']
               + acodec_opts + more_opts +
               ['--', encodeFilename(out_path)])
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            msg = stderr.strip().split('\n')[-1]
            raise AudioConversionError(msg)

    def run(self, information):
        path = information['filepath']

        filecodec = self.get_audio_codec(path)
        if filecodec is None:
            self._downloader.to_stderr(u'WARNING: unable to obtain file audio codec with ffprobe')
            return None

        more_opts = []
        if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
            if self._preferredcodec == 'm4a' and filecodec == 'aac':
                # Lossless, but in another container
                acodec = 'copy'
                extension = self._preferredcodec
                more_opts = [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
            elif filecodec in ['aac', 'mp3', 'vorbis']:
                # Lossless if possible
                acodec = 'copy'
                extension = filecodec
                if filecodec == 'aac':
                    more_opts = ['-f', 'adts']
                if filecodec == 'vorbis':
                    extension = 'ogg'
            else:
                # MP3 otherwise.
                acodec = 'libmp3lame'
                extension = 'mp3'
                more_opts = []
                if self._preferredquality is not None:
                    if int(self._preferredquality) < 10:
                        more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
                    else:
                        more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
        else:
            # We convert the audio (lossy)
            acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
            extension = self._preferredcodec
            more_opts = []
            if self._preferredquality is not None:
                if int(self._preferredquality) < 10:
                    more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
                else:
                    more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
            if self._preferredcodec == 'aac':
                more_opts += ['-f', 'adts']
            if self._preferredcodec == 'm4a':
                more_opts += [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
            if self._preferredcodec == 'vorbis':
                extension = 'ogg'
            if self._preferredcodec == 'wav':
                extension = 'wav'
                more_opts += ['-f', 'wav']

        prefix, sep, ext = path.rpartition(u'.') # not os.path.splitext, since the latter does not work on unicode in all setups
        new_path = prefix + sep + extension
        try:
            if self._nopostoverwrites and os.path.exists(encodeFilename(new_path)):
                self._downloader.to_screen(u'[youtube] Post-process file %s exists, skipping' % new_path)
            else:
                self._downloader.to_screen(u'[' + (self._exes['avconv'] and 'avconv' or 'ffmpeg') + '] Destination: ' + new_path)
                self.run_ffmpeg(path, new_path, acodec, more_opts)
        except:
            etype, e, tb = sys.exc_info()
            if isinstance(e, AudioConversionError):
                self._downloader.to_stderr(u'ERROR: audio conversion failed: ' + e.message)
            else:
                self._downloader.to_stderr(u'ERROR: error running ' + (self._exes['avconv'] and 'avconv' or 'ffmpeg'))
            return None

        # Try to update the date time for extracted audio file.
        if information.get('filetime') is not None:
            try:
                os.utime(encodeFilename(new_path), (time.time(), information['filetime']))
            except:
                self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file')

        if not self._keepvideo:
            try:
                os.remove(encodeFilename(path))
            except (IOError, OSError):
                self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file')
                return None

        information['filepath'] = new_path
        return information
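# --- Illustrative sketch, not part of the release ---
# Wiring the audio extractor into a downloader, roughly what youtube-dl does
# for --extract-audio; `fd` stands in for a hypothetical FileDownloader.
pp = FFmpegExtractAudioPP(preferredcodec='mp3', preferredquality='192',
                          keepvideo=False, nopostoverwrites=False)
fd.add_post_processor(pp)
# After a successful download, fd calls pp.run(info) with info['filepath']
# pointing at the downloaded media file; pp returns the dict with 'filepath'
# rewritten to the extracted audio file.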

diff --git a/test_coverage/youtube_dl_update.html b/test_coverage/youtube_dl_update.html
new file mode 100644
index 000000000..e630d2c2a
--- /dev/null
+++ b/test_coverage/youtube_dl_update.html
@@ -0,0 +1,402 @@
Coverage for youtube_dl.update: 6%
import json
import traceback
import hashlib
from zipimport import zipimporter

from .utils import *
from .version import __version__

def rsa_verify(message, signature, key):
    # Verify an RSA PKCS#1 v1.5 signature: apply the public key (n, e) to the
    # signature, strip the padding, and compare the embedded SHA-256 digest
    # against the digest of the message.
    from struct import pack
    from hashlib import sha256
    from sys import version_info
    def b(x):
        if version_info[0] == 2: return x
        else: return x.encode('latin1')
    assert(type(message) == type(b('')))
    block_size = 0
    n = key[0]
    while n:
        block_size += 1
        n >>= 8
    signature = pow(int(signature, 16), key[1], key[0])
    raw_bytes = []
    while signature:
        raw_bytes.insert(0, pack("B", signature & 0xFF))
        signature >>= 8
    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
    if signature[0:2] != b('\x00\x01'): return False
    signature = signature[2:]
    if not b('\x00') in signature: return False
    signature = signature[signature.index(b('\x00'))+1:]
    # The byte sequence below is the DER prefix of a SHA-256 DigestInfo structure
    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
    signature = signature[19:]
    if signature != sha256(message).digest(): return False
    return True

def update_self(to_screen, verbose, filename):
    """Update the program file with the latest version from the repository"""

    UPDATE_URL = "http://rg3.github.com/youtube-dl/update/"
    VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
    JSON_URL = UPDATE_URL + 'versions.json'
    UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)

    if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, "frozen"):
        to_screen(u'It looks like you installed youtube-dl with pip, setup.py or a tarball. Please use that to update.')
        return

    # Check if there is a new version
    try:
        newversion = compat_urllib_request.urlopen(VERSION_URL).read().decode('utf-8').strip()
    except:
        if verbose: to_screen(compat_str(traceback.format_exc()))
        to_screen(u'ERROR: can\'t find the current version. Please try again later.')
        return
    if newversion == __version__:
        to_screen(u'youtube-dl is up-to-date (' + __version__ + ')')
        return

    # Download and check versions info
    try:
        versions_info = compat_urllib_request.urlopen(JSON_URL).read().decode('utf-8')
        versions_info = json.loads(versions_info)
    except:
        if verbose: to_screen(compat_str(traceback.format_exc()))
        to_screen(u'ERROR: can\'t obtain versions info. Please try again later.')
        return
    if not 'signature' in versions_info:
        to_screen(u'ERROR: the versions file is not signed or corrupted. Aborting.')
        return
    signature = versions_info['signature']
    del versions_info['signature']
    if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
        to_screen(u'ERROR: the versions file signature is invalid. Aborting.')
        return

    to_screen(u'Updating to version ' + versions_info['latest'] + '...')
    version = versions_info['versions'][versions_info['latest']]
    if version.get('notes'):
        to_screen(u'PLEASE NOTE:')
        for note in version['notes']:
            to_screen(note)

    if not os.access(filename, os.W_OK):
        to_screen(u'ERROR: no write permissions on %s' % filename)
        return

    # Py2EXE
    if hasattr(sys, "frozen"):
        exe = os.path.abspath(filename)
        directory = os.path.dirname(exe)
        if not os.access(directory, os.W_OK):
            to_screen(u'ERROR: no write permissions on %s' % directory)
            return

        try:
            urlh = compat_urllib_request.urlopen(version['exe'][0])
            newcontent = urlh.read()
            urlh.close()
        except (IOError, OSError) as err:
            if verbose: to_screen(compat_str(traceback.format_exc()))
            to_screen(u'ERROR: unable to download latest version')
            return

        newcontent_hash = hashlib.sha256(newcontent).hexdigest()
        if newcontent_hash != version['exe'][1]:
            to_screen(u'ERROR: the downloaded file hash does not match. Aborting.')
            return

        try:
            with open(exe + '.new', 'wb') as outf:
                outf.write(newcontent)
        except (IOError, OSError) as err:
            if verbose: to_screen(compat_str(traceback.format_exc()))
            to_screen(u'ERROR: unable to write the new version')
            return

        try:
            bat = os.path.join(directory, 'youtube-dl-updater.bat')
            b = open(bat, 'w')
            b.write("""
echo Updating youtube-dl...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s"
del "%s"
            \n""" % (exe, exe, bat))
            b.close()

            os.startfile(bat)
        except (IOError, OSError) as err:
            if verbose: to_screen(compat_str(traceback.format_exc()))
            to_screen(u'ERROR: unable to overwrite current version')
            return

    # Zip unix package
    elif isinstance(globals().get('__loader__'), zipimporter):
        try:
            urlh = compat_urllib_request.urlopen(version['bin'][0])
            newcontent = urlh.read()
            urlh.close()
        except (IOError, OSError) as err:
            if verbose: to_screen(compat_str(traceback.format_exc()))
            to_screen(u'ERROR: unable to download latest version')
            return

        newcontent_hash = hashlib.sha256(newcontent).hexdigest()
        if newcontent_hash != version['bin'][1]:
            to_screen(u'ERROR: the downloaded file hash does not match. Aborting.')
            return

        try:
            with open(filename, 'wb') as outf:
                outf.write(newcontent)
        except (IOError, OSError) as err:
            if verbose: to_screen(compat_str(traceback.format_exc()))
            to_screen(u'ERROR: unable to overwrite current version')
            return

    to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
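# --- Illustrative sketch, not part of the release ---
# The shape of versions.json that update_self expects, inferred from the keys
# it reads above; every value below is a placeholder, not real release data:
#
# {
#   "signature": "<hex RSA signature over the rest, checked by rsa_verify>",
#   "latest": "<version id>",
#   "versions": {
#     "<version id>": {
#       "bin":   ["<URL of the unix binary>", "<sha256 of that file>"],
#       "exe":   ["<URL of the Windows exe>", "<sha256 of that file>"],
#       "notes": ["<optional release notes, printed before updating>"]
#     }
#   }
# }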

diff --git a/test_coverage/youtube_dl_utils.html b/test_coverage/youtube_dl_utils.html
new file mode 100644
index 000000000..90bd46ddc
--- /dev/null
+++ b/test_coverage/youtube_dl_utils.html
@@ -0,0 +1,1160 @@
Coverage for youtube_dl.utils: 83%
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import gzip
import io
import json
import locale
import os
import re
import sys
import zlib
import email.utils

try:
    import urllib.request as compat_urllib_request
except ImportError: # Python 2
    import urllib2 as compat_urllib_request

try:
    import urllib.error as compat_urllib_error
except ImportError: # Python 2
    import urllib2 as compat_urllib_error

try:
    import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
    import urllib as compat_urllib_parse

try:
    from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
    from urlparse import urlparse as compat_urllib_parse_urlparse

try:
    import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
    import cookielib as compat_cookiejar

try:
    import html.entities as compat_html_entities
except ImportError: # Python 2
    import htmlentitydefs as compat_html_entities

try:
    import html.parser as compat_html_parser
except ImportError: # Python 2
    import HTMLParser as compat_html_parser

try:
    import http.client as compat_http_client
except ImportError: # Python 2
    import httplib as compat_http_client

try:
    from subprocess import DEVNULL
    compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
    compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')

try:
    from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
    # Python 2's version is apparently totally broken
    def _unquote(string, encoding='utf-8', errors='replace'):
        if string == '':
            return string
        res = string.split('%')
        if len(res) == 1:
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'replace'
        # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
        pct_sequence = b''
        string = res[0]
        for item in res[1:]:
            try:
                if not item:
                    raise ValueError
                pct_sequence += item[:2].decode('hex')
                rest = item[2:]
                if not rest:
                    # This segment was just a single percent-encoded character.
                    # May be part of a sequence of code units, so delay decoding.
                    # (Stored in pct_sequence).
                    continue
            except ValueError:
                rest = '%' + item
            # Encountered non-percent-encoded characters. Flush the current
            # pct_sequence.
            string += pct_sequence.decode(encoding, errors) + rest
            pct_sequence = b''
        if pct_sequence:
            # Flush the final pct_sequence
            string += pct_sequence.decode(encoding, errors)
        return string

    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
                encoding='utf-8', errors='replace'):
        qs, _coerce_result = qs, unicode
        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
        r = []
        for name_value in pairs:
            if not name_value and not strict_parsing:
                continue
            nv = name_value.split('=', 1)
            if len(nv) != 2:
                if strict_parsing:
                    raise ValueError("bad query field: %r" % (name_value,))
                # Handle case of a control-name with no equal sign
                if keep_blank_values:
                    nv.append('')
                else:
                    continue
            if len(nv[1]) or keep_blank_values:
                name = nv[0].replace('+', ' ')
                name = _unquote(name, encoding=encoding, errors=errors)
                name = _coerce_result(name)
                value = nv[1].replace('+', ' ')
                value = _unquote(value, encoding=encoding, errors=errors)
                value = _coerce_result(value)
                r.append((name, value))
        return r

    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
                encoding='utf-8', errors='replace'):
        parsed_result = {}
        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
                        encoding=encoding, errors=errors)
        for name, value in pairs:
            if name in parsed_result:
                parsed_result[name].append(value)
            else:
                parsed_result[name] = [value]
        return parsed_result
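# --- Illustrative sketch, not part of the release ---
# The backported parser behaves like Python 3's urllib.parse.parse_qs,
# mapping each field name to a list of values.
assert compat_parse_qs('v=abc&t=1m&t=2m') == {'v': ['abc'], 't': ['1m', '2m']}
assert compat_parse_qs('a=&b=1', keep_blank_values=True) == {'a': [''], 'b': ['1']}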

try:
    compat_str = unicode # Python 2
except NameError:
    compat_str = str

try:
    compat_chr = unichr # Python 2
except NameError:
    compat_chr = chr

std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}

def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        u'TEST'.encode(pref)
    except:
        pref = 'UTF-8'

    return pref

if sys.version_info < (3,0):
    def compat_print(s):
        print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
    def compat_print(s):
        assert type(s) == type(u'')
        print(s)

# In Python 2.x, json.dump expects a bytestream.
# In Python 3.x, it writes to a character stream
if sys.version_info < (3,0):
    def write_json_file(obj, fn):
        with open(fn, 'wb') as f:
            json.dump(obj, f)
else:
    def write_json_file(obj, fn):
        with open(fn, 'w', encoding='utf-8') as f:
            json.dump(obj, f)

def htmlentity_transform(matchobj):
    """Transforms an HTML entity to a character.

    This function receives a match object and is intended to be used with
    the re.sub() function.
    """
    entity = matchobj.group(1)

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    mobj = re.match(u'(?u)#(x?\\d+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith(u'x'):
            base = 16
            numstr = u'0%s' % numstr
        else:
            base = 10
        return compat_chr(int(numstr, base))

    # Unknown entity in name, return its literal representation
    return (u'&%s;' % entity)
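# --- Illustrative sketch, not part of the release ---
# htmlentity_transform is meant as an re.sub() callback, which is exactly how
# unescapeHTML uses it further down in this module:
assert re.sub(u'(?u)&(.+?);', htmlentity_transform, u'a &amp; b &#x26; c') == u'a & b & c'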

compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix
class AttrParser(compat_html_parser.HTMLParser):
    """Modified HTMLParser that isolates a tag with the specified attribute"""
    def __init__(self, attribute, value):
        self.attribute = attribute
        self.value = value
        self.result = None
        self.started = False
        self.depth = {}
        self.html = None
        self.watch_startpos = False
        self.error_count = 0
        compat_html_parser.HTMLParser.__init__(self)

    def error(self, message):
        if self.error_count > 10 or self.started:
            raise compat_html_parser.HTMLParseError(message, self.getpos())
        self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line
        self.error_count += 1
        self.goahead(1)

    def loads(self, html):
        self.html = html
        self.feed(html)
        self.close()

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if self.started:
            self.find_startpos(None)
        if self.attribute in attrs and attrs[self.attribute] == self.value:
            self.result = [tag]
            self.started = True
            self.watch_startpos = True
        if self.started:
            if not tag in self.depth: self.depth[tag] = 0
            self.depth[tag] += 1

    def handle_endtag(self, tag):
        if self.started:
            if tag in self.depth: self.depth[tag] -= 1
            if self.depth[self.result[0]] == 0:
                self.started = False
                self.result.append(self.getpos())

    def find_startpos(self, x):
        """Needed to put the start position of the result (self.result[1])
        after the opening tag with the requested id"""
        if self.watch_startpos:
            self.watch_startpos = False
            self.result.append(self.getpos())
    handle_entityref = handle_charref = handle_data = handle_comment = \
    handle_decl = handle_pi = unknown_decl = find_startpos

    def get_result(self):
        if self.result is None:
            return None
        if len(self.result) != 3:
            return None
        lines = self.html.split('\n')
        lines = lines[self.result[1][0]-1:self.result[2][0]]
        lines[0] = lines[0][self.result[1][1]:]
        if len(lines) == 1:
            lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]]
        lines[-1] = lines[-1][:self.result[2][1]]
        return '\n'.join(lines).strip()

def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    return get_element_by_attribute("id", id, html)

def get_element_by_attribute(attribute, value, html):
    """Return the content of the tag with the specified attribute in the passed HTML document"""
    parser = AttrParser(attribute, value)
    try:
        parser.loads(html)
    except compat_html_parser.HTMLParseError:
        pass
    return parser.get_result()
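# --- Illustrative sketch, not part of the release ---
# Extracting the inner content of a tag by its id with the parser above:
assert get_element_by_id('x', u'<body><div id="x"><p>hello</p></div></body>') == u'<p>hello</p>'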

def clean_html(html): 

+

    """Clean an HTML snippet into a readable string""" 

+

    # Newline vs <br /> 

+

    html = html.replace('\n', ' ') 

+

    html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html) 

+

    html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html) 

+

    # Strip html tags 

+

    html = re.sub('<.*?>', '', html) 

+

    # Replace html entities 

+

    html = unescapeHTML(html) 

+

    return html 


def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == u'-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        # In case of error, try to remove win32 forbidden chars
        filename = re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', filename)

        # An exception here should be caught in the caller
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
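Illustrative calls (hypothetical filenames; the fallback branch only runs when the first open() actually fails on the host filesystem):

    from youtube_dl.utils import sanitize_open

    # u'-' is special-cased to mean stdout
    stream, name = sanitize_open(u'-', 'wb')

    # If opening this name fails, the win32-forbidden characters are
    # replaced with '#' and the rewritten name is returned with the stream.
    stream, name = sanitize_open(u'AC/DC: Live.mp4', 'wb')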


def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp
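For example (the timestamp in the comment is the UTC epoch value implied by the +0100 offset; unparseable input falls through to None):

    from youtube_dl.utils import timeconvert

    print(timeconvert(u'Wed, 02 Jan 2013 22:47:06 +0100'))  # 1357163226
    print(timeconvert(u'not a date'))                       # None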


def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept if possible
    """
    def replace_insane(char):
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    result = u''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if not result:
            result = '_'
    return result
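A sketch of the two modes (the outputs follow from replace_insane plus the underscore collapsing above):

    from youtube_dl.utils import sanitize_filename

    print(sanitize_filename(u'AC/DC: Back in Black'))
    # AC_DC - Back in Black
    print(sanitize_filename(u'AC/DC: Back in Black', restricted=True))
    # AC_DC_-_Back_in_Black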


def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res
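For example:

    from youtube_dl.utils import orderedSet

    print(orderedSet([3, 1, 3, 2, 1]))  # [3, 1, 2] -- first occurrence wins

Unlike set(), this preserves encounter order; the linear membership scan makes it O(n^2), which is fine for the short lists it is used on.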


def unescapeHTML(s):
    """
    @param s a string
    """
    assert type(s) == type(u'')

    result = re.sub(u'(?u)&(.+?);', htmlentity_transform, s)
    return result
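Note the unicode-only contract enforced by the assert; a small example covering a named and a numeric entity:

    from youtube_dl.utils import unescapeHTML

    print(unescapeHTML(u'Ben &amp; Jerry&#39;s'))  # Ben & Jerry's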


def encodeFilename(s):
    """
    @param s The name of the file
    """

    assert type(s) == type(u'')

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # Pass u'' directly to use Unicode APIs on Windows 2000 and up
        # (Detecting Windows NT 4 is tricky because 'major >= 4' would
        # match Windows 9x series as well. Besides, NT 4 is obsolete.)
        return s
    else:
        return s.encode(sys.getfilesystemencoding(), 'ignore')
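Behaviour differs by platform and interpreter (hypothetical filename):

    from youtube_dl.utils import encodeFilename

    name = encodeFilename(u'v\u00eddeo.mp4')
    # Python 3, or Python 2 on Windows >= 2000: the unicode string, unchanged
    # Python 2 on POSIX: bytes in the filesystem encoding,
    #                    e.g. 'v\xc3\xaddeo.mp4' under UTF-8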



class ExtractorError(Exception):
    """Error during info extraction."""
    def __init__(self, msg, tb=None):
        """ tb is the original traceback (so that it can be printed out) """
        super(ExtractorError, self).__init__(msg)
        if tb is None:
            tb = sys.exc_info()[2]
        self.traceback = tb


class DownloadError(Exception):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """
    pass


class SameFileError(Exception):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass


class PostProcessingError(Exception):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """
    pass

class MaxDownloadsReached(Exception):
    """ --max-downloads limit has been reached. """
    pass


class UnavailableVideoError(Exception):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass


class ContentTooShortError(Exception):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """
    # Both in bytes
    downloaded = None
    expected = None

    def __init__(self, downloaded, expected):
        self.downloaded = downloaded
        self.expected = expected
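A sketch of how a caller might use the byte counts carried by ContentTooShortError (hypothetical values):

    from youtube_dl.utils import ContentTooShortError

    try:
        raise ContentTooShortError(downloaded=512, expected=2048)
    except ContentTooShortError as err:
        # both attributes are byte counts, set by __init__ above
        print('got %d of %d bytes' % (err.downloaded, err.expected))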


class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-No-Compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    @staticmethod
    def deflate(data):
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def addinfourl_wrapper(stream, headers, url, code):
        if hasattr(compat_urllib_request.addinfourl, 'getcode'):
            return compat_urllib_request.addinfourl(stream, headers, url, code)
        ret = compat_urllib_request.addinfourl(stream, headers, url)
        ret.code = code
        return ret

    def http_request(self, req):
        for h in std_headers:
            if h in req.headers:
                del req.headers[h]
            req.add_header(h, std_headers[h])
        if 'Youtubedl-no-compression' in req.headers:
            if 'Accept-encoding' in req.headers:
                del req.headers['Accept-encoding']
            del req.headers['Youtubedl-no-compression']
        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            gz = gzip.GzipFile(fileobj=io.BytesIO(resp.read()), mode='r')
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        return resp

    https_request = http_request
    https_response = http_response
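A minimal sketch of installing the handler (assuming compat_urllib_request is importable from youtube_dl.utils, as the code above suggests; example.com stands in for a real URL):

    from youtube_dl.utils import YoutubeDLHandler, compat_urllib_request

    opener = compat_urllib_request.build_opener(YoutubeDLHandler())

    req = compat_urllib_request.Request('http://example.com/')
    # Ask the handler to strip Accept-encoding so the server sends plain
    # bytes; the marker header itself is removed before the real request.
    req.add_header('Youtubedl-No-Compression', 'True')
    page = opener.open(req).read()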

diff --git a/test_coverage/youtube_dl_version.html b/test_coverage/youtube_dl_version.html
new file mode 100644
index 000000000..d7bdade6e
--- /dev/null
+++ b/test_coverage/youtube_dl_version.html
@@ -0,0 +1,86 @@
[coverage.py report page: "Coverage for youtube_dl.version: 100%"; the measured source is the single assignment below]
__version__ = '2012.12.11' 

diff --git a/update/LATEST_VERSION b/update/LATEST_VERSION
index 4ae4a9f4d..7fb5054b1 100644
--- a/update/LATEST_VERSION
+++ b/update/LATEST_VERSION
@@ -1 +1 @@
-2012.12.11
\ No newline at end of file
+2013.01.02
\ No newline at end of file
diff --git a/update/versions.json b/update/versions.json
index bc076ffcd..6c8412f7e 100644
--- a/update/versions.json
+++ b/update/versions.json
@@ -1,6 +1,6 @@
 {
-    "latest": "2012.12.11",
-    "signature": "181a8ff9c3f3ced3dc3b72af69e0fa21e5d8adcfee3beb6fd2ad6d66a7131bc4d6f6e322a88506182755918fef59b0e24169cbe36ae017ebcbc5dd60388087eaf01517820804400f1c3640113ed2fbe3915b27d38b7e6330437fe689d158183bbf660ab2a8a017b8a2048442ff2ba62f590c45af83e06525c0e03c084355584d",
+    "latest": "2013.01.02",
+    "signature": "1579240235883d0b2ac02616e38bb4281697a15b832a6f5c65e4381c4d05a617d300665e39367881f72f7903418ea536e10bd4c66d8ed35ac190aa987e75d0332a11d496277882ddbbeced0f0247f5c0c84d014e7bd3cacad694715310f8c159e1fe7373eb5835a7b60c8ac7efb7915f979cdd3ca56fa92937e69b7b79f09611",
     "versions": {
         "2012.12.11": {
             "bin": [
@@ -15,6 +15,20 @@
                 "http://youtube-dl.org/downloads/2012.12.11/youtube-dl-2012.12.11.tar.gz",
                 "b6d259c60fefba76701ea0ea7b34c99169fc2644ce1d89ad10213a70d11ffb0f"
             ]
+        },
+        "2013.01.02": {
+            "bin": [
+                "http://youtube-dl.org/downloads/2013.01.02/youtube-dl",
+                "f5b502f8aaa77675c4884938b1e4871ebca2611813a0c0e74f60c0fbd6dcca6b"
+            ],
+            "exe": [
+                "http://youtube-dl.org/downloads/2013.01.02/youtube-dl.exe",
+                "75fa89d2ce297d102ff27675aa9d92545bbc91013f52ec52868c069f4f9f0422"
+            ],
+            "tar": [
+                "http://youtube-dl.org/downloads/2013.01.02/youtube-dl-2013.01.02.tar.gz",
+                "6a66d022ac8e1c13da284036288a133ec8dba003b7bd3a5179d0c0daca8c8196"
+            ]
         }
     }
 }
\ No newline at end of file
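A sketch of how a client could consume this manifest (illustrative only; the URL is assumed from the site layout, and the "signature" field exists so a real updater can verify the manifest before installing -- that check is omitted here):

    import hashlib
    import json
    from youtube_dl.utils import compat_urllib_request

    page = compat_urllib_request.urlopen('http://youtube-dl.org/update/versions.json').read()
    data = json.loads(page.decode('utf-8'))

    latest = data['latest']                          # e.g. "2013.01.02"
    url, expected_sha256 = data['versions'][latest]['bin']

    payload = compat_urllib_request.urlopen(url).read()
    if hashlib.sha256(payload).hexdigest() != expected_sha256:
        raise RuntimeError('checksum mismatch, refusing to install')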