mirror of https://github.com/ytdl-org/youtube-dl.git (synced 2025-10-29 09:26:20 -07:00)

Compare commits: 2014.11.13 ... 2014.11.23

89 Commits
	| Author | SHA1 | Date | |
|---|---|---|---|
|  | b9042def9d | ||
|  | aa79ac0c82 | ||
|  | 88125905cf | ||
|  | dd60be2bf9 | ||
|  | 119b3caa46 | ||
|  | 49f0da7ae1 | ||
|  | 2cead7e7bc | ||
|  | 2c64b8ba63 | ||
|  | 42e12102a9 | ||
|  | 6127693ed9 | ||
|  | 71069d2157 | ||
|  | f3391db889 | ||
|  | 9b32eca3ce | ||
|  | ec06f0f610 | ||
|  | e6c9c8f6ee | ||
|  | 85b9275517 | ||
|  | dfd5313afd | ||
|  | be53e2a737 | ||
|  | a1c68b9ef2 | ||
|  | 4d46c1c68c | ||
|  | d6f714f321 | ||
|  | 8569f3d629 | ||
|  | fed5d03260 | ||
|  | 6adeffa7c6 | ||
|  | b244b5c3f9 | ||
|  | f42c190769 | ||
|  | c9bf41145f | ||
|  | 5239075bb6 | ||
|  | 84437adfa3 | ||
|  | 732ea2f09b | ||
|  | aff2f4f4f5 | ||
|  | 3b9f631c41 | ||
|  | 3ba098a6a5 | ||
|  | 1394646a0a | ||
|  | 61ee5aeb73 | ||
|  | 07e378fa18 | ||
|  | e07e931375 | ||
|  | 480b7c32a9 | ||
|  | f56875f271 | ||
|  | 92120217eb | ||
|  | 37eddd3143 | ||
|  | 0857baade3 | ||
|  | 23ad44b57b | ||
|  | f48d3e9bbc | ||
|  | fbf94a7815 | ||
|  | 1921b24551 | ||
|  | 28e614de5c | ||
|  | cd9ad1d7e8 | ||
|  | 162f54eca6 | ||
|  | 33a266f4ba | ||
|  | 6b592d93a2 | ||
|  | 4686ae4b64 | ||
|  | 8d05f2c16a | ||
|  | a4bb83956c | ||
|  | eb5376044c | ||
|  | 3cbcff8a2d | ||
|  | e983cf5277 | ||
|  | 0ab1ca5501 | ||
|  | 4baafa229d | ||
|  | 7f3e33a147 | ||
|  | b7558d9881 | ||
|  | a0f59cdcb4 | ||
|  | a4bc433619 | ||
|  | b6b70730bf | ||
|  | 6a68bb574a | ||
|  | 0cf166ad4f | ||
|  | 2707b50ffe | ||
|  | 939fe70de0 | ||
|  | 89c15fe0b3 | ||
|  | ec5f601670 | ||
|  | 8caa0c9779 | ||
|  | e2548b5b25 | ||
|  | bbefcf04bf | ||
|  | c7b0add86f | ||
|  | a0155d93d9 | ||
|  | 00d9ef0b70 | ||
|  | 0cc8888038 | ||
|  | c735450e07 | ||
|  | 71f8c7ce7a | ||
|  | 5fee0eeac0 | ||
|  | eb4157fd17 | ||
|  | 69ede8ef81 | ||
|  | 609a61e3e6 | ||
|  | bf951c5e29 | ||
|  | af63fed7d8 | ||
|  | 68d1d41c03 | ||
|  | 3deed1e91a | ||
|  | 11b28e93d3 | ||
|  | 3898c8a7b2 | 
							
								
								
									
AUTHORS (2 changes)
							| @@ -81,3 +81,5 @@ winwon | ||||
| Xavier Beynon | ||||
| Gabriel Schubiner | ||||
| xantares | ||||
| Jan Matějka | ||||
| Mauroy Sébastien | ||||
|   | ||||
							
								
								
									
test/swftests/ConstArrayAccess.as (18 lines, new file)
							| @@ -0,0 +1,18 @@ | ||||
| // input: [] | ||||
| // output: 4 | ||||
|  | ||||
| package { | ||||
| public class ConstArrayAccess { | ||||
| 	private static const x:int = 2; | ||||
| 	private static const ar:Array = ["42", "3411"]; | ||||
|  | ||||
|     public static function main():int{ | ||||
|         var c:ConstArrayAccess = new ConstArrayAccess(); | ||||
|         return c.f(); | ||||
|     } | ||||
|  | ||||
|     public function f(): int { | ||||
|     	return ar[1].length; | ||||
|     } | ||||
| } | ||||
| } | ||||
							
								
								
									
test/swftests/ConstantInt.as (12 lines, new file)
							| @@ -0,0 +1,12 @@ | ||||
| // input: [] | ||||
| // output: 2 | ||||
|  | ||||
| package { | ||||
| public class ConstantInt { | ||||
| 	private static const x:int = 2; | ||||
|  | ||||
|     public static function main():int{ | ||||
|         return x; | ||||
|     } | ||||
| } | ||||
| } | ||||
							
								
								
									
test/swftests/DictCall.as (10 lines, new file)
							| @@ -0,0 +1,10 @@ | ||||
| // input: [{"x": 1, "y": 2}] | ||||
| // output: 3 | ||||
|  | ||||
| package { | ||||
| public class DictCall { | ||||
|     public static function main(d:Object):int{ | ||||
|         return d.x + d.y; | ||||
|     } | ||||
| } | ||||
| } | ||||
							
								
								
									
test/swftests/EqualsOperator.as (10 lines, new file)
							| @@ -0,0 +1,10 @@ | ||||
| // input: [] | ||||
| // output: false | ||||
|  | ||||
| package { | ||||
| public class EqualsOperator { | ||||
|     public static function main():Boolean{ | ||||
|         return 1 == 2; | ||||
|     } | ||||
| } | ||||
| } | ||||
							
								
								
									
test/swftests/MemberAssignment.as (22 lines, new file)
							| @@ -0,0 +1,22 @@ | ||||
| // input: [1] | ||||
| // output: 2 | ||||
|  | ||||
| package { | ||||
| public class MemberAssignment { | ||||
|     public var v:int; | ||||
|  | ||||
|     public function g():int { | ||||
|         return this.v; | ||||
|     } | ||||
|  | ||||
|     public function f(a:int):int{ | ||||
|         this.v = a; | ||||
|         return this.v + this.g(); | ||||
|     } | ||||
|  | ||||
|     public static function main(a:int): int { | ||||
|         var v:MemberAssignment = new MemberAssignment(); | ||||
|         return v.f(a); | ||||
|     } | ||||
| } | ||||
| } | ||||
							
								
								
									
test/swftests/NeOperator.as (24 lines, new file)
							| @@ -0,0 +1,24 @@ | ||||
| // input: [] | ||||
| // output: 123 | ||||
|  | ||||
| package { | ||||
| public class NeOperator { | ||||
|     public static function main(): int { | ||||
|         var res:int = 0; | ||||
|         if (1 != 2) { | ||||
|             res += 3; | ||||
|         } else { | ||||
|             res += 4; | ||||
|         } | ||||
|         if (2 != 2) { | ||||
|             res += 10; | ||||
|         } else { | ||||
|             res += 20; | ||||
|         } | ||||
|         if (9 == 9) { | ||||
|             res += 100; | ||||
|         } | ||||
|         return res; | ||||
|     } | ||||
| } | ||||
| } | ||||
							
								
								
									
test/swftests/PrivateVoidCall.as (22 lines, new file)
							| @@ -0,0 +1,22 @@ | ||||
| // input: [] | ||||
| // output: 9 | ||||
|  | ||||
| package { | ||||
| public class PrivateVoidCall { | ||||
|     public static function main():int{ | ||||
|         var f:OtherClass = new OtherClass(); | ||||
|         f.func(); | ||||
|         return 9; | ||||
|     } | ||||
| } | ||||
| } | ||||
|  | ||||
| class OtherClass { | ||||
|     private function pf():void { | ||||
|         ; | ||||
|     } | ||||
|  | ||||
|     public function func():void { | ||||
|         this.pf(); | ||||
|     } | ||||
| } | ||||
							
								
								
									
test/swftests/StringBasics.as (11 lines, new file)
							| @@ -0,0 +1,11 @@ | ||||
| // input: [] | ||||
| // output: 3 | ||||
|  | ||||
| package { | ||||
| public class StringBasics { | ||||
|     public static function main():int{ | ||||
|         var s:String = "abc"; | ||||
|         return s.length; | ||||
|     } | ||||
| } | ||||
| } | ||||
							
								
								
									
test/swftests/StringCharCodeAt.as (11 lines, new file)
							| @@ -0,0 +1,11 @@ | ||||
| // input: [] | ||||
| // output: 9897 | ||||
|  | ||||
| package { | ||||
| public class StringCharCodeAt { | ||||
|     public static function main():int{ | ||||
|         var s:String = "abc"; | ||||
|         return s.charCodeAt(1) * 100 + s.charCodeAt(); | ||||
|     } | ||||
| } | ||||
| } | ||||
							
								
								
									
test/swftests/StringConversion.as (11 lines, new file)
							| @@ -0,0 +1,11 @@ | ||||
| // input: [] | ||||
| // output: 2 | ||||
|  | ||||
| package { | ||||
| public class StringConversion { | ||||
|     public static function main():int{ | ||||
|         var s:String = String(99); | ||||
|         return s.length; | ||||
|     } | ||||
| } | ||||
| } | ||||
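All of the new `test/swftests/*.as` files above follow the same convention: a `// input:` comment holds the JSON-encoded arguments passed to `main()` and `// output:` the JSON-encoded return value the SWF-interpreter test harness should expect. As a hedged illustration (a hypothetical helper, not the project's actual harness), such headers could be read like this:

```python
import json
import re

def read_swf_test_spec(path):
    # Hypothetical helper: extract the '// input:' / '// output:' header
    # of a swftests/*.as file as (arguments, expected_result).
    with open(path) as f:
        source = f.read()
    args = json.loads(
        re.search(r'^//\s*input:\s*(.*)$', source, re.MULTILINE).group(1))
    expected = json.loads(
        re.search(r'^//\s*output:\s*(.*)$', source, re.MULTILINE).group(1))
    return args, expected

# For ConstArrayAccess.as this would yield ([], 4);
# for DictCall.as it would yield ([{'x': 1, 'y': 2}], 3).
```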
| @@ -1,4 +1,5 @@ | ||||
| #!/usr/bin/env python | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| # Allow direct execution | ||||
| import os | ||||
| @@ -19,7 +20,7 @@ def _download_restricted(url, filename, age): | ||||
|         'age_limit': age, | ||||
|         'skip_download': True, | ||||
|         'writeinfojson': True, | ||||
|         "outtmpl": "%(id)s.%(ext)s", | ||||
|         'outtmpl': '%(id)s.%(ext)s', | ||||
|     } | ||||
|     ydl = YoutubeDL(params) | ||||
|     ydl.add_default_info_extractors() | ||||
|   | ||||
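Apart from switching to single quotes, the only substantive detail in the hunk above is the output template the test pins down. Output templates are ordinary %-style format strings over fields of the info dict, e.g. (values invented for illustration):

```python
# Hypothetical info-dict values, just to show how the template expands:
info = {'id': 'abc123', 'ext': 'mp4'}
print('%(id)s.%(ext)s' % info)  # -> abc123.mp4
```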
| @@ -26,11 +26,13 @@ class TestCompat(unittest.TestCase): | ||||
|         self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str) | ||||
|  | ||||
|     def test_compat_expanduser(self): | ||||
|         old_home = os.environ.get('HOME') | ||||
|         test_str = 'C:\Documents and Settings\тест\Application Data' | ||||
|         os.environ['HOME'] = ( | ||||
|             test_str if sys.version_info >= (3, 0) | ||||
|             else test_str.encode(get_filesystem_encoding())) | ||||
|         self.assertEqual(compat_expanduser('~'), test_str) | ||||
|         os.environ['HOME'] = old_home | ||||
|  | ||||
|     def test_all_present(self): | ||||
|         import youtube_dl.compat | ||||
|   | ||||
| @@ -1,5 +1,7 @@ | ||||
| #!/usr/bin/env python | ||||
|  | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| # Allow direct execution | ||||
| import os | ||||
| import sys | ||||
| @@ -210,9 +212,9 @@ for n, test_case in enumerate(defs): | ||||
|     tname = 'test_' + str(test_case['name']) | ||||
|     i = 1 | ||||
|     while hasattr(TestDownload, tname): | ||||
|         tname = 'test_'  + str(test_case['name']) + '_' + str(i) | ||||
|         tname = 'test_%s_%d' % (test_case['name'], i) | ||||
|         i += 1 | ||||
|     test_method.__name__ = tname | ||||
|     test_method.__name__ = str(tname) | ||||
|     setattr(TestDownload, test_method.__name__, test_method) | ||||
|     del test_method | ||||
|  | ||||
|   | ||||
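The switch to `'test_%s_%d' % (...)` is cosmetic, but wrapping the result in `str()` matters once this file imports `unicode_literals`: on Python 2 a function's `__name__` must be a native byte string, while the generated name is now unicode. A quick sketch of the constraint (assuming Python 2 semantics as described):

```python
from __future__ import unicode_literals

def test_method():
    pass

tname = 'test_example_1'           # unicode under unicode_literals
# On Python 2, assigning the unicode value directly would raise
# "TypeError: __name__ must be set to a string object":
# test_method.__name__ = tname
test_method.__name__ = str(tname)  # native str on Python 2, a no-op on Python 3
```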
| @@ -1,3 +1,6 @@ | ||||
| #!/usr/bin/env python | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import unittest | ||||
|  | ||||
| import sys | ||||
| @@ -6,17 +9,19 @@ import subprocess | ||||
|  | ||||
| rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) | ||||
|  | ||||
|  | ||||
| try: | ||||
|     _DEV_NULL = subprocess.DEVNULL | ||||
| except AttributeError: | ||||
|     _DEV_NULL = open(os.devnull, 'wb') | ||||
|  | ||||
|  | ||||
| class TestExecution(unittest.TestCase): | ||||
|     def test_import(self): | ||||
|         subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir) | ||||
|  | ||||
|     def test_module_exec(self): | ||||
|         if sys.version_info >= (2,7): # Python 2.6 doesn't support package execution | ||||
|         if sys.version_info >= (2, 7):  # Python 2.6 doesn't support package execution | ||||
|             subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL) | ||||
|  | ||||
|     def test_main_exec(self): | ||||
|   | ||||
| @@ -1,4 +1,5 @@ | ||||
| #!/usr/bin/env python | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| # Allow direct execution | ||||
| import os | ||||
| @@ -74,7 +75,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles): | ||||
|         self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06') | ||||
|  | ||||
|     def test_youtube_list_subtitles(self): | ||||
|         self.DL.expect_warning(u'Video doesn\'t have automatic captions') | ||||
|         self.DL.expect_warning('Video doesn\'t have automatic captions') | ||||
|         self.DL.params['listsubtitles'] = True | ||||
|         info_dict = self.getInfoDict() | ||||
|         self.assertEqual(info_dict, None) | ||||
| @@ -87,7 +88,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles): | ||||
|         self.assertTrue(subtitles['it'] is not None) | ||||
|  | ||||
|     def test_youtube_nosubtitles(self): | ||||
|         self.DL.expect_warning(u'video doesn\'t have subtitles') | ||||
|         self.DL.expect_warning('video doesn\'t have subtitles') | ||||
|         self.url = 'n5BB19UTcdA' | ||||
|         self.DL.params['writesubtitles'] = True | ||||
|         self.DL.params['allsubtitles'] = True | ||||
| @@ -101,7 +102,7 @@ class TestYoutubeSubtitles(BaseTestSubtitles): | ||||
|         self.DL.params['subtitleslangs'] = langs | ||||
|         subtitles = self.getSubtitles() | ||||
|         for lang in langs: | ||||
|             self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang) | ||||
|             self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang) | ||||
|  | ||||
|  | ||||
| class TestDailymotionSubtitles(BaseTestSubtitles): | ||||
| @@ -130,20 +131,20 @@ class TestDailymotionSubtitles(BaseTestSubtitles): | ||||
|         self.assertEqual(len(subtitles.keys()), 5) | ||||
|  | ||||
|     def test_list_subtitles(self): | ||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') | ||||
|         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||
|         self.DL.params['listsubtitles'] = True | ||||
|         info_dict = self.getInfoDict() | ||||
|         self.assertEqual(info_dict, None) | ||||
|  | ||||
|     def test_automatic_captions(self): | ||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') | ||||
|         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||
|         self.DL.params['writeautomaticsub'] = True | ||||
|         self.DL.params['subtitleslang'] = ['en'] | ||||
|         subtitles = self.getSubtitles() | ||||
|         self.assertTrue(len(subtitles.keys()) == 0) | ||||
|  | ||||
|     def test_nosubtitles(self): | ||||
|         self.DL.expect_warning(u'video doesn\'t have subtitles') | ||||
|         self.DL.expect_warning('video doesn\'t have subtitles') | ||||
|         self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv' | ||||
|         self.DL.params['writesubtitles'] = True | ||||
|         self.DL.params['allsubtitles'] = True | ||||
| @@ -156,7 +157,7 @@ class TestDailymotionSubtitles(BaseTestSubtitles): | ||||
|         self.DL.params['subtitleslangs'] = langs | ||||
|         subtitles = self.getSubtitles() | ||||
|         for lang in langs: | ||||
|             self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang) | ||||
|             self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang) | ||||
|  | ||||
|  | ||||
| class TestTedSubtitles(BaseTestSubtitles): | ||||
| @@ -185,13 +186,13 @@ class TestTedSubtitles(BaseTestSubtitles): | ||||
|         self.assertTrue(len(subtitles.keys()) >= 28) | ||||
|  | ||||
|     def test_list_subtitles(self): | ||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') | ||||
|         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||
|         self.DL.params['listsubtitles'] = True | ||||
|         info_dict = self.getInfoDict() | ||||
|         self.assertEqual(info_dict, None) | ||||
|  | ||||
|     def test_automatic_captions(self): | ||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') | ||||
|         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||
|         self.DL.params['writeautomaticsub'] = True | ||||
|         self.DL.params['subtitleslang'] = ['en'] | ||||
|         subtitles = self.getSubtitles() | ||||
| @@ -203,7 +204,7 @@ class TestTedSubtitles(BaseTestSubtitles): | ||||
|         self.DL.params['subtitleslangs'] = langs | ||||
|         subtitles = self.getSubtitles() | ||||
|         for lang in langs: | ||||
|             self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang) | ||||
|             self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang) | ||||
|  | ||||
|  | ||||
| class TestBlipTVSubtitles(BaseTestSubtitles): | ||||
| @@ -211,13 +212,13 @@ class TestBlipTVSubtitles(BaseTestSubtitles): | ||||
|     IE = BlipTVIE | ||||
|  | ||||
|     def test_list_subtitles(self): | ||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') | ||||
|         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||
|         self.DL.params['listsubtitles'] = True | ||||
|         info_dict = self.getInfoDict() | ||||
|         self.assertEqual(info_dict, None) | ||||
|  | ||||
|     def test_allsubtitles(self): | ||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') | ||||
|         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||
|         self.DL.params['writesubtitles'] = True | ||||
|         self.DL.params['allsubtitles'] = True | ||||
|         subtitles = self.getSubtitles() | ||||
| @@ -251,20 +252,20 @@ class TestVimeoSubtitles(BaseTestSubtitles): | ||||
|         self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr'])) | ||||
|  | ||||
|     def test_list_subtitles(self): | ||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') | ||||
|         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||
|         self.DL.params['listsubtitles'] = True | ||||
|         info_dict = self.getInfoDict() | ||||
|         self.assertEqual(info_dict, None) | ||||
|  | ||||
|     def test_automatic_captions(self): | ||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') | ||||
|         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||
|         self.DL.params['writeautomaticsub'] = True | ||||
|         self.DL.params['subtitleslang'] = ['en'] | ||||
|         subtitles = self.getSubtitles() | ||||
|         self.assertTrue(len(subtitles.keys()) == 0) | ||||
|  | ||||
|     def test_nosubtitles(self): | ||||
|         self.DL.expect_warning(u'video doesn\'t have subtitles') | ||||
|         self.DL.expect_warning('video doesn\'t have subtitles') | ||||
|         self.url = 'http://vimeo.com/56015672' | ||||
|         self.DL.params['writesubtitles'] = True | ||||
|         self.DL.params['allsubtitles'] = True | ||||
| @@ -277,7 +278,7 @@ class TestVimeoSubtitles(BaseTestSubtitles): | ||||
|         self.DL.params['subtitleslangs'] = langs | ||||
|         subtitles = self.getSubtitles() | ||||
|         for lang in langs: | ||||
|             self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang) | ||||
|             self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang) | ||||
|  | ||||
|  | ||||
| class TestWallaSubtitles(BaseTestSubtitles): | ||||
| @@ -285,13 +286,13 @@ class TestWallaSubtitles(BaseTestSubtitles): | ||||
|     IE = WallaIE | ||||
|  | ||||
|     def test_list_subtitles(self): | ||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') | ||||
|         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||
|         self.DL.params['listsubtitles'] = True | ||||
|         info_dict = self.getInfoDict() | ||||
|         self.assertEqual(info_dict, None) | ||||
|  | ||||
|     def test_allsubtitles(self): | ||||
|         self.DL.expect_warning(u'Automatic Captions not supported by this server') | ||||
|         self.DL.expect_warning('Automatic Captions not supported by this server') | ||||
|         self.DL.params['writesubtitles'] = True | ||||
|         self.DL.params['allsubtitles'] = True | ||||
|         subtitles = self.getSubtitles() | ||||
| @@ -299,7 +300,7 @@ class TestWallaSubtitles(BaseTestSubtitles): | ||||
|         self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920') | ||||
|  | ||||
|     def test_nosubtitles(self): | ||||
|         self.DL.expect_warning(u'video doesn\'t have subtitles') | ||||
|         self.DL.expect_warning('video doesn\'t have subtitles') | ||||
|         self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one' | ||||
|         self.DL.params['writesubtitles'] = True | ||||
|         self.DL.params['allsubtitles'] = True | ||||
|   | ||||
| @@ -1,4 +1,5 @@ | ||||
| #!/usr/bin/env python | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| # Allow direct execution | ||||
| import os | ||||
|   | ||||
| @@ -218,6 +218,7 @@ class TestUtil(unittest.TestCase): | ||||
|         self.assertEqual(parse_duration('0m0s'), 0) | ||||
|         self.assertEqual(parse_duration('0s'), 0) | ||||
|         self.assertEqual(parse_duration('01:02:03.05'), 3723.05) | ||||
|         self.assertEqual(parse_duration('T30M38S'), 1838) | ||||
|  | ||||
|     def test_fix_xml_ampersands(self): | ||||
|         self.assertEqual( | ||||
| @@ -284,6 +285,10 @@ class TestUtil(unittest.TestCase): | ||||
|         d = json.loads(stripped) | ||||
|         self.assertEqual(d, [{"id": "532cb", "x": 3}]) | ||||
|  | ||||
|         stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc') | ||||
|         d = json.loads(stripped) | ||||
|         self.assertEqual(d, {'STATUS': 'OK'}) | ||||
|  | ||||
|     def test_uppercase_escape(self): | ||||
|         self.assertEqual(uppercase_escape('aä'), 'aä') | ||||
|         self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐') | ||||
|   | ||||
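Two small test additions above: `parse_duration('T30M38S')` is expected to give 30 * 60 + 38 = 1838 seconds, and `strip_jsonp` now has to cope with a trailing `//epc` comment after the callback wrapper. A hedged sketch of the kind of unwrapping regex that satisfies the new case (not necessarily the exact expression in youtube_dl/utils.py):

```python
import json
import re

def strip_jsonp_sketch(code):
    # Strip a 'callback( ... );' wrapper, tolerating trailing whitespace
    # and '//...' line comments after the closing parenthesis.  Sketch only.
    return re.sub(
        r'(?s)^[a-zA-Z0-9_.$]+\s*\(\s*(.*)\)\s*;?\s*(?://[^\n]*\s*)*$',
        r'\1', code)

stripped = strip_jsonp_sketch('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
print(json.loads(stripped))  # -> {'STATUS': 'OK'}
```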
| @@ -624,7 +624,7 @@ class YoutubeDL(object): | ||||
|  | ||||
|             return self.process_ie_result( | ||||
|                 new_result, download=download, extra_info=extra_info) | ||||
|         elif result_type == 'playlist': | ||||
|         elif result_type == 'playlist' or result_type == 'multi_video': | ||||
|             # We process each entry in the playlist | ||||
|             playlist = ie_result.get('title', None) or ie_result.get('id', None) | ||||
|             self.to_screen('[download] Downloading playlist: %s' % playlist) | ||||
| @@ -679,6 +679,9 @@ class YoutubeDL(object): | ||||
|             ie_result['entries'] = playlist_results | ||||
|             return ie_result | ||||
|         elif result_type == 'compat_list': | ||||
|             self.report_warning( | ||||
|                 'Extractor %s returned a compat_list result. ' | ||||
|                 'It needs to be updated.' % ie_result.get('extractor')) | ||||
|             def _fixup(r): | ||||
|                 self.add_extra_info(r, | ||||
|                     { | ||||
| @@ -1001,7 +1004,7 @@ class YoutubeDL(object): | ||||
|             else: | ||||
|                 self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn) | ||||
|                 try: | ||||
|                     write_json_file(info_dict, encodeFilename(infofn)) | ||||
|                     write_json_file(info_dict, infofn) | ||||
|                 except (OSError, IOError): | ||||
|                     self.report_error('Cannot write metadata to JSON file ' + infofn) | ||||
|                     return | ||||
|   | ||||
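The YoutubeDL changes above do three things: treat a `'multi_video'` result like a playlist, warn when an extractor still returns the legacy `'compat_list'` type, and stop pre-encoding the info-JSON filename before handing it to `write_json_file`. As a hedged illustration, an extractor returning one video split into several parts might hand `process_ie_result` something shaped like this (all values invented):

```python
# Hypothetical 'multi_video' result: one logical video made of two entries,
# each entry being an ordinary info dict (or a url_result pointing at one).
ie_result = {
    '_type': 'multi_video',
    'id': 'example-episode-1',
    'title': 'Example episode (two parts)',
    'entries': [
        {'id': 'example-part-1', 'title': 'Part 1',
         'url': 'https://example.invalid/part1.mp4'},
        {'id': 'example-part-2', 'title': 'Part 2',
         'url': 'https://example.invalid/part2.mp4'},
    ],
}
```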
| @@ -1,6 +1,8 @@ | ||||
| #!/usr/bin/env python | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| __license__ = 'Public Domain' | ||||
|  | ||||
| import codecs | ||||
| @@ -17,6 +19,7 @@ from .compat import ( | ||||
|     compat_expanduser, | ||||
|     compat_getpass, | ||||
|     compat_print, | ||||
|     workaround_optparse_bug9161, | ||||
| ) | ||||
| from .utils import ( | ||||
|     DateRange, | ||||
| @@ -55,7 +58,9 @@ def _real_main(argv=None): | ||||
|         # https://github.com/rg3/youtube-dl/issues/820 | ||||
|         codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None) | ||||
|  | ||||
|     setproctitle(u'youtube-dl') | ||||
|     workaround_optparse_bug9161() | ||||
|  | ||||
|     setproctitle('youtube-dl') | ||||
|  | ||||
|     parser, opts, args = parseOpts(argv) | ||||
|  | ||||
| @@ -71,10 +76,10 @@ def _real_main(argv=None): | ||||
|     if opts.headers is not None: | ||||
|         for h in opts.headers: | ||||
|             if h.find(':', 1) < 0: | ||||
|                 parser.error(u'wrong header formatting, it should be key:value, not "%s"'%h) | ||||
|                 parser.error('wrong header formatting, it should be key:value, not "%s"'%h) | ||||
|             key, value = h.split(':', 2) | ||||
|             if opts.verbose: | ||||
|                 write_string(u'[debug] Adding header from command line option %s:%s\n'%(key, value)) | ||||
|                 write_string('[debug] Adding header from command line option %s:%s\n'%(key, value)) | ||||
|             std_headers[key] = value | ||||
|  | ||||
|     # Dump user agent | ||||
| @@ -92,9 +97,9 @@ def _real_main(argv=None): | ||||
|                 batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore') | ||||
|             batch_urls = read_batch_urls(batchfd) | ||||
|             if opts.verbose: | ||||
|                 write_string(u'[debug] Batch file urls: ' + repr(batch_urls) + u'\n') | ||||
|                 write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n') | ||||
|         except IOError: | ||||
|             sys.exit(u'ERROR: batch file could not be read') | ||||
|             sys.exit('ERROR: batch file could not be read') | ||||
|     all_urls = batch_urls + args | ||||
|     all_urls = [url.strip() for url in all_urls] | ||||
|     _enc = preferredencoding() | ||||
| @@ -107,7 +112,7 @@ def _real_main(argv=None): | ||||
|             compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else '')) | ||||
|             matchedUrls = [url for url in all_urls if ie.suitable(url)] | ||||
|             for mu in matchedUrls: | ||||
|                 compat_print(u'  ' + mu) | ||||
|                 compat_print('  ' + mu) | ||||
|         sys.exit(0) | ||||
|     if opts.list_extractor_descriptions: | ||||
|         for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()): | ||||
| @@ -117,63 +122,63 @@ def _real_main(argv=None): | ||||
|             if desc is False: | ||||
|                 continue | ||||
|             if hasattr(ie, 'SEARCH_KEY'): | ||||
|                 _SEARCHES = (u'cute kittens', u'slithering pythons', u'falling cat', u'angry poodle', u'purple fish', u'running tortoise', u'sleeping bunny') | ||||
|                 _COUNTS = (u'', u'5', u'10', u'all') | ||||
|                 desc += u' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES)) | ||||
|                 _SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny') | ||||
|                 _COUNTS = ('', '5', '10', 'all') | ||||
|                 desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES)) | ||||
|             compat_print(desc) | ||||
|         sys.exit(0) | ||||
|  | ||||
|  | ||||
|     # Conflicting, missing and erroneous options | ||||
|     if opts.usenetrc and (opts.username is not None or opts.password is not None): | ||||
|         parser.error(u'using .netrc conflicts with giving username/password') | ||||
|         parser.error('using .netrc conflicts with giving username/password') | ||||
|     if opts.password is not None and opts.username is None: | ||||
|         parser.error(u'account username missing\n') | ||||
|         parser.error('account username missing\n') | ||||
|     if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid): | ||||
|         parser.error(u'using output template conflicts with using title, video ID or auto number') | ||||
|         parser.error('using output template conflicts with using title, video ID or auto number') | ||||
|     if opts.usetitle and opts.useid: | ||||
|         parser.error(u'using title conflicts with using video ID') | ||||
|         parser.error('using title conflicts with using video ID') | ||||
|     if opts.username is not None and opts.password is None: | ||||
|         opts.password = compat_getpass(u'Type account password and press [Return]: ') | ||||
|         opts.password = compat_getpass('Type account password and press [Return]: ') | ||||
|     if opts.ratelimit is not None: | ||||
|         numeric_limit = FileDownloader.parse_bytes(opts.ratelimit) | ||||
|         if numeric_limit is None: | ||||
|             parser.error(u'invalid rate limit specified') | ||||
|             parser.error('invalid rate limit specified') | ||||
|         opts.ratelimit = numeric_limit | ||||
|     if opts.min_filesize is not None: | ||||
|         numeric_limit = FileDownloader.parse_bytes(opts.min_filesize) | ||||
|         if numeric_limit is None: | ||||
|             parser.error(u'invalid min_filesize specified') | ||||
|             parser.error('invalid min_filesize specified') | ||||
|         opts.min_filesize = numeric_limit | ||||
|     if opts.max_filesize is not None: | ||||
|         numeric_limit = FileDownloader.parse_bytes(opts.max_filesize) | ||||
|         if numeric_limit is None: | ||||
|             parser.error(u'invalid max_filesize specified') | ||||
|             parser.error('invalid max_filesize specified') | ||||
|         opts.max_filesize = numeric_limit | ||||
|     if opts.retries is not None: | ||||
|         try: | ||||
|             opts.retries = int(opts.retries) | ||||
|         except (TypeError, ValueError): | ||||
|             parser.error(u'invalid retry count specified') | ||||
|             parser.error('invalid retry count specified') | ||||
|     if opts.buffersize is not None: | ||||
|         numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize) | ||||
|         if numeric_buffersize is None: | ||||
|             parser.error(u'invalid buffer size specified') | ||||
|             parser.error('invalid buffer size specified') | ||||
|         opts.buffersize = numeric_buffersize | ||||
|     if opts.playliststart <= 0: | ||||
|         raise ValueError(u'Playlist start must be positive') | ||||
|         raise ValueError('Playlist start must be positive') | ||||
|     if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart: | ||||
|         raise ValueError(u'Playlist end must be greater than playlist start') | ||||
|         raise ValueError('Playlist end must be greater than playlist start') | ||||
|     if opts.extractaudio: | ||||
|         if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']: | ||||
|             parser.error(u'invalid audio format specified') | ||||
|             parser.error('invalid audio format specified') | ||||
|     if opts.audioquality: | ||||
|         opts.audioquality = opts.audioquality.strip('k').strip('K') | ||||
|         if not opts.audioquality.isdigit(): | ||||
|             parser.error(u'invalid audio quality specified') | ||||
|             parser.error('invalid audio quality specified') | ||||
|     if opts.recodevideo is not None: | ||||
|         if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv']: | ||||
|             parser.error(u'invalid video recode format specified') | ||||
|             parser.error('invalid video recode format specified') | ||||
|     if opts.date is not None: | ||||
|         date = DateRange.day(opts.date) | ||||
|     else: | ||||
| @@ -193,17 +198,17 @@ def _real_main(argv=None): | ||||
|         if opts.outtmpl is not None: | ||||
|             opts.outtmpl = opts.outtmpl.decode(preferredencoding()) | ||||
|     outtmpl =((opts.outtmpl is not None and opts.outtmpl) | ||||
|             or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s') | ||||
|             or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s') | ||||
|             or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s') | ||||
|             or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s') | ||||
|             or (opts.useid and u'%(id)s.%(ext)s') | ||||
|             or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s') | ||||
|             or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') | ||||
|             or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') | ||||
|             or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') | ||||
|             or (opts.usetitle and '%(title)s-%(id)s.%(ext)s') | ||||
|             or (opts.useid and '%(id)s.%(ext)s') | ||||
|             or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') | ||||
|             or DEFAULT_OUTTMPL) | ||||
|     if not os.path.splitext(outtmpl)[1] and opts.extractaudio: | ||||
|         parser.error(u'Cannot download a video and extract audio into the same' | ||||
|                      u' file! Use "{0}.%(ext)s" instead of "{0}" as the output' | ||||
|                      u' template'.format(outtmpl)) | ||||
|         parser.error('Cannot download a video and extract audio into the same' | ||||
|                      ' file! Use "{0}.%(ext)s" instead of "{0}" as the output' | ||||
|                      ' template'.format(outtmpl)) | ||||
|  | ||||
|     any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json | ||||
|     download_archive_fn = compat_expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive | ||||
| @@ -330,7 +335,7 @@ def _real_main(argv=None): | ||||
|         # Maybe do nothing | ||||
|         if (len(all_urls) < 1) and (opts.load_info_filename is None): | ||||
|             if not (opts.update_self or opts.rm_cachedir): | ||||
|                 parser.error(u'you must provide at least one URL') | ||||
|                 parser.error('you must provide at least one URL') | ||||
|             else: | ||||
|                 sys.exit() | ||||
|  | ||||
| @@ -340,7 +345,7 @@ def _real_main(argv=None): | ||||
|             else: | ||||
|                 retcode = ydl.download(all_urls) | ||||
|         except MaxDownloadsReached: | ||||
|             ydl.to_screen(u'--max-download limit reached, aborting.') | ||||
|             ydl.to_screen('--max-download limit reached, aborting.') | ||||
|             retcode = 101 | ||||
|  | ||||
|     sys.exit(retcode) | ||||
| @@ -352,6 +357,6 @@ def main(argv=None): | ||||
|     except DownloadError: | ||||
|         sys.exit(1) | ||||
|     except SameFileError: | ||||
|         sys.exit(u'ERROR: fixed output name but more than one file to download') | ||||
|         sys.exit('ERROR: fixed output name but more than one file to download') | ||||
|     except KeyboardInterrupt: | ||||
|         sys.exit(u'\nERROR: Interrupted by user') | ||||
|         sys.exit('\nERROR: Interrupted by user') | ||||
|   | ||||
| @@ -8,7 +8,7 @@ import re | ||||
| import shutil | ||||
| import traceback | ||||
|  | ||||
| from .compat import compat_expanduser | ||||
| from .compat import compat_expanduser, compat_getenv | ||||
| from .utils import write_json_file | ||||
|  | ||||
|  | ||||
| @@ -19,7 +19,7 @@ class Cache(object): | ||||
|     def _get_root_dir(self): | ||||
|         res = self._ydl.params.get('cachedir') | ||||
|         if res is None: | ||||
|             cache_root = os.environ.get('XDG_CACHE_HOME', '~/.cache') | ||||
|             cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache') | ||||
|             res = os.path.join(cache_root, 'youtube-dl') | ||||
|         return compat_expanduser(res) | ||||
|  | ||||
|   | ||||
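Reading `XDG_CACHE_HOME` through `compat_getenv` keeps the value as text on Python 2, where `os.environ` contains byte strings. A hedged sketch of what such a wrapper does (not necessarily the exact code in youtube_dl/compat.py):

```python
import os
import sys

def compat_getenv_sketch(key, default=None):
    # os.getenv() returns str on Python 3 but bytes on Python 2; decode the
    # latter with the filesystem encoding so callers always see text.
    env = os.getenv(key, default)
    if env is not None and isinstance(env, bytes):
        env = env.decode(sys.getfilesystemencoding() or 'utf-8')
    return env
```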
| @@ -1,6 +1,7 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import getpass | ||||
| import optparse | ||||
| import os | ||||
| import subprocess | ||||
| import sys | ||||
| @@ -288,6 +289,36 @@ if sys.version_info < (3, 0) and sys.platform == 'win32': | ||||
| else: | ||||
|     compat_getpass = getpass.getpass | ||||
|  | ||||
| # Old 2.6 and 2.7 releases require kwargs to be bytes | ||||
| try: | ||||
|     (lambda x: x)(**{'x': 0}) | ||||
| except TypeError: | ||||
|     def compat_kwargs(kwargs): | ||||
|         return dict((bytes(k), v) for k, v in kwargs.items()) | ||||
| else: | ||||
|     compat_kwargs = lambda kwargs: kwargs | ||||
|  | ||||
|  | ||||
| # Fix https://github.com/rg3/youtube-dl/issues/4223 | ||||
| # See http://bugs.python.org/issue9161 for what is broken | ||||
| def workaround_optparse_bug9161(): | ||||
|     op = optparse.OptionParser() | ||||
|     og = optparse.OptionGroup(op, 'foo') | ||||
|     try: | ||||
|         og.add_option('-t') | ||||
|     except TypeError: | ||||
|         real_add_option = optparse.OptionGroup.add_option | ||||
|  | ||||
|         def _compat_add_option(self, *args, **kwargs): | ||||
|             enc = lambda v: ( | ||||
|                 v.encode('ascii', 'replace') if isinstance(v, compat_str) | ||||
|                 else v) | ||||
|             bargs = [enc(a) for a in args] | ||||
|             bkwargs = dict( | ||||
|                 (k, enc(v)) for k, v in kwargs.items()) | ||||
|             return real_add_option(self, *bargs, **bkwargs) | ||||
|         optparse.OptionGroup.add_option = _compat_add_option | ||||
|  | ||||
|  | ||||
| __all__ = [ | ||||
|     'compat_HTTPError', | ||||
| @@ -299,6 +330,7 @@ __all__ = [ | ||||
|     'compat_html_entities', | ||||
|     'compat_html_parser', | ||||
|     'compat_http_client', | ||||
|     'compat_kwargs', | ||||
|     'compat_ord', | ||||
|     'compat_parse_qs', | ||||
|     'compat_print', | ||||
| @@ -314,4 +346,5 @@ __all__ = [ | ||||
|     'compat_xml_parse_error', | ||||
|     'shlex_quote', | ||||
|     'subprocess_check_output', | ||||
|     'workaround_optparse_bug9161', | ||||
| ] | ||||
|   | ||||
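Both compat additions above deal with Python 2 quirks exposed by `unicode_literals`: some 2.6/2.7 releases reject unicode keyword-argument names (hence `compat_kwargs`), and optparse chokes on unicode strings (bug 9161), which `workaround_optparse_bug9161` patches in place and `__init__.py` now calls at startup. A hedged usage sketch (the option and its kwargs are placeholders):

```python
import optparse

from youtube_dl.compat import compat_kwargs, workaround_optparse_bug9161

workaround_optparse_bug9161()  # monkey-patch optparse once, early on

parser = optparse.OptionParser()
group = optparse.OptionGroup(parser, 'Example options')
# Under unicode_literals these keyword names are unicode; compat_kwargs
# converts the keys back to bytes on the affected Python 2 releases.
group.add_option('-x', **compat_kwargs({'dest': 'example', 'help': 'placeholder'}))
parser.add_option_group(group)
```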
| @@ -1,3 +1,5 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import os | ||||
| import re | ||||
| import sys | ||||
| @@ -159,14 +161,14 @@ class FileDownloader(object): | ||||
|  | ||||
|     def temp_name(self, filename): | ||||
|         """Returns a temporary filename for the given filename.""" | ||||
|         if self.params.get('nopart', False) or filename == u'-' or \ | ||||
|         if self.params.get('nopart', False) or filename == '-' or \ | ||||
|                 (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))): | ||||
|             return filename | ||||
|         return filename + u'.part' | ||||
|         return filename + '.part' | ||||
|  | ||||
|     def undo_temp_name(self, filename): | ||||
|         if filename.endswith(u'.part'): | ||||
|             return filename[:-len(u'.part')] | ||||
|         if filename.endswith('.part'): | ||||
|             return filename[:-len('.part')] | ||||
|         return filename | ||||
|  | ||||
|     def try_rename(self, old_filename, new_filename): | ||||
| @@ -175,7 +177,7 @@ class FileDownloader(object): | ||||
|                 return | ||||
|             os.rename(encodeFilename(old_filename), encodeFilename(new_filename)) | ||||
|         except (IOError, OSError) as err: | ||||
|             self.report_error(u'unable to rename file: %s' % compat_str(err)) | ||||
|             self.report_error('unable to rename file: %s' % compat_str(err)) | ||||
|  | ||||
|     def try_utime(self, filename, last_modified_hdr): | ||||
|         """Try to set the last-modified time of the given file.""" | ||||
| @@ -200,10 +202,10 @@ class FileDownloader(object): | ||||
|  | ||||
|     def report_destination(self, filename): | ||||
|         """Report destination filename.""" | ||||
|         self.to_screen(u'[download] Destination: ' + filename) | ||||
|         self.to_screen('[download] Destination: ' + filename) | ||||
|  | ||||
|     def _report_progress_status(self, msg, is_last_line=False): | ||||
|         fullmsg = u'[download] ' + msg | ||||
|         fullmsg = '[download] ' + msg | ||||
|         if self.params.get('progress_with_newline', False): | ||||
|             self.to_screen(fullmsg) | ||||
|         else: | ||||
| @@ -211,13 +213,13 @@ class FileDownloader(object): | ||||
|                 prev_len = getattr(self, '_report_progress_prev_line_length', | ||||
|                                    0) | ||||
|                 if prev_len > len(fullmsg): | ||||
|                     fullmsg += u' ' * (prev_len - len(fullmsg)) | ||||
|                     fullmsg += ' ' * (prev_len - len(fullmsg)) | ||||
|                 self._report_progress_prev_line_length = len(fullmsg) | ||||
|                 clear_line = u'\r' | ||||
|                 clear_line = '\r' | ||||
|             else: | ||||
|                 clear_line = (u'\r\x1b[K' if sys.stderr.isatty() else u'\r') | ||||
|                 clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r') | ||||
|             self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line) | ||||
|         self.to_console_title(u'youtube-dl ' + msg) | ||||
|         self.to_console_title('youtube-dl ' + msg) | ||||
|  | ||||
|     def report_progress(self, percent, data_len_str, speed, eta): | ||||
|         """Report download progress.""" | ||||
| @@ -233,7 +235,7 @@ class FileDownloader(object): | ||||
|             percent_str = 'Unknown %' | ||||
|         speed_str = self.format_speed(speed) | ||||
|  | ||||
|         msg = (u'%s of %s at %s ETA %s' % | ||||
|         msg = ('%s of %s at %s ETA %s' % | ||||
|                (percent_str, data_len_str, speed_str, eta_str)) | ||||
|         self._report_progress_status(msg) | ||||
|  | ||||
| @@ -243,37 +245,37 @@ class FileDownloader(object): | ||||
|         downloaded_str = format_bytes(downloaded_data_len) | ||||
|         speed_str = self.format_speed(speed) | ||||
|         elapsed_str = FileDownloader.format_seconds(elapsed) | ||||
|         msg = u'%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str) | ||||
|         msg = '%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str) | ||||
|         self._report_progress_status(msg) | ||||
|  | ||||
|     def report_finish(self, data_len_str, tot_time): | ||||
|         """Report download finished.""" | ||||
|         if self.params.get('noprogress', False): | ||||
|             self.to_screen(u'[download] Download completed') | ||||
|             self.to_screen('[download] Download completed') | ||||
|         else: | ||||
|             self._report_progress_status( | ||||
|                 (u'100%% of %s in %s' % | ||||
|                 ('100%% of %s in %s' % | ||||
|                  (data_len_str, self.format_seconds(tot_time))), | ||||
|                 is_last_line=True) | ||||
|  | ||||
|     def report_resuming_byte(self, resume_len): | ||||
|         """Report attempt to resume at given byte.""" | ||||
|         self.to_screen(u'[download] Resuming download at byte %s' % resume_len) | ||||
|         self.to_screen('[download] Resuming download at byte %s' % resume_len) | ||||
|  | ||||
|     def report_retry(self, count, retries): | ||||
|         """Report retry in case of HTTP error 5xx""" | ||||
|         self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries)) | ||||
|         self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries)) | ||||
|  | ||||
|     def report_file_already_downloaded(self, file_name): | ||||
|         """Report file has already been fully downloaded.""" | ||||
|         try: | ||||
|             self.to_screen(u'[download] %s has already been downloaded' % file_name) | ||||
|             self.to_screen('[download] %s has already been downloaded' % file_name) | ||||
|         except UnicodeEncodeError: | ||||
|             self.to_screen(u'[download] The file has already been downloaded') | ||||
|             self.to_screen('[download] The file has already been downloaded') | ||||
|  | ||||
|     def report_unable_to_resume(self): | ||||
|         """Report it was impossible to resume download.""" | ||||
|         self.to_screen(u'[download] Unable to resume') | ||||
|         self.to_screen('[download] Unable to resume') | ||||
|  | ||||
|     def download(self, filename, info_dict): | ||||
|         """Download to a filename using the info from info_dict | ||||
| @@ -293,7 +295,7 @@ class FileDownloader(object): | ||||
|  | ||||
|     def real_download(self, filename, info_dict): | ||||
|         """Real download process. Redefine in subclasses.""" | ||||
|         raise NotImplementedError(u'This method must be implemented by subclasses') | ||||
|         raise NotImplementedError('This method must be implemented by subclasses') | ||||
|  | ||||
|     def _hook_progress(self, status): | ||||
|         for ph in self._progress_hooks: | ||||
|   | ||||
| @@ -115,6 +115,7 @@ from .fktv import ( | ||||
|     FKTVPosteckeIE, | ||||
| ) | ||||
| from .flickr import FlickrIE | ||||
| from .folketinget import FolketingetIE | ||||
| from .fourtube import FourTubeIE | ||||
| from .franceculture import FranceCultureIE | ||||
| from .franceinter import FranceInterIE | ||||
| @@ -127,6 +128,7 @@ from .francetv import ( | ||||
| ) | ||||
| from .freesound import FreesoundIE | ||||
| from .freespeech import FreespeechIE | ||||
| from .freevideo import FreeVideoIE | ||||
| from .funnyordie import FunnyOrDieIE | ||||
| from .gamekings import GamekingsIE | ||||
| from .gameone import ( | ||||
| @@ -378,6 +380,7 @@ from .teachingchannel import TeachingChannelIE | ||||
| from .teamcoco import TeamcocoIE | ||||
| from .techtalks import TechTalksIE | ||||
| from .ted import TEDIE | ||||
| from .telebruxelles import TeleBruxellesIE | ||||
| from .telecinco import TelecincoIE | ||||
| from .telemb import TeleMBIE | ||||
| from .tenplay import TenPlayIE | ||||
|   | ||||
| @@ -5,13 +5,12 @@ import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     find_xpath_attr, | ||||
|     unified_strdate, | ||||
|     determine_ext, | ||||
|     get_element_by_id, | ||||
|     get_element_by_attribute, | ||||
|     int_or_none, | ||||
|     qualities, | ||||
| ) | ||||
|  | ||||
| # There are different sources of video in arte.tv, the extraction process  | ||||
| @@ -102,79 +101,54 @@ class ArteTVPlus7IE(InfoExtractor): | ||||
|             'upload_date': unified_strdate(upload_date_str), | ||||
|             'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'), | ||||
|         } | ||||
|         qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ']) | ||||
|  | ||||
|         all_formats = [] | ||||
|         formats = [] | ||||
|         for format_id, format_dict in player_info['VSR'].items(): | ||||
|             fmt = dict(format_dict) | ||||
|             fmt['format_id'] = format_id | ||||
|             all_formats.append(fmt) | ||||
|         # Some formats use the m3u8 protocol | ||||
|         all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats)) | ||||
|         def _match_lang(f): | ||||
|             if f.get('versionCode') is None: | ||||
|                 return True | ||||
|             # Return true if that format is in the language of the url | ||||
|             if lang == 'fr': | ||||
|                 l = 'F' | ||||
|             elif lang == 'de': | ||||
|                 l = 'A' | ||||
|             else: | ||||
|                 l = lang | ||||
|             regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l] | ||||
|             return any(re.match(r, f['versionCode']) for r in regexes) | ||||
|         # Some formats may not be in the same language as the url | ||||
|         # TODO: Might want not to drop videos that does not match requested language | ||||
|         # but to process those formats with lower precedence | ||||
|         formats = filter(_match_lang, all_formats) | ||||
|         formats = list(formats)  # in python3 filter returns an iterator | ||||
|         if not formats: | ||||
|             # Some videos are only available in the 'Originalversion' | ||||
|             # they aren't tagged as being in French or German | ||||
|             # Sometimes there are neither videos of requested lang code | ||||
|             # nor original version videos available | ||||
|             # For such cases we just take all_formats as is | ||||
|             formats = all_formats | ||||
|             if not formats: | ||||
|                 raise ExtractorError('The formats list is empty') | ||||
|             f = dict(format_dict) | ||||
|             versionCode = f.get('versionCode') | ||||
|  | ||||
|         if re.match(r'[A-Z]Q', formats[0]['quality']) is not None: | ||||
|             def sort_key(f): | ||||
|                 return ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality']) | ||||
|         else: | ||||
|             def sort_key(f): | ||||
|                 versionCode = f.get('versionCode') | ||||
|                 if versionCode is None: | ||||
|                     versionCode = '' | ||||
|                 return ( | ||||
|                     # Sort first by quality | ||||
|                     int(f.get('height', -1)), | ||||
|                     int(f.get('bitrate', -1)), | ||||
|                     # The original version with subtitles has lower relevance | ||||
|                     re.match(r'VO-ST(F|A)', versionCode) is None, | ||||
|                     # The version with sourds/mal subtitles has also lower relevance | ||||
|                     re.match(r'VO?(F|A)-STM\1', versionCode) is None, | ||||
|                     # Prefer http downloads over m3u8 | ||||
|                     0 if f['url'].endswith('m3u8') else 1, | ||||
|                 ) | ||||
|         formats = sorted(formats, key=sort_key) | ||||
|         def _format(format_info): | ||||
|             info = { | ||||
|                 'format_id': format_info['format_id'], | ||||
|                 'format_note': '%s, %s' % (format_info.get('versionCode'), format_info.get('versionLibelle')), | ||||
|                 'width': int_or_none(format_info.get('width')), | ||||
|                 'height': int_or_none(format_info.get('height')), | ||||
|                 'tbr': int_or_none(format_info.get('bitrate')), | ||||
|             langcode = { | ||||
|                 'fr': 'F', | ||||
|                 'de': 'A', | ||||
|             }.get(lang, lang) | ||||
|             lang_rexs = [r'VO?%s' % langcode, r'VO?.-ST%s' % langcode] | ||||
|             lang_pref = ( | ||||
|                 None if versionCode is None else ( | ||||
|                     10 if any(re.match(r, versionCode) for r in lang_rexs) | ||||
|                     else -10)) | ||||
|             source_pref = 0 | ||||
|             if versionCode is not None: | ||||
|                 # The original version with subtitles has lower relevance | ||||
|                 if re.match(r'VO-ST(F|A)', versionCode): | ||||
|                     source_pref -= 10 | ||||
|                 # The version with sourds/mal subtitles has also lower relevance | ||||
|                 elif re.match(r'VO?(F|A)-STM\1', versionCode): | ||||
|                     source_pref -= 9 | ||||
|             format = { | ||||
|                 'format_id': format_id, | ||||
|                 'preference': -10 if f.get('videoFormat') == 'M3U8' else None, | ||||
|                 'language_preference': lang_pref, | ||||
|                 'format_note': '%s, %s' % (f.get('versionCode'), f.get('versionLibelle')), | ||||
|                 'width': int_or_none(f.get('width')), | ||||
|                 'height': int_or_none(f.get('height')), | ||||
|                 'tbr': int_or_none(f.get('bitrate')), | ||||
|                 'quality': qfunc(f['quality']), | ||||
|                 'source_preference': source_pref, | ||||
|             } | ||||
|             if format_info['mediaType'] == 'rtmp': | ||||
|                 info['url'] = format_info['streamer'] | ||||
|                 info['play_path'] = 'mp4:' + format_info['url'] | ||||
|                 info['ext'] = 'flv' | ||||
|             else: | ||||
|                 info['url'] = format_info['url'] | ||||
|                 info['ext'] = determine_ext(info['url']) | ||||
|             return info | ||||
|         info_dict['formats'] = [_format(f) for f in formats] | ||||
|  | ||||
|             if f.get('mediaType') == 'rtmp': | ||||
|                 format['url'] = f['streamer'] | ||||
|                 format['play_path'] = 'mp4:' + f['url'] | ||||
|                 format['ext'] = 'flv' | ||||
|             else: | ||||
|                 format['url'] = f['url'] | ||||
|  | ||||
|             formats.append(format) | ||||
|  | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         info_dict['formats'] = formats | ||||
|         return info_dict | ||||
|  | ||||
|  | ||||
|   | ||||
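Rather than hand-sorting, the rewritten arte.tv extractor now annotates each format with `quality`, `language_preference`, `preference` and `source_preference` and leaves the ordering to the shared `_sort_formats` helper. For reference, a hedged approximation of what the `qualities()` factory imported here does (the real helper lives in youtube_dl/utils.py):

```python
def qualities_sketch(quality_ids):
    # Map a quality label to its rank in quality_ids (later = better),
    # or -1 for unknown labels.  Approximation for illustration.
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q

qfunc = qualities_sketch(['HQ', 'MQ', 'EQ', 'SQ'])
print(qfunc('SQ'), qfunc('HQ'), qfunc('unknown'))  # -> 3 0 -1
```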
| @@ -71,11 +71,12 @@ class BlipTVIE(SubtitlesInfoExtractor): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         lookup_id = mobj.group('lookup_id') | ||||
|  | ||||
|         # See https://github.com/rg3/youtube-dl/issues/857 | ||||
|         # See https://github.com/rg3/youtube-dl/issues/857 and | ||||
|         # https://github.com/rg3/youtube-dl/issues/4197 | ||||
|         if lookup_id: | ||||
|             info_page = self._download_webpage( | ||||
|                 'http://blip.tv/play/%s.x?p=1' % lookup_id, lookup_id, 'Resolving lookup id') | ||||
|             video_id = self._search_regex(r'data-episode-id="([0-9]+)', info_page, 'video_id') | ||||
|             video_id = self._search_regex(r'config\.id\s*=\s*"([0-9]+)', info_page, 'video_id') | ||||
|         else: | ||||
|             video_id = mobj.group('id') | ||||
|  | ||||
| @@ -165,9 +166,17 @@ class BlipTVIE(SubtitlesInfoExtractor): | ||||
|  | ||||
|  | ||||
| class BlipTVUserIE(InfoExtractor): | ||||
|     _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)(?!api\.swf)([^/]+)/*$' | ||||
|     _VALID_URL = r'(?:(?:https?://(?:\w+\.)?blip\.tv/)|bliptvuser:)(?!api\.swf)([^/]+)/*$' | ||||
|     _PAGE_SIZE = 12 | ||||
|     IE_NAME = 'blip.tv:user' | ||||
|     _TEST = { | ||||
|         'url': 'http://blip.tv/actone', | ||||
|         'info_dict': { | ||||
|             'id': 'actone', | ||||
|             'title': 'Act One: The Series', | ||||
|         }, | ||||
|         'playlist_count': 5, | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
| @@ -178,6 +187,7 @@ class BlipTVUserIE(InfoExtractor): | ||||
|         page = self._download_webpage(url, username, 'Downloading user page') | ||||
|         mobj = re.search(r'data-users-id="([^"]+)"', page) | ||||
|         page_base = page_base % mobj.group(1) | ||||
|         title = self._og_search_title(page) | ||||
|  | ||||
|         # Download video ids using BlipTV Ajax calls. Result size per | ||||
|         # query is limited (currently to 12 videos) so we need to query | ||||
| @@ -214,4 +224,5 @@ class BlipTVUserIE(InfoExtractor): | ||||
|  | ||||
|         urls = ['http://blip.tv/%s' % video_id for video_id in video_ids] | ||||
|         url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls] | ||||
|         return [self.playlist_result(url_entries, playlist_title=username)] | ||||
|         return self.playlist_result( | ||||
|             url_entries, playlist_title=title, playlist_id=username) | ||||
|   | ||||
| @@ -111,6 +111,8 @@ class BrightcoveIE(InfoExtractor): | ||||
|                             lambda m: m.group(1) + '/>', object_str) | ||||
|         # Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608 | ||||
|         object_str = object_str.replace('<--', '<!--') | ||||
|         # remove namespace to simplify extraction | ||||
|         object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str) | ||||
|         object_str = fix_xml_ampersands(object_str) | ||||
|  | ||||
|         object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8')) | ||||
| @@ -219,7 +221,7 @@ class BrightcoveIE(InfoExtractor): | ||||
|         webpage = self._download_webpage(req, video_id) | ||||
|  | ||||
|         error_msg = self._html_search_regex( | ||||
|             r"<h1>We're sorry.</h1>\s*<p>(.*?)</p>", webpage, | ||||
|             r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage, | ||||
|             'error message', default=None) | ||||
|         if error_msg is not None: | ||||
|             raise ExtractorError( | ||||
|   | ||||
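The extra substitution in the Brightcove extractor above strips the xmlns attribute from the <object> tag so the subsequent ElementTree lookups work without namespace prefixes. A minimal standalone check on a hypothetical embed snippet (the attribute names and value are made up):

    import re
    import xml.etree.ElementTree as ET

    # Hypothetical embed markup; real Brightcove pages carry many more attributes.
    object_str = ('<object id="myExperience" class="BrightcoveExperience" '
                  'xmlns="http://www.w3.org/1999/xhtml">'
                  '<param name="@videoPlayer" value="1234567890"/></object>')

    # Same substitution as in the hunk above: drop xmlns="..." from <object ...>
    # so plain tag names work in find() afterwards.
    object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str)

    object_doc = ET.fromstring(object_str)
    print(object_doc.find('param').attrib['value'])  # 1234567890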
| @@ -2,7 +2,6 @@ from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from .mtv import MTVServicesInfoExtractor | ||||
| from ..utils import ( | ||||
|     compat_str, | ||||
| @@ -31,7 +30,7 @@ class ComedyCentralIE(MTVServicesInfoExtractor): | ||||
|     } | ||||
|  | ||||
|  | ||||
| class ComedyCentralShowsIE(InfoExtractor): | ||||
| class ComedyCentralShowsIE(MTVServicesInfoExtractor): | ||||
|     IE_DESC = 'The Daily Show / The Colbert Report' | ||||
|     # urls can be abbreviations like :thedailyshow or :colbert | ||||
|     # urls for episodes like: | ||||
| @@ -109,18 +108,8 @@ class ComedyCentralShowsIE(InfoExtractor): | ||||
|         '400': (384, 216), | ||||
|     } | ||||
|  | ||||
|     @staticmethod | ||||
|     def _transform_rtmp_url(rtmp_video_url): | ||||
|         m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\.comedystor/.*)$', rtmp_video_url) | ||||
|         if not m: | ||||
|             raise ExtractorError('Cannot transform RTMP url') | ||||
|         base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/' | ||||
|         return base + m.group('finalid') | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url, re.VERBOSE) | ||||
|         if mobj is None: | ||||
|             raise ExtractorError('Invalid URL: %s' % url) | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|  | ||||
|         if mobj.group('shortname'): | ||||
|             if mobj.group('shortname') in ('tds', 'thedailyshow'): | ||||
| @@ -212,9 +201,6 @@ class ComedyCentralShowsIE(InfoExtractor): | ||||
|                     'ext': self._video_extensions.get(format, 'mp4'), | ||||
|                     'height': h, | ||||
|                     'width': w, | ||||
|  | ||||
|                     'format_note': 'HTTP 400 at the moment (patches welcome!)', | ||||
|                     'preference': -100, | ||||
|                 }) | ||||
|                 formats.append({ | ||||
|                     'format_id': 'rtmp-%s' % format, | ||||
|   | ||||
| @@ -43,7 +43,11 @@ class InfoExtractor(object): | ||||
|     information possibly downloading the video to the file system, among | ||||
|     other possible outcomes. | ||||
|  | ||||
|     The dictionaries must include the following fields: | ||||
|     The type field determines the type of the result. | ||||
|     By far the most common value (and the default if _type is missing) is | ||||
|     "video", which indicates a single video. | ||||
|  | ||||
|     For a video, the dictionaries must include the following fields: | ||||
|  | ||||
|     id:             Video identifier. | ||||
|     title:          Video title, unescaped. | ||||
| @@ -87,6 +91,11 @@ class InfoExtractor(object): | ||||
|                                  by this field, regardless of all other values. | ||||
|                                  -1 for default (order by other properties), | ||||
|                                  -2 or smaller for less than default. | ||||
|                     * language_preference  Is this in the correct requested | ||||
|                                  language? | ||||
|                                  10 if it's what the URL is about, | ||||
|                                  -1 for default (don't know), | ||||
|                                  -10 otherwise, other values reserved for now. | ||||
|                     * quality    Order number of the video quality of this | ||||
|                                  format, irrespective of the file format. | ||||
|                                  -1 for default (order by other properties), | ||||
| @@ -146,6 +155,38 @@ class InfoExtractor(object): | ||||
|  | ||||
|     Unless mentioned otherwise, None is equivalent to absence of information. | ||||
|  | ||||
|  | ||||
|     _type "playlist" indicates multiple videos. | ||||
|     There must be a key "entries", which is a list or a PagedList object, each | ||||
|     element of which is a valid dictionary under this specification. | ||||
|  | ||||
|     Additionally, playlists can have "title" and "id" attributes with the same | ||||
|     semantics as videos (see above). | ||||
|  | ||||
|  | ||||
|     _type "multi_video" indicates that there are multiple videos that | ||||
|     form a single show, for example multiple acts of an opera or TV episode. | ||||
|     It must have an entries key like a playlist and contain all the keys | ||||
|     required for a video at the same time. | ||||
|  | ||||
|  | ||||
|     _type "url" indicates that the video must be extracted from another | ||||
|     location, possibly by a different extractor. Its only required key is: | ||||
|     "url" - the next URL to extract. | ||||
|  | ||||
|     Additionally, it may have properties believed to be identical to the | ||||
|     resolved entity, for example "title" if the title of the referred video is | ||||
|     known ahead of time. | ||||
|  | ||||
|  | ||||
|     _type "url_transparent" entities have the same specification as "url", but | ||||
|     indicate that the given additional information is more precise than the one | ||||
|     associated with the resolved URL. | ||||
|     This is useful when a site employs a video service that hosts the video and | ||||
|     its technical metadata, but that video service does not embed a useful | ||||
|     title, description etc. | ||||
|  | ||||
|  | ||||
|     Subclasses of this one should re-define the _real_initialize() and | ||||
|     _real_extract() methods and define a _VALID_URL regexp. | ||||
|     Probably, they should also be added to the list of extractors. | ||||
| @@ -615,6 +656,7 @@ class InfoExtractor(object): | ||||
|  | ||||
|             return ( | ||||
|                 preference, | ||||
|                 f.get('language_preference') if f.get('language_preference') is not None else -1, | ||||
|                 f.get('quality') if f.get('quality') is not None else -1, | ||||
|                 f.get('height') if f.get('height') is not None else -1, | ||||
|                 f.get('width') if f.get('width') is not None else -1, | ||||
|   | ||||
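The expanded InfoExtractor docstring above distinguishes result dictionaries by their _type. A hedged sketch of what a "playlist" and a "url_transparent" result could look like; the URLs, ids and titles are invented:

    # A "playlist" result: only "entries" is required; "id" and "title" have the
    # same meaning as for a single video.
    playlist_result = {
        '_type': 'playlist',
        'id': 'example-playlist',
        'title': 'Example playlist',
        'entries': [
            {'_type': 'url', 'url': 'http://example.com/videos/1'},
            {'_type': 'url', 'url': 'http://example.com/videos/2'},
        ],
    }

    # A "url_transparent" result: extraction is delegated to whatever extractor
    # handles 'url', but the metadata supplied here takes precedence over what
    # that extractor finds (useful when the embedding page has the better title).
    url_transparent_result = {
        '_type': 'url_transparent',
        'url': 'http://videohost.example.com/embed/abcd1234',
        'title': 'Title taken from the embedding page',
    }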
| @@ -264,8 +264,6 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text | ||||
|             if not lang_code: | ||||
|                 continue | ||||
|             sub_root = xml.etree.ElementTree.fromstring(subtitle) | ||||
|             if not sub_root: | ||||
|                 subtitles[lang_code] = '' | ||||
|             if sub_format == 'ass': | ||||
|                 subtitles[lang_code] = self._convert_subtitles_to_ass(sub_root) | ||||
|             else: | ||||
|   | ||||
| @@ -20,7 +20,7 @@ class EpornerIE(InfoExtractor): | ||||
|             'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Infamous Tiffany Teen Strip Tease Video', | ||||
|             'duration': 194, | ||||
|             'duration': 1838, | ||||
|             'view_count': int, | ||||
|             'age_limit': 18, | ||||
|         } | ||||
| @@ -57,9 +57,7 @@ class EpornerIE(InfoExtractor): | ||||
|             formats.append(fmt) | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         duration = parse_duration(self._search_regex( | ||||
|             r'class="mbtim">([0-9:]+)</div>', webpage, 'duration', | ||||
|             fatal=False)) | ||||
|         duration = parse_duration(self._html_search_meta('duration', webpage)) | ||||
|         view_count = str_to_int(self._search_regex( | ||||
|             r'id="cinemaviews">\s*([0-9,]+)\s*<small>views', | ||||
|             webpage, 'view count', fatal=False)) | ||||
|   | ||||
							
								
								
									
75  youtube_dl/extractor/folketinget.py  Normal file
							| @@ -0,0 +1,75 @@ | ||||
| # -*- coding: utf-8 -*- | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_parse_qs | ||||
| from ..utils import ( | ||||
|     int_or_none, | ||||
|     parse_duration, | ||||
|     parse_iso8601, | ||||
|     xpath_text, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class FolketingetIE(InfoExtractor): | ||||
|     IE_DESC = 'Folketinget (ft.dk; Danish parliament)' | ||||
|     _VALID_URL = r'https?://(?:www\.)?ft\.dk/webtv/video/[^?#]*?\.(?P<id>[0-9]+)\.aspx' | ||||
|     _TEST = { | ||||
|         'url': 'http://www.ft.dk/webtv/video/20141/eru/td.1165642.aspx?as=1#player', | ||||
|         'info_dict': { | ||||
|             'id': '1165642', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Åbent samråd i Erhvervsudvalget', | ||||
|             'description': 'Åbent samråd med erhvervs- og vækstministeren om regeringens politik på teleområdet', | ||||
|             'view_count': int, | ||||
|             'width': 768, | ||||
|             'height': 432, | ||||
|             'tbr': 928000, | ||||
|             'timestamp': 1416493800, | ||||
|             'upload_date': '20141120', | ||||
|             'duration': 3960, | ||||
|         }, | ||||
|         'params': { | ||||
|             'skip_download': 'rtmpdump required', | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         video_id = self._match_id(url) | ||||
|         webpage = self._download_webpage(url, video_id) | ||||
|  | ||||
|         title = self._og_search_title(webpage) | ||||
|         description = self._html_search_regex( | ||||
|             r'(?s)<div class="video-item-agenda"[^>]*>(.*?)<', | ||||
|             webpage, 'description', fatal=False) | ||||
|  | ||||
|         player_params = compat_parse_qs(self._search_regex( | ||||
|             r'<embed src="http://ft\.arkena\.tv/flash/ftplayer\.swf\?([^"]+)"', | ||||
|             webpage, 'player params')) | ||||
|         xml_url = player_params['xml'][0] | ||||
|         doc = self._download_xml(xml_url, video_id) | ||||
|  | ||||
|         timestamp = parse_iso8601(xpath_text(doc, './/date')) | ||||
|         duration = parse_duration(xpath_text(doc, './/duration')) | ||||
|         width = int_or_none(xpath_text(doc, './/width')) | ||||
|         height = int_or_none(xpath_text(doc, './/height')) | ||||
|         view_count = int_or_none(xpath_text(doc, './/views')) | ||||
|  | ||||
|         formats = [{ | ||||
|             'format_id': n.attrib['bitrate'], | ||||
|             'url': xpath_text(n, './url', fatal=True), | ||||
|             'tbr': int_or_none(n.attrib['bitrate']), | ||||
|         } for n in doc.findall('.//streams/stream')] | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'title': title, | ||||
|             'formats': formats, | ||||
|             'description': description, | ||||
|             'timestamp': timestamp, | ||||
|             'width': width, | ||||
|             'height': height, | ||||
|             'duration': duration, | ||||
|             'view_count': view_count, | ||||
|         } | ||||
							
								
								
									
38  youtube_dl/extractor/freevideo.py  Normal file
							| @@ -0,0 +1,38 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ExtractorError | ||||
|  | ||||
|  | ||||
| class FreeVideoIE(InfoExtractor): | ||||
|     _VALID_URL = r'^http://www.freevideo.cz/vase-videa/(?P<id>[^.]+)\.html(?:$|[?#])' | ||||
|  | ||||
|     _TEST = { | ||||
|         'url': 'http://www.freevideo.cz/vase-videa/vysukany-zadecek-22033.html', | ||||
|         'info_dict': { | ||||
|             'id': 'vysukany-zadecek-22033', | ||||
|             'ext': 'mp4', | ||||
|             "title": "vysukany-zadecek-22033", | ||||
|             "age_limit": 18, | ||||
|         }, | ||||
|         'skip': 'Blocked outside .cz', | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         video_id = self._match_id(url) | ||||
|         webpage, handle = self._download_webpage_handle(url, video_id) | ||||
|         if '//www.czechav.com/' in handle.geturl(): | ||||
|             raise ExtractorError( | ||||
|                 'Access to freevideo is blocked from your location', | ||||
|                 expected=True) | ||||
|  | ||||
|         video_url = self._search_regex( | ||||
|             r'\s+url: "(http://[a-z0-9-]+.cdn.freevideo.cz/stream/.*?/video.mp4)"', | ||||
|             webpage, 'video URL') | ||||
|  | ||||
|         return { | ||||
|             'id': video_id, | ||||
|             'url': video_url, | ||||
|             'title': video_id, | ||||
|             'age_limit': 18, | ||||
|         } | ||||
| @@ -434,7 +434,17 @@ class GenericIE(InfoExtractor): | ||||
|                 'title': 'Chet Chat 171 - Oct 29, 2014', | ||||
|                 'upload_date': '20141029', | ||||
|             } | ||||
|         } | ||||
|         }, | ||||
|         # Livestream embed | ||||
|         { | ||||
|             'url': 'http://www.esa.int/Our_Activities/Space_Science/Rosetta/Philae_comet_touch-down_webcast', | ||||
|             'info_dict': { | ||||
|                 'id': '67864563', | ||||
|                 'ext': 'flv', | ||||
|                 'upload_date': '20141112', | ||||
|                 'title': 'Rosetta #CometLanding webcast HL 10', | ||||
|             } | ||||
|         }, | ||||
|     ] | ||||
|  | ||||
|     def report_following_redirect(self, new_url): | ||||
| @@ -916,6 +926,12 @@ class GenericIE(InfoExtractor): | ||||
|         if mobj is not None: | ||||
|             return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast') | ||||
|  | ||||
|         mobj = re.search( | ||||
|             r'<iframe[^>]+src="(?P<url>https?://new\.livestream\.com/[^"]+/player[^"]+)"', | ||||
|             webpage) | ||||
|         if mobj is not None: | ||||
|             return self.url_result(mobj.group('url'), 'Livestream') | ||||
|  | ||||
|         def check_video(vurl): | ||||
|             vpath = compat_urlparse.urlparse(vurl).path | ||||
|             vext = determine_ext(vpath) | ||||
| @@ -963,7 +979,7 @@ class GenericIE(InfoExtractor): | ||||
|                 found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)) | ||||
|         if not found: | ||||
|             # HTML5 video | ||||
|             found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]*)?\s+src="([^"]+)"', webpage) | ||||
|             found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]*)?\s+src=["\'](.*?)["\']', webpage) | ||||
|         if not found: | ||||
|             found = re.search( | ||||
|                 r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")' | ||||
|   | ||||
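The HTML5 fallback in the generic extractor above now accepts both single- and double-quoted src attributes. A quick standalone check of the widened pattern against two invented pages:

    import re

    # Hypothetical pages; only the quoting style differs.
    pages = [
        '<video controls><source type="video/mp4" src="http://example.com/a.mp4"></video>',
        "<video src='http://example.com/b.mp4'></video>",
    ]

    pattern = r'(?s)<video[^<]*(?:>.*?<source[^>]*)?\s+src=["\'](.*?)["\']'
    for page in pages:
        print(re.findall(pattern, page))
    # ['http://example.com/a.mp4']
    # ['http://example.com/b.mp4']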
| @@ -1,16 +1,14 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     parse_duration, | ||||
|     int_or_none, | ||||
| ) | ||||
|  | ||||
|  | ||||
| class GoldenMoustacheIE(InfoExtractor): | ||||
|     _VALID_URL = r'https?://(?:www\.)?goldenmoustache\.com/(?P<display_id>[\w-]+)-(?P<id>\d+)' | ||||
|     _TEST = { | ||||
|     _TESTS = [{ | ||||
|         'url': 'http://www.goldenmoustache.com/suricate-le-poker-3700/', | ||||
|         'md5': '0f904432fa07da5054d6c8beb5efb51a', | ||||
|         'info_dict': { | ||||
| @@ -21,7 +19,18 @@ class GoldenMoustacheIE(InfoExtractor): | ||||
|             'thumbnail': 're:^https?://.*\.jpg$', | ||||
|             'view_count': int, | ||||
|         } | ||||
|     } | ||||
|     }, { | ||||
|         'url': 'http://www.goldenmoustache.com/le-lab-tout-effacer-mc-fly-et-carlito-55249/', | ||||
|         'md5': '27f0c50fb4dd5f01dc9082fc67cd5700', | ||||
|         'info_dict': { | ||||
|             'id': '55249', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Le LAB - Tout Effacer (Mc Fly et Carlito)', | ||||
|             'description': 'md5:9b7fbf11023fb2250bd4b185e3de3b2a', | ||||
|             'thumbnail': 're:^https?://.*\.(?:png|jpg)$', | ||||
|             'view_count': int, | ||||
|         } | ||||
|     }] | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         video_id = self._match_id(url) | ||||
| @@ -30,7 +39,7 @@ class GoldenMoustacheIE(InfoExtractor): | ||||
|         video_url = self._html_search_regex( | ||||
|             r'data-src-type="mp4" data-src="([^"]+)"', webpage, 'video URL') | ||||
|         title = self._html_search_regex( | ||||
|             r'<title>(.*?) - Golden Moustache</title>', webpage, 'title') | ||||
|             r'<title>(.*?)(?: - Golden Moustache)?</title>', webpage, 'title') | ||||
|         thumbnail = self._og_search_thumbnail(webpage) | ||||
|         description = self._og_search_description(webpage) | ||||
|         view_count = int_or_none(self._html_search_regex( | ||||
|   | ||||
| @@ -18,7 +18,7 @@ from ..utils import ( | ||||
|  | ||||
| class LivestreamIE(InfoExtractor): | ||||
|     IE_NAME = 'livestream' | ||||
|     _VALID_URL = r'http://new\.livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>\d+))?/?$' | ||||
|     _VALID_URL = r'https?://new\.livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>[0-9]+)(?:/player)?)?/?(?:$|[?#])' | ||||
|     _TESTS = [{ | ||||
|         'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370', | ||||
|         'md5': '53274c76ba7754fb0e8d072716f2292b', | ||||
| @@ -37,6 +37,9 @@ class LivestreamIE(InfoExtractor): | ||||
|             'title': 'TEDCity2.0 (English)', | ||||
|         }, | ||||
|         'playlist_mincount': 4, | ||||
|     }, { | ||||
|         'url': 'https://new.livestream.com/accounts/362/events/3557232/videos/67864563/player?autoPlay=false&height=360&mute=false&width=640', | ||||
|         'only_matching': True, | ||||
|     }] | ||||
|  | ||||
|     def _parse_smil(self, video_id, smil_url): | ||||
|   | ||||
| @@ -16,7 +16,7 @@ class MailRuIE(InfoExtractor): | ||||
|             'url': 'http://my.mail.ru/video/top#video=/mail/sonypicturesrus/75/76', | ||||
|             'md5': 'dea205f03120046894db4ebb6159879a', | ||||
|             'info_dict': { | ||||
|                 'id': '46301138', | ||||
|                 'id': '46301138_76', | ||||
|                 'ext': 'mp4', | ||||
|                 'title': 'Новый Человек-Паук. Высокое напряжение. Восстание Электро', | ||||
|                 'timestamp': 1393232740, | ||||
| @@ -30,7 +30,7 @@ class MailRuIE(InfoExtractor): | ||||
|             'url': 'http://my.mail.ru/corp/hitech/video/news_hi-tech_mail_ru/1263.html', | ||||
|             'md5': '00a91a58c3402204dcced523777b475f', | ||||
|             'info_dict': { | ||||
|                 'id': '46843144', | ||||
|                 'id': '46843144_1263', | ||||
|                 'ext': 'mp4', | ||||
|                 'title': 'Samsung Galaxy S5 Hammer Smash Fail Battery Explosion', | ||||
|                 'timestamp': 1397217632, | ||||
| @@ -54,33 +54,36 @@ class MailRuIE(InfoExtractor): | ||||
|  | ||||
|         author = video_data['author'] | ||||
|         uploader = author['name'] | ||||
|         uploader_id = author['id'] | ||||
|         uploader_id = author.get('id') or author.get('email') | ||||
|         view_count = video_data.get('views_count') | ||||
|  | ||||
|         movie = video_data['movie'] | ||||
|         content_id = str(movie['contentId']) | ||||
|         title = movie['title'] | ||||
|         meta_data = video_data['meta'] | ||||
|         content_id = '%s_%s' % ( | ||||
|             meta_data.get('accId', ''), meta_data['itemId']) | ||||
|         title = meta_data['title'] | ||||
|         if title.endswith('.mp4'): | ||||
|             title = title[:-4] | ||||
|         thumbnail = movie['poster'] | ||||
|         duration = movie['duration'] | ||||
|  | ||||
|         view_count = video_data['views_count'] | ||||
|         thumbnail = meta_data['poster'] | ||||
|         duration = meta_data['duration'] | ||||
|         timestamp = meta_data['timestamp'] | ||||
|  | ||||
|         formats = [ | ||||
|             { | ||||
|                 'url': video['url'], | ||||
|                 'format_id': video['name'], | ||||
|                 'format_id': video['key'], | ||||
|                 'height': int(video['key'].rstrip('p')) | ||||
|             } for video in video_data['videos'] | ||||
|         ] | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         return { | ||||
|             'id': content_id, | ||||
|             'title': title, | ||||
|             'thumbnail': thumbnail, | ||||
|             'timestamp': video_data['timestamp'], | ||||
|             'timestamp': timestamp, | ||||
|             'uploader': uploader, | ||||
|             'uploader_id': uploader_id, | ||||
|             'duration': duration, | ||||
|             'view_count': view_count, | ||||
|             'formats': formats, | ||||
|         } | ||||
|         } | ||||
|   | ||||
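The Mail.ru rework above reads the 'meta' block of the API response, builds the id as accId_itemId and derives each format's height from keys such as '720p'. The same mapping on an invented response fragment:

    # Invented fragment in the shape the hunk above expects; real responses differ.
    meta_data = {'accId': '46843144', 'itemId': '1263', 'title': 'Sample clip.mp4'}
    videos = [{'key': '480p', 'url': 'http://example.com/480.mp4'},
              {'key': '720p', 'url': 'http://example.com/720.mp4'}]

    content_id = '%s_%s' % (meta_data.get('accId', ''), meta_data['itemId'])
    title = meta_data['title']
    if title.endswith('.mp4'):
        title = title[:-4]

    formats = [{'url': v['url'],
                'format_id': v['key'],
                'height': int(v['key'].rstrip('p'))}  # '720p' -> 720
               for v in videos]

    print('%s: %s' % (content_id, title))   # 46843144_1263: Sample clip
    print([f['height'] for f in formats])   # [480, 720]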
| @@ -145,7 +145,8 @@ class MTVServicesInfoExtractor(InfoExtractor): | ||||
|         idoc = self._download_xml( | ||||
|             feed_url + '?' + data, video_id, | ||||
|             'Downloading info', transform_source=fix_xml_ampersands) | ||||
|         return [self._get_video_info(item) for item in idoc.findall('.//item')] | ||||
|         return self.playlist_result( | ||||
|             [self._get_video_info(item) for item in idoc.findall('.//item')]) | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         title = url_basename(url) | ||||
| @@ -186,7 +187,8 @@ class MTVServicesEmbeddedIE(MTVServicesInfoExtractor): | ||||
|     def _get_feed_url(self, uri): | ||||
|         video_id = self._id_from_uri(uri) | ||||
|         site_id = uri.replace(video_id, '') | ||||
|         config_url = 'http://media.mtvnservices.com/pmt/e1/players/{0}/config.xml'.format(site_id) | ||||
|         config_url = ('http://media.mtvnservices.com/pmt/e1/players/{0}/' | ||||
|             'context4/context5/config.xml'.format(site_id)) | ||||
|         config_doc = self._download_xml(config_url, video_id) | ||||
|         feed_node = config_doc.find('.//feed') | ||||
|         feed_url = feed_node.text.strip().split('?')[0] | ||||
|   | ||||
| @@ -7,6 +7,7 @@ from ..utils import ( | ||||
|     unified_strdate, | ||||
|     parse_duration, | ||||
|     qualities, | ||||
|     strip_jsonp, | ||||
|     url_basename, | ||||
| ) | ||||
|  | ||||
| @@ -63,7 +64,7 @@ class NPOIE(InfoExtractor): | ||||
|             'http://e.omroep.nl/metadata/aflevering/%s' % video_id, | ||||
|             video_id, | ||||
|             # We have to remove the javascript callback | ||||
|             transform_source=lambda j: re.sub(r'parseMetadata\((.*?)\);\n//.*$', r'\1', j) | ||||
|             transform_source=strip_jsonp, | ||||
|         ) | ||||
|         token_page = self._download_webpage( | ||||
|             'http://ida.omroep.nl/npoplayer/i.js', | ||||
|   | ||||
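The NPO change above replaces a hand-written regex with the shared strip_jsonp transform, which unwraps the JSONP callback so the body can be parsed as JSON. A standalone equivalent for illustration only (this is not the helper's actual implementation, and the payload is invented):

    import json
    import re

    def strip_jsonp_sketch(code):
        # Remove everything up to and including the first '(' and from the last ')' on.
        return re.sub(r'^[^(]*\(|\)[^)]*$', '', code)

    # Invented payload wrapped in the callback the old regex used to strip by hand:
    jsonp = 'parseMetadata({"title": "Example", "id": 123});\n// epc'
    print(json.loads(strip_jsonp_sketch(jsonp))['title'])  # Example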
| @@ -38,10 +38,11 @@ class RtlXlIE(InfoExtractor): | ||||
|         progname = info['abstracts'][0]['name'] | ||||
|         subtitle = material['title'] or info['episodes'][0]['name'] | ||||
|  | ||||
|         videopath = material['videopath'] | ||||
|         f4m_url = 'http://manifest.us.rtl.nl' + videopath | ||||
|         # Use unencrypted m3u8 streams (See https://github.com/rg3/youtube-dl/issues/4118) | ||||
|         videopath = material['videopath'].replace('.f4m', '.m3u8') | ||||
|         m3u8_url = 'http://manifest.us.rtl.nl' + videopath | ||||
|  | ||||
|         formats = self._extract_f4m_formats(f4m_url, uuid) | ||||
|         formats = self._extract_m3u8_formats(m3u8_url, uuid, ext='mp4') | ||||
|  | ||||
|         video_urlpart = videopath.split('/flash/')[1][:-4] | ||||
|         PG_URL_TEMPLATE = 'http://pg.us.rtl.nl/rtlxl/network/%s/progressive/%s.mp4' | ||||
| @@ -54,9 +55,12 @@ class RtlXlIE(InfoExtractor): | ||||
|             { | ||||
|                 'url': PG_URL_TEMPLATE % ('a3m', video_urlpart), | ||||
|                 'format_id': 'pg-hd', | ||||
|                 'quality': 0, | ||||
|             } | ||||
|         ]) | ||||
|  | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|         return { | ||||
|             'id': uuid, | ||||
|             'title': '%s - %s' % (progname, subtitle), | ||||
|   | ||||
| @@ -5,6 +5,7 @@ import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urlparse | ||||
| from .spiegeltv import SpiegeltvIE | ||||
|  | ||||
|  | ||||
| class SpiegelIE(InfoExtractor): | ||||
| @@ -42,7 +43,11 @@ class SpiegelIE(InfoExtractor): | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         video_id = self._match_id(url) | ||||
|         webpage = self._download_webpage(url, video_id) | ||||
|         webpage, handle = self._download_webpage_handle(url, video_id) | ||||
|  | ||||
|         # 302 to spiegel.tv, like http://www.spiegel.de/video/der-film-zum-wochenende-die-wahrheit-ueber-maenner-video-99003272.html | ||||
|         if SpiegeltvIE.suitable(handle.geturl()): | ||||
|             return self.url_result(handle.geturl(), 'Spiegeltv') | ||||
|  | ||||
|         title = re.sub(r'\s+', ' ', self._html_search_regex( | ||||
|             r'(?s)<(?:h1|div) class="module-title"[^>]*>(.*?)</(?:h1|div)>', | ||||
|   | ||||
| @@ -1,13 +1,13 @@ | ||||
| # coding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
| from .common import InfoExtractor | ||||
| from ..utils import float_or_none | ||||
|  | ||||
|  | ||||
| class SpiegeltvIE(InfoExtractor): | ||||
|     _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/filme/(?P<id>[\-a-z0-9]+)' | ||||
|     _TEST = { | ||||
|     _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/(?:#/)?filme/(?P<id>[\-a-z0-9]+)' | ||||
|     _TESTS = [{ | ||||
|         'url': 'http://www.spiegel.tv/filme/flug-mh370/', | ||||
|         'info_dict': { | ||||
|             'id': 'flug-mh370', | ||||
| @@ -20,12 +20,15 @@ class SpiegeltvIE(InfoExtractor): | ||||
|             # rtmp download | ||||
|             'skip_download': True, | ||||
|         } | ||||
|     } | ||||
|     }, { | ||||
|         'url': 'http://www.spiegel.tv/#/filme/alleskino-die-wahrheit-ueber-maenner/', | ||||
|         'only_matching': True, | ||||
|     }] | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         video_id = mobj.group('id') | ||||
|  | ||||
|         if '/#/' in url: | ||||
|             url = url.replace('/#/', '/') | ||||
|         video_id = self._match_id(url) | ||||
|         webpage = self._download_webpage(url, video_id) | ||||
|         title = self._html_search_regex(r'<h1.*?>(.*?)</h1>', webpage, 'title') | ||||
|  | ||||
| @@ -61,12 +64,8 @@ class SpiegeltvIE(InfoExtractor): | ||||
|             }) | ||||
|  | ||||
|         description = media_json['subtitle'] | ||||
|         duration = media_json['duration_in_ms'] / 1000. | ||||
|  | ||||
|         if is_wide: | ||||
|             format = '16x9' | ||||
|         else: | ||||
|             format = '4x3' | ||||
|         duration = float_or_none(media_json.get('duration_in_ms'), scale=1000) | ||||
|         format = '16x9' if is_wide else '4x3' | ||||
|  | ||||
|         url = server + 'mp4:' + uuid + '_spiegeltv_0500_' + format + '.m4v' | ||||
|  | ||||
|   | ||||
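The Spiegel.tv cleanup above swaps a bare division for float_or_none(..., scale=1000), so a missing duration_in_ms no longer raises. A simplified sketch of the helper's contract (not its actual code):

    def float_or_none_sketch(v, scale=1, default=None):
        # Simplified contract: None (missing field) stays None instead of
        # raising a TypeError on division.
        return default if v is None else float(v) / scale

    print(float_or_none_sketch(123456, scale=1000))  # 123.456
    print(float_or_none_sketch(None, scale=1000))    # None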
| @@ -1,3 +1,5 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| @@ -9,24 +11,23 @@ from ..utils import ( | ||||
|  | ||||
|  | ||||
| class StanfordOpenClassroomIE(InfoExtractor): | ||||
|     IE_NAME = u'stanfordoc' | ||||
|     IE_DESC = u'Stanford Open ClassRoom' | ||||
|     _VALID_URL = r'^(?:https?://)?openclassroom\.stanford\.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$' | ||||
|     IE_NAME = 'stanfordoc' | ||||
|     IE_DESC = 'Stanford Open ClassRoom' | ||||
|     _VALID_URL = r'https?://openclassroom\.stanford\.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$' | ||||
|     _TEST = { | ||||
|         u'url': u'http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100', | ||||
|         u'file': u'PracticalUnix_intro-environment.mp4', | ||||
|         u'md5': u'544a9468546059d4e80d76265b0443b8', | ||||
|         u'info_dict': { | ||||
|             u"title": u"Intro Environment" | ||||
|         'url': 'http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100', | ||||
|         'md5': '544a9468546059d4e80d76265b0443b8', | ||||
|         'info_dict': { | ||||
|             'id': 'PracticalUnix_intro-environment', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Intro Environment', | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         if mobj is None: | ||||
|             raise ExtractorError(u'Invalid URL: %s' % url) | ||||
|  | ||||
|         if mobj.group('course') and mobj.group('video'): # A specific video | ||||
|         if mobj.group('course') and mobj.group('video'):  # A specific video | ||||
|             course = mobj.group('course') | ||||
|             video = mobj.group('video') | ||||
|             info = { | ||||
| @@ -35,7 +36,6 @@ class StanfordOpenClassroomIE(InfoExtractor): | ||||
|                 'upload_date': None, | ||||
|             } | ||||
|  | ||||
|             self.report_extraction(info['id']) | ||||
|             baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/' | ||||
|             xmlUrl = baseUrl + video + '.xml' | ||||
|             mdoc = self._download_xml(xmlUrl, info['id']) | ||||
| @@ -43,63 +43,49 @@ class StanfordOpenClassroomIE(InfoExtractor): | ||||
|                 info['title'] = mdoc.findall('./title')[0].text | ||||
|                 info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text | ||||
|             except IndexError: | ||||
|                 raise ExtractorError(u'Invalid metadata XML file') | ||||
|             info['ext'] = info['url'].rpartition('.')[2] | ||||
|             return [info] | ||||
|         elif mobj.group('course'): # A course page | ||||
|                 raise ExtractorError('Invalid metadata XML file') | ||||
|             return info | ||||
|         elif mobj.group('course'):  # A course page | ||||
|             course = mobj.group('course') | ||||
|             info = { | ||||
|                 'id': course, | ||||
|                 'type': 'playlist', | ||||
|                 '_type': 'playlist', | ||||
|                 'uploader': None, | ||||
|                 'upload_date': None, | ||||
|             } | ||||
|  | ||||
|             coursepage = self._download_webpage(url, info['id'], | ||||
|                                         note='Downloading course info page', | ||||
|                                         errnote='Unable to download course info page') | ||||
|             coursepage = self._download_webpage( | ||||
|                 url, info['id'], | ||||
|                 note='Downloading course info page', | ||||
|                 errnote='Unable to download course info page') | ||||
|  | ||||
|             info['title'] = self._html_search_regex('<h1>([^<]+)</h1>', coursepage, 'title', default=info['id']) | ||||
|             info['title'] = self._html_search_regex( | ||||
|                 r'<h1>([^<]+)</h1>', coursepage, 'title', default=info['id']) | ||||
|  | ||||
|             info['description'] = self._html_search_regex('<description>([^<]+)</description>', | ||||
|                 coursepage, u'description', fatal=False) | ||||
|             info['description'] = self._html_search_regex( | ||||
|                 r'(?s)<description>([^<]+)</description>', | ||||
|                 coursepage, 'description', fatal=False) | ||||
|  | ||||
|             links = orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage)) | ||||
|             info['list'] = [ | ||||
|                 { | ||||
|                     'type': 'reference', | ||||
|                     'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(vpage), | ||||
|                 } | ||||
|                     for vpage in links] | ||||
|             results = [] | ||||
|             for entry in info['list']: | ||||
|                 assert entry['type'] == 'reference' | ||||
|                 results += self.extract(entry['url']) | ||||
|             return results | ||||
|         else: # Root page | ||||
|             info['entries'] = [self.url_result( | ||||
|                 'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l) | ||||
|             ) for l in links] | ||||
|             return info | ||||
|         else:  # Root page | ||||
|             info = { | ||||
|                 'id': 'Stanford OpenClassroom', | ||||
|                 'type': 'playlist', | ||||
|                 '_type': 'playlist', | ||||
|                 'uploader': None, | ||||
|                 'upload_date': None, | ||||
|             } | ||||
|             info['title'] = info['id'] | ||||
|  | ||||
|             rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php' | ||||
|             rootpage = self._download_webpage(rootURL, info['id'], | ||||
|                 errnote=u'Unable to download course info page') | ||||
|  | ||||
|             info['title'] = info['id'] | ||||
|                 errnote='Unable to download course info page') | ||||
|  | ||||
|             links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage)) | ||||
|             info['list'] = [ | ||||
|                 { | ||||
|                     'type': 'reference', | ||||
|                     'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(cpage), | ||||
|                 } | ||||
|                     for cpage in links] | ||||
|  | ||||
|             results = [] | ||||
|             for entry in info['list']: | ||||
|                 assert entry['type'] == 'reference' | ||||
|                 results += self.extract(entry['url']) | ||||
|             return results | ||||
|             info['entries'] = [self.url_result( | ||||
|                 'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l) | ||||
|             ) for l in links] | ||||
|             return info | ||||
|   | ||||
| @@ -1,27 +1,24 @@ | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| import re | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..utils import determine_ext | ||||
|  | ||||
|  | ||||
| class SztvHuIE(InfoExtractor): | ||||
|     _VALID_URL = r'(?:http://)?(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)' | ||||
|     _VALID_URL = r'http://(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)' | ||||
|     _TEST = { | ||||
|         u'url': u'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909', | ||||
|         u'file': u'20130909.mp4', | ||||
|         u'md5': u'a6df607b11fb07d0e9f2ad94613375cb', | ||||
|         u'info_dict': { | ||||
|             u"title": u"Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren", | ||||
|             u"description": u'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...', | ||||
|         'url': 'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909', | ||||
|         'md5': 'a6df607b11fb07d0e9f2ad94613375cb', | ||||
|         'info_dict': { | ||||
|             'id': '20130909', | ||||
|             'ext': 'mp4', | ||||
|             'title': 'Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren', | ||||
|             'description': 'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...', | ||||
|         }, | ||||
|         u'skip': u'Service temporarily disabled as of 2013-11-20' | ||||
|     } | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         mobj = re.match(self._VALID_URL, url) | ||||
|         video_id = mobj.group('id') | ||||
|         video_id = self._match_id(url) | ||||
|         webpage = self._download_webpage(url, video_id) | ||||
|         video_file = self._search_regex( | ||||
|             r'file: "...:(.*?)",', webpage, 'video file') | ||||
| @@ -39,7 +36,6 @@ class SztvHuIE(InfoExtractor): | ||||
|             'id': video_id, | ||||
|             'url': video_url, | ||||
|             'title': title, | ||||
|             'ext': determine_ext(video_url), | ||||
|             'description': description, | ||||
|             'thumbnail': thumbnail, | ||||
|         } | ||||
|   | ||||
							
								
								
									
60  youtube_dl/extractor/telebruxelles.py  Normal file
							| @@ -0,0 +1,60 @@ | ||||
| # coding: utf-8 | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .common import InfoExtractor | ||||
|  | ||||
|  | ||||
| class TeleBruxellesIE(InfoExtractor): | ||||
|     _VALID_URL = r'https?://(?:www\.)?telebruxelles\.be/(news|sport|dernier-jt)/?(?P<id>[^/#?]+)' | ||||
|     _TESTS = [{ | ||||
|         'url': 'http://www.telebruxelles.be/news/auditions-devant-parlement-francken-galant-tres-attendus/', | ||||
|         'md5': '59439e568c9ee42fb77588b2096b214f', | ||||
|         'info_dict': { | ||||
|             'id': '11942', | ||||
|             'display_id': 'auditions-devant-parlement-francken-galant-tres-attendus', | ||||
|             'ext': 'flv', | ||||
|             'title': 'Parlement : Francken et Galant répondent aux interpellations de l’opposition', | ||||
|             'description': 're:Les auditions des ministres se poursuivent*' | ||||
|         }, | ||||
|         'params': { | ||||
|             'skip_download': 'requires rtmpdump' | ||||
|         }, | ||||
|     }, { | ||||
|         'url': 'http://www.telebruxelles.be/sport/basket-brussels-bat-mons-80-74/', | ||||
|         'md5': '181d3fbdcf20b909309e5aef5c6c6047', | ||||
|         'info_dict': { | ||||
|             'id': '10091', | ||||
|             'display_id': 'basket-brussels-bat-mons-80-74', | ||||
|             'ext': 'flv', | ||||
|             'title': 'Basket : le Brussels bat Mons 80-74', | ||||
|             'description': 're:^Ils l\u2019on fait ! En basket, le B*', | ||||
|         }, | ||||
|         'params': { | ||||
|             'skip_download': 'requires rtmpdump' | ||||
|         }, | ||||
|     }] | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         display_id = self._match_id(url) | ||||
|         webpage = self._download_webpage(url, display_id) | ||||
|  | ||||
|         article_id = self._html_search_regex( | ||||
|             r"<article id=\"post-(\d+)\"", webpage, 'article ID') | ||||
|         title = self._html_search_regex( | ||||
|             r'<h1 class=\"entry-title\">(.*?)</h1>', webpage, 'title') | ||||
|         description = self._og_search_description(webpage) | ||||
|  | ||||
|         rtmp_url = self._html_search_regex( | ||||
|             r"file: \"(rtmp://\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}/vod/mp4:\" \+ \"\w+\" \+ \".mp4)\"", | ||||
|             webpage, 'RTMP url') | ||||
|         rtmp_url = rtmp_url.replace("\" + \"", "") | ||||
|  | ||||
|         return { | ||||
|             'id': article_id, | ||||
|             'display_id': display_id, | ||||
|             'title': title, | ||||
|             'description': description, | ||||
|             'url': rtmp_url, | ||||
|             'ext': 'flv', | ||||
|             'rtmp_live': True  # if rtmpdump is not called with the "--live" argument, the download stalls and cannot be completed | ||||
|         } | ||||
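The new TeleBruxelles extractor above recovers an RTMP URL that the page assembles by JavaScript string concatenation, then glues the pieces together by deleting the literal '" + "' separators. The same steps on an invented player snippet (198.51.100.7 is a documentation address; the stream name is made up):

    import re

    webpage = 'file: "rtmp://198.51.100.7:1935/vod/mp4:" + "11942_hd" + ".mp4"'

    rtmp_url = re.search(
        r"file: \"(rtmp://\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}/vod/mp4:\" \+ \"\w+\" \+ \".mp4)\"",
        webpage).group(1)
    # The page builds the URL by string concatenation; gluing the pieces back
    # together just means deleting the '" + "' separators.
    rtmp_url = rtmp_url.replace("\" + \"", "")
    print(rtmp_url)  # rtmp://198.51.100.7:1935/vod/mp4:11942_hd.mp4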
| @@ -121,4 +121,7 @@ class VH1IE(MTVIE): | ||||
|         idoc = self._download_xml( | ||||
|             doc_url, video_id, | ||||
|             'Downloading info', transform_source=fix_xml_ampersands) | ||||
|         return [self._get_video_info(item) for item in idoc.findall('.//item')] | ||||
|         return self.playlist_result( | ||||
|             [self._get_video_info(item) for item in idoc.findall('.//item')], | ||||
|             playlist_id=video_id, | ||||
|         ) | ||||
|   | ||||
| @@ -307,6 +307,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor): | ||||
|         '171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50}, | ||||
|         '172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50}, | ||||
|  | ||||
|         # Dash webm audio with opus inside | ||||
|         '249': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50}, | ||||
|         '250': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50}, | ||||
|         '251': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50}, | ||||
|  | ||||
|         # RTMP (unnamed) | ||||
|         '_rtmp': {'protocol': 'rtmp'}, | ||||
|     } | ||||
| @@ -401,6 +406,19 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor): | ||||
|                 'format': '141', | ||||
|             }, | ||||
|         }, | ||||
|         # Controversy video | ||||
|         { | ||||
|             'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8', | ||||
|             'info_dict': { | ||||
|                 'id': 'T4XJQO3qol8', | ||||
|                 'ext': 'mp4', | ||||
|                 'upload_date': '20100909', | ||||
|                 'uploader': 'The Amazing Atheist', | ||||
|                 'uploader_id': 'TheAmazingAtheist', | ||||
|                 'title': 'Burning Everyone\'s Koran', | ||||
|                 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html', | ||||
|             } | ||||
|         } | ||||
|     ] | ||||
|  | ||||
|     def __init__(self, *args, **kwargs): | ||||
| @@ -661,7 +679,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor): | ||||
|         video_id = self.extract_id(url) | ||||
|  | ||||
|         # Get video webpage | ||||
|         url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id | ||||
|         url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id | ||||
|         pref_cookies = [ | ||||
|             c for c in self._downloader.cookiejar | ||||
|             if c.domain == '.youtube.com' and c.name == 'PREF'] | ||||
| @@ -991,7 +1009,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor): | ||||
|                         existing_format.update(f) | ||||
|  | ||||
|             except (ExtractorError, KeyError) as e: | ||||
|                 self.report_warning('Skipping DASH manifest: %s' % e, video_id) | ||||
|                 self.report_warning('Skipping DASH manifest: %r' % e, video_id) | ||||
|  | ||||
|         self._sort_formats(formats) | ||||
|  | ||||
|   | ||||
| @@ -61,7 +61,7 @@ class JSInterpreter(object): | ||||
|             pass | ||||
|  | ||||
|         m = re.match( | ||||
|             r'^(?P<var>[a-zA-Z0-9_]+)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$', | ||||
|             r'^(?P<var>[$a-zA-Z0-9_]+)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$', | ||||
|             expr) | ||||
|         if m: | ||||
|             variable = m.group('var') | ||||
|   | ||||
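The jsinterp tweak above lets member expressions start with '$', which shows up in some minified player code. A quick check of the widened pattern on a made-up expression:

    import re

    pattern = r'^(?P<var>[$a-zA-Z0-9_]+)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$'
    m = re.match(pattern, '$aB.cd("x")')
    print(m.group('var'), m.group('member'), m.group('args'))  # $aB cd "x"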
| @@ -8,6 +8,7 @@ import sys | ||||
| from .compat import ( | ||||
|     compat_expanduser, | ||||
|     compat_getenv, | ||||
|     compat_kwargs, | ||||
| ) | ||||
| from .utils import ( | ||||
|     get_term_width, | ||||
| @@ -112,7 +113,7 @@ def parseOpts(overrideArguments=None): | ||||
|         'conflict_handler': 'resolve', | ||||
|     } | ||||
|  | ||||
|     parser = optparse.OptionParser(**kw) | ||||
|     parser = optparse.OptionParser(**compat_kwargs(kw)) | ||||
|  | ||||
|     general = optparse.OptionGroup(parser, 'General Options') | ||||
|     general.add_option( | ||||
|   | ||||
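The options.py change above passes the OptionParser keywords through compat_kwargs. With unicode_literals, the keys of the dict literal are unicode on Python 2, and some calls there (notably **kwargs on Python 2.6) only accept plain-str keyword names, so the helper converts them back. A simplified sketch of that idea (not the helper's actual code; the usage string is just an example):

    import sys

    def compat_kwargs_sketch(kwargs):
        # On Python 2, re-encode unicode keys as plain str so they can be used
        # as keyword-argument names; on Python 3 nothing needs to change.
        if sys.version_info[0] == 2:
            return dict((str(k), v) for k, v in kwargs.items())
        return kwargs

    kw = {'usage': '%prog [OPTIONS] URL', 'conflict_handler': 'resolve'}
    print(sorted(compat_kwargs_sketch(kw)))  # ['conflict_handler', 'usage']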
| @@ -62,15 +62,17 @@ class _ScopeDict(dict): | ||||
|  | ||||
|  | ||||
| class _AVMClass(object): | ||||
|     def __init__(self, name_idx, name): | ||||
|     def __init__(self, name_idx, name, static_properties=None): | ||||
|         self.name_idx = name_idx | ||||
|         self.name = name | ||||
|         self.method_names = {} | ||||
|         self.method_idxs = {} | ||||
|         self.methods = {} | ||||
|         self.method_pyfunctions = {} | ||||
|         self.static_properties = static_properties if static_properties else {} | ||||
|  | ||||
|         self.variables = _ScopeDict(self) | ||||
|         self.constants = {} | ||||
|  | ||||
|     def make_object(self): | ||||
|         return _AVMClass_Object(self) | ||||
| @@ -148,8 +150,38 @@ def _read_byte(reader): | ||||
|     return res | ||||
|  | ||||
|  | ||||
| StringClass = _AVMClass('(no name idx)', 'String') | ||||
| ByteArrayClass = _AVMClass('(no name idx)', 'ByteArray') | ||||
| TimerClass = _AVMClass('(no name idx)', 'Timer') | ||||
| TimerEventClass = _AVMClass('(no name idx)', 'TimerEvent', {'TIMER': 'timer'}) | ||||
| _builtin_classes = { | ||||
|     StringClass.name: StringClass, | ||||
|     ByteArrayClass.name: ByteArrayClass, | ||||
|     TimerClass.name: TimerClass, | ||||
|     TimerEventClass.name: TimerEventClass, | ||||
| } | ||||
|  | ||||
|  | ||||
| class _Undefined(object): | ||||
|     def __bool__(self): | ||||
|         return False | ||||
|     __nonzero__ = __bool__ | ||||
|  | ||||
|     def __hash__(self): | ||||
|         return 0 | ||||
|  | ||||
|     def __str__(self): | ||||
|         return 'undefined' | ||||
|     __repr__ = __str__ | ||||
|  | ||||
| undefined = _Undefined() | ||||
|  | ||||
|  | ||||
| class SWFInterpreter(object): | ||||
|     def __init__(self, file_contents): | ||||
|         self._patched_functions = { | ||||
|             (TimerClass, 'addEventListener'): lambda params: undefined, | ||||
|         } | ||||
|         code_tag = next(tag | ||||
|                         for tag_code, tag in _extract_tags(file_contents) | ||||
|                         if tag_code == 82) | ||||
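The SWF-interpreter additions above introduce an 'undefined' singleton (falsy, hashable, prints as 'undefined') and a table of patchable built-in methods, with Timer.addEventListener stubbed to return it. Its behaviour in isolation, with a stand-in for the patched method:

    class _Undefined(object):
        def __bool__(self):          # Python 3 truthiness
            return False
        __nonzero__ = __bool__       # Python 2 spelling

        def __hash__(self):
            return 0

        def __str__(self):
            return 'undefined'
        __repr__ = __str__

    undefined = _Undefined()

    def add_event_listener(params):  # stand-in for the patched Timer.addEventListener
        return undefined

    print(bool(undefined), undefined)                     # False undefined
    print(add_event_listener(['timer', 'tick']) is undefined)  # True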
| @@ -170,11 +202,13 @@ class SWFInterpreter(object): | ||||
|  | ||||
|         # Constant pool | ||||
|         int_count = u30() | ||||
|         self.constant_ints = [0] | ||||
|         for _c in range(1, int_count): | ||||
|             s32() | ||||
|             self.constant_ints.append(s32()) | ||||
|         self.constant_uints = [0] | ||||
|         uint_count = u30() | ||||
|         for _c in range(1, uint_count): | ||||
|             u32() | ||||
|             self.constant_uints.append(u32()) | ||||
|         double_count = u30() | ||||
|         read_bytes(max(0, (double_count - 1)) * 8) | ||||
|         string_count = u30() | ||||
| @@ -212,6 +246,10 @@ class SWFInterpreter(object): | ||||
|                 u30()  # namespace_idx | ||||
|                 name_idx = u30() | ||||
|                 self.multinames.append(self.constant_strings[name_idx]) | ||||
|             elif kind == 0x09: | ||||
|                 name_idx = u30() | ||||
|                 u30() | ||||
|                 self.multinames.append(self.constant_strings[name_idx]) | ||||
|             else: | ||||
|                 self.multinames.append(_Multiname(kind)) | ||||
|                 for _c2 in range(MULTINAME_SIZES[kind]): | ||||
| @@ -258,13 +296,28 @@ class SWFInterpreter(object): | ||||
|             kind = kind_full & 0x0f | ||||
|             attrs = kind_full >> 4 | ||||
|             methods = {} | ||||
|             if kind in [0x00, 0x06]:  # Slot or Const | ||||
|             constants = None | ||||
|             if kind == 0x00:  # Slot | ||||
|                 u30()  # Slot id | ||||
|                 u30()  # type_name_idx | ||||
|                 vindex = u30() | ||||
|                 if vindex != 0: | ||||
|                     read_byte()  # vkind | ||||
|             elif kind in [0x01, 0x02, 0x03]:  # Method / Getter / Setter | ||||
|             elif kind == 0x06:  # Const | ||||
|                 u30()  # Slot id | ||||
|                 u30()  # type_name_idx | ||||
|                 vindex = u30() | ||||
|                 vkind = 'any' | ||||
|                 if vindex != 0: | ||||
|                     vkind = read_byte() | ||||
|                 if vkind == 0x03:  # Constant_Int | ||||
|                     value = self.constant_ints[vindex] | ||||
|                 elif vkind == 0x04:  # Constant_UInt | ||||
|                     value = self.constant_uints[vindex] | ||||
|                 else: | ||||
|                     return {}, None  # Ignore silently for now | ||||
|                 constants = {self.multinames[trait_name_idx]: value} | ||||
|             elif kind in (0x01, 0x02, 0x03):  # Method / Getter / Setter | ||||
|                 u30()  # disp_id | ||||
|                 method_idx = u30() | ||||
|                 methods[self.multinames[trait_name_idx]] = method_idx | ||||
| @@ -283,7 +336,7 @@ class SWFInterpreter(object): | ||||
|                 for _c3 in range(metadata_count): | ||||
|                     u30()  # metadata index | ||||
|  | ||||
|             return methods | ||||
|             return methods, constants | ||||
|  | ||||
|         # Classes | ||||
|         class_count = u30() | ||||
| @@ -305,18 +358,22 @@ class SWFInterpreter(object): | ||||
|             u30()  # iinit | ||||
|             trait_count = u30() | ||||
|             for _c2 in range(trait_count): | ||||
|                 trait_methods = parse_traits_info() | ||||
|                 trait_methods, trait_constants = parse_traits_info() | ||||
|                 avm_class.register_methods(trait_methods) | ||||
|                 if trait_constants: | ||||
|                     avm_class.constants.update(trait_constants) | ||||
|  | ||||
|         assert len(classes) == class_count | ||||
|         self._classes_by_name = dict((c.name, c) for c in classes) | ||||
|  | ||||
|         for avm_class in classes: | ||||
|             u30()  # cinit | ||||
|             avm_class.cinit_idx = u30() | ||||
|             trait_count = u30() | ||||
|             for _c2 in range(trait_count): | ||||
|                 trait_methods = parse_traits_info() | ||||
|                 trait_methods, trait_constants = parse_traits_info() | ||||
|                 avm_class.register_methods(trait_methods) | ||||
|                 if trait_constants: | ||||
|                     avm_class.constants.update(trait_constants) | ||||
|  | ||||
|         # Scripts | ||||
|         script_count = u30() | ||||
| @@ -329,6 +386,7 @@ class SWFInterpreter(object): | ||||
|         # Method bodies | ||||
|         method_body_count = u30() | ||||
|         Method = collections.namedtuple('Method', ['code', 'local_count']) | ||||
|         self._all_methods = [] | ||||
|         for _c in range(method_body_count): | ||||
|             method_idx = u30() | ||||
|             u30()  # max_stack | ||||
| @@ -337,9 +395,10 @@ class SWFInterpreter(object): | ||||
|             u30()  # max_scope_depth | ||||
|             code_length = u30() | ||||
|             code = read_bytes(code_length) | ||||
|             m = Method(code, local_count) | ||||
|             self._all_methods.append(m) | ||||
|             for avm_class in classes: | ||||
|                 if method_idx in avm_class.method_idxs: | ||||
|                     m = Method(code, local_count) | ||||
|                     avm_class.methods[avm_class.method_idxs[method_idx]] = m | ||||
|             exception_count = u30() | ||||
|             for _c2 in range(exception_count): | ||||
| @@ -354,13 +413,27 @@ class SWFInterpreter(object): | ||||
|  | ||||
|         assert p + code_reader.tell() == len(code_tag) | ||||
|  | ||||
|     def extract_class(self, class_name): | ||||
|     def patch_function(self, avm_class, func_name, f): | ||||
|         self._patched_functions[(avm_class, func_name)] = f | ||||
|  | ||||
|     def extract_class(self, class_name, call_cinit=True): | ||||
|         try: | ||||
|             return self._classes_by_name[class_name] | ||||
|             res = self._classes_by_name[class_name] | ||||
|         except KeyError: | ||||
|             raise ExtractorError('Class %r not found' % class_name) | ||||
|  | ||||
|         if call_cinit and hasattr(res, 'cinit_idx'): | ||||
|             res.register_methods({'$cinit': res.cinit_idx}) | ||||
|             res.methods['$cinit'] = self._all_methods[res.cinit_idx] | ||||
|             cinit = self.extract_function(res, '$cinit') | ||||
|             cinit([]) | ||||
|  | ||||
|         return res | ||||
|  | ||||
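Taken together, the `call_cinit` flag and the new `patch_function` hook change how callers obtain an AVM2 class: `extract_class` now also runs the class's static initializer (`$cinit`) by default, and individual methods can be replaced with native Python before they are ever interpreted. A minimal sketch; `swf_data`, the class name and the method names are hypothetical:

    from youtube_dl.swfinterp import SWFInterpreter

    swfi = SWFInterpreter(swf_data)           # swf_data: raw SWF file contents (assumed available)
    cls = swfi.extract_class('Decipherer')    # hypothetical class; its $cinit runs by default
    # cls = swfi.extract_class('Decipherer', call_cinit=False)  # skip the static initializer

    # Replace an interpreted helper with native Python (hypothetical method name):
    swfi.patch_function(cls, 'swapChars', lambda args: list(reversed(args[0])))

    decipher = swfi.extract_function(cls, 'decipher')   # hypothetical method name
    print(decipher(['example_signature']))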
|     def extract_function(self, avm_class, func_name): | ||||
|         p = self._patched_functions.get((avm_class, func_name)) | ||||
|         if p: | ||||
|             return p | ||||
|         if func_name in avm_class.method_pyfunctions: | ||||
|             return avm_class.method_pyfunctions[func_name] | ||||
|         if func_name in self._classes_by_name: | ||||
| @@ -379,10 +452,15 @@ class SWFInterpreter(object): | ||||
|             registers = [avm_class.variables] + list(args) + [None] * m.local_count | ||||
|             stack = [] | ||||
|             scopes = collections.deque([ | ||||
|                 self._classes_by_name, avm_class.variables]) | ||||
|                 self._classes_by_name, avm_class.constants, avm_class.variables]) | ||||
|             while True: | ||||
|                 opcode = _read_byte(coder) | ||||
|                 if opcode == 17:  # iftrue | ||||
|                 if opcode == 9:  # label | ||||
|                     pass  # Spec says: "Do nothing." | ||||
|                 elif opcode == 16:  # jump | ||||
|                     offset = s24() | ||||
|                     coder.seek(coder.tell() + offset) | ||||
|                 elif opcode == 17:  # iftrue | ||||
|                     offset = s24() | ||||
|                     value = stack.pop() | ||||
|                     if value: | ||||
| @@ -392,9 +470,40 @@ class SWFInterpreter(object): | ||||
|                     value = stack.pop() | ||||
|                     if not value: | ||||
|                         coder.seek(coder.tell() + offset) | ||||
|                 elif opcode == 19:  # ifeq | ||||
|                     offset = s24() | ||||
|                     value2 = stack.pop() | ||||
|                     value1 = stack.pop() | ||||
|                     if value2 == value1: | ||||
|                         coder.seek(coder.tell() + offset) | ||||
|                 elif opcode == 20:  # ifne | ||||
|                     offset = s24() | ||||
|                     value2 = stack.pop() | ||||
|                     value1 = stack.pop() | ||||
|                     if value2 != value1: | ||||
|                         coder.seek(coder.tell() + offset) | ||||
|                 elif opcode == 21:  # iflt | ||||
|                     offset = s24() | ||||
|                     value2 = stack.pop() | ||||
|                     value1 = stack.pop() | ||||
|                     if value1 < value2: | ||||
|                         coder.seek(coder.tell() + offset) | ||||
|                 elif opcode == 32:  # pushnull | ||||
|                     stack.append(None) | ||||
|                 elif opcode == 33:  # pushundefined | ||||
|                     stack.append(undefined) | ||||
|                 elif opcode == 36:  # pushbyte | ||||
|                     v = _read_byte(coder) | ||||
|                     stack.append(v) | ||||
|                 elif opcode == 37:  # pushshort | ||||
|                     v = u30() | ||||
|                     stack.append(v) | ||||
|                 elif opcode == 38:  # pushtrue | ||||
|                     stack.append(True) | ||||
|                 elif opcode == 39:  # pushfalse | ||||
|                     stack.append(False) | ||||
|                 elif opcode == 40:  # pushnan | ||||
|                     stack.append(float('NaN')) | ||||
|                 elif opcode == 42:  # dup | ||||
|                     value = stack[-1] | ||||
|                     stack.append(value) | ||||
| @@ -419,11 +528,31 @@ class SWFInterpreter(object): | ||||
|                         [stack.pop() for _ in range(arg_count)])) | ||||
|                     obj = stack.pop() | ||||
|  | ||||
|                     if isinstance(obj, _AVMClass_Object): | ||||
|                     if obj == StringClass: | ||||
|                         if mname == 'String': | ||||
|                             assert len(args) == 1 | ||||
|                             assert isinstance(args[0], ( | ||||
|                                 int, compat_str, _Undefined)) | ||||
|                             if args[0] == undefined: | ||||
|                                 res = 'undefined' | ||||
|                             else: | ||||
|                                 res = compat_str(args[0]) | ||||
|                             stack.append(res) | ||||
|                             continue | ||||
|                         else: | ||||
|                             raise NotImplementedError( | ||||
|                                 'Function String.%s is not yet implemented' | ||||
|                                 % mname) | ||||
|                     elif isinstance(obj, _AVMClass_Object): | ||||
|                         func = self.extract_function(obj.avm_class, mname) | ||||
|                         res = func(args) | ||||
|                         stack.append(res) | ||||
|                         continue | ||||
|                     elif isinstance(obj, _AVMClass): | ||||
|                         func = self.extract_function(obj, mname) | ||||
|                         res = func(args) | ||||
|                         stack.append(res) | ||||
|                         continue | ||||
|                     elif isinstance(obj, _ScopeDict): | ||||
|                         if mname in obj.avm_class.method_names: | ||||
|                             func = self.extract_function(obj.avm_class, mname) | ||||
| @@ -442,6 +571,13 @@ class SWFInterpreter(object): | ||||
|                                 res = obj.split(args[0]) | ||||
|                             stack.append(res) | ||||
|                             continue | ||||
|                         elif mname == 'charCodeAt': | ||||
|                             assert len(args) <= 1 | ||||
|                             idx = 0 if len(args) == 0 else args[0] | ||||
|                             assert isinstance(idx, int) | ||||
|                             res = ord(obj[idx]) | ||||
|                             stack.append(res) | ||||
|                             continue | ||||
|                     elif isinstance(obj, list): | ||||
|                         if mname == 'slice': | ||||
|                             assert len(args) == 1 | ||||
| @@ -458,9 +594,18 @@ class SWFInterpreter(object): | ||||
|                     raise NotImplementedError( | ||||
|                         'Unsupported property %r on %r' | ||||
|                         % (mname, obj)) | ||||
|                 elif opcode == 71:  # returnvoid | ||||
|                     res = undefined | ||||
|                     return res | ||||
|                 elif opcode == 72:  # returnvalue | ||||
|                     res = stack.pop() | ||||
|                     return res | ||||
|                 elif opcode == 73:  # constructsuper | ||||
|                     # Not yet implemented, just hope it works without it | ||||
|                     arg_count = u30() | ||||
|                     args = list(reversed( | ||||
|                         [stack.pop() for _ in range(arg_count)])) | ||||
|                     obj = stack.pop() | ||||
|                 elif opcode == 74:  # constructproperty | ||||
|                     index = u30() | ||||
|                     arg_count = u30() | ||||
| @@ -481,6 +626,17 @@ class SWFInterpreter(object): | ||||
|                     args = list(reversed( | ||||
|                         [stack.pop() for _ in range(arg_count)])) | ||||
|                     obj = stack.pop() | ||||
|                     if isinstance(obj, _AVMClass_Object): | ||||
|                         func = self.extract_function(obj.avm_class, mname) | ||||
|                         res = func(args) | ||||
|                         assert res is undefined | ||||
|                         continue | ||||
|                     if isinstance(obj, _ScopeDict): | ||||
|                         assert mname in obj.avm_class.method_names | ||||
|                         func = self.extract_function(obj.avm_class, mname) | ||||
|                         res = func(args) | ||||
|                         assert res is undefined | ||||
|                         continue | ||||
|                     if mname == 'reverse': | ||||
|                         assert isinstance(obj, list) | ||||
|                         obj.reverse() | ||||
| @@ -504,7 +660,10 @@ class SWFInterpreter(object): | ||||
|                             break | ||||
|                     else: | ||||
|                         res = scopes[0] | ||||
|                     stack.append(res[mname]) | ||||
|                     if mname not in res and mname in _builtin_classes: | ||||
|                         stack.append(_builtin_classes[mname]) | ||||
|                     else: | ||||
|                         stack.append(res[mname]) | ||||
|                 elif opcode == 94:  # findproperty | ||||
|                     index = u30() | ||||
|                     mname = self.multinames[index] | ||||
| @@ -524,9 +683,15 @@ class SWFInterpreter(object): | ||||
|                             break | ||||
|                     else: | ||||
|                         scope = avm_class.variables | ||||
|                     # I cannot find where static variables are initialized | ||||
|                     # so let's just return None | ||||
|                     res = scope.get(mname) | ||||
|  | ||||
|                     if mname in scope: | ||||
|                         res = scope[mname] | ||||
|                     elif mname in _builtin_classes: | ||||
|                         res = _builtin_classes[mname] | ||||
|                     else: | ||||
|                         # Assume uninitialized | ||||
|                         # TODO warn here | ||||
|                         res = undefined | ||||
|                     stack.append(res) | ||||
|                 elif opcode == 97:  # setproperty | ||||
|                     index = u30() | ||||
| @@ -548,22 +713,57 @@ class SWFInterpreter(object): | ||||
|                     pname = self.multinames[index] | ||||
|                     if pname == 'length': | ||||
|                         obj = stack.pop() | ||||
|                         assert isinstance(obj, list) | ||||
|                         assert isinstance(obj, (compat_str, list)) | ||||
|                         stack.append(len(obj)) | ||||
|                     elif isinstance(pname, compat_str):  # Member access | ||||
|                         obj = stack.pop() | ||||
|                         if isinstance(obj, _AVMClass): | ||||
|                             res = obj.static_properties[pname] | ||||
|                             stack.append(res) | ||||
|                             continue | ||||
|  | ||||
|                         assert isinstance(obj, (dict, _ScopeDict)),\ | ||||
|                             'Accessing member %r on %r' % (pname, obj) | ||||
|                         res = obj.get(pname, undefined) | ||||
|                         stack.append(res) | ||||
|                     else:  # Assume attribute access | ||||
|                         idx = stack.pop() | ||||
|                         assert isinstance(idx, int) | ||||
|                         obj = stack.pop() | ||||
|                         assert isinstance(obj, list) | ||||
|                         stack.append(obj[idx]) | ||||
|                 elif opcode == 104:  # initproperty | ||||
|                     index = u30() | ||||
|                     value = stack.pop() | ||||
|                     idx = self.multinames[index] | ||||
|                     if isinstance(idx, _Multiname): | ||||
|                         idx = stack.pop() | ||||
|                     obj = stack.pop() | ||||
|                     obj[idx] = value | ||||
|                 elif opcode == 115:  # convert_i | ||||
|                     value = stack.pop() | ||||
|                     intvalue = int(value) | ||||
|                     stack.append(intvalue) | ||||
|                 elif opcode == 128:  # coerce | ||||
|                     u30() | ||||
|                 elif opcode == 130:  # coerce_a | ||||
|                     value = stack.pop() | ||||
|                     # um, yes, it's any value | ||||
|                     stack.append(value) | ||||
|                 elif opcode == 133:  # coerce_s | ||||
|                     assert isinstance(stack[-1], (type(None), compat_str)) | ||||
|                 elif opcode == 147:  # decrement | ||||
|                     value = stack.pop() | ||||
|                     assert isinstance(value, int) | ||||
|                     stack.append(value - 1) | ||||
|                 elif opcode == 149:  # typeof | ||||
|                     value = stack.pop() | ||||
|                     return { | ||||
|                         _Undefined: 'undefined', | ||||
|                         compat_str: 'String', | ||||
|                         int: 'Number', | ||||
|                         float: 'Number', | ||||
|                     }[type(value)] | ||||
|                 elif opcode == 160:  # add | ||||
|                     value2 = stack.pop() | ||||
|                     value1 = stack.pop() | ||||
| @@ -574,16 +774,37 @@ class SWFInterpreter(object): | ||||
|                     value1 = stack.pop() | ||||
|                     res = value1 - value2 | ||||
|                     stack.append(res) | ||||
|                 elif opcode == 162:  # multiply | ||||
|                     value2 = stack.pop() | ||||
|                     value1 = stack.pop() | ||||
|                     res = value1 * value2 | ||||
|                     stack.append(res) | ||||
|                 elif opcode == 164:  # modulo | ||||
|                     value2 = stack.pop() | ||||
|                     value1 = stack.pop() | ||||
|                     res = value1 % value2 | ||||
|                     stack.append(res) | ||||
|                 elif opcode == 168:  # bitand | ||||
|                     value2 = stack.pop() | ||||
|                     value1 = stack.pop() | ||||
|                     assert isinstance(value1, int) | ||||
|                     assert isinstance(value2, int) | ||||
|                     res = value1 & value2 | ||||
|                     stack.append(res) | ||||
|                 elif opcode == 171:  # equals | ||||
|                     value2 = stack.pop() | ||||
|                     value1 = stack.pop() | ||||
|                     result = value1 == value2 | ||||
|                     stack.append(result) | ||||
|                 elif opcode == 175:  # greaterequals | ||||
|                     value2 = stack.pop() | ||||
|                     value1 = stack.pop() | ||||
|                     result = value1 >= value2 | ||||
|                     stack.append(result) | ||||
|                 elif opcode == 192:  # increment_i | ||||
|                     value = stack.pop() | ||||
|                     assert isinstance(value, int) | ||||
|                     stack.append(value + 1) | ||||
|                 elif opcode == 208:  # getlocal_0 | ||||
|                     stack.append(registers[0]) | ||||
|                 elif opcode == 209:  # getlocal_1 | ||||
|   | ||||
| @@ -63,7 +63,7 @@ def preferredencoding(): | ||||
|     """ | ||||
|     try: | ||||
|         pref = locale.getpreferredencoding() | ||||
|         u'TEST'.encode(pref) | ||||
|         'TEST'.encode(pref) | ||||
|     except: | ||||
|         pref = 'UTF-8' | ||||
|  | ||||
| @@ -71,12 +71,25 @@ def preferredencoding(): | ||||
|  | ||||
|  | ||||
| def write_json_file(obj, fn): | ||||
|     """ Encode obj as JSON and write it to fn, atomically """ | ||||
|     """ Encode obj as JSON and write it to fn, atomically if possible """ | ||||
|  | ||||
|     fn = encodeFilename(fn) | ||||
|     if sys.version_info < (3, 0) and sys.platform != 'win32': | ||||
|         encoding = get_filesystem_encoding() | ||||
|         # os.path.basename returns a bytes object, but NamedTemporaryFile | ||||
|         # will fail if the filename contains non-ASCII characters unless we | ||||
|         # pass it a unicode object | ||||
|         path_basename = lambda f: os.path.basename(f).decode(encoding) | ||||
|         # the same for os.path.dirname | ||||
|         path_dirname = lambda f: os.path.dirname(f).decode(encoding) | ||||
|     else: | ||||
|         path_basename = os.path.basename | ||||
|         path_dirname = os.path.dirname | ||||
|  | ||||
|     args = { | ||||
|         'suffix': '.tmp', | ||||
|         'prefix': os.path.basename(fn) + '.', | ||||
|         'dir': os.path.dirname(fn), | ||||
|         'prefix': path_basename(fn) + '.', | ||||
|         'dir': path_dirname(fn), | ||||
|         'delete': False, | ||||
|     } | ||||
|  | ||||
| @@ -95,6 +108,13 @@ def write_json_file(obj, fn): | ||||
|     try: | ||||
|         with tf: | ||||
|             json.dump(obj, tf) | ||||
|         if sys.platform == 'win32': | ||||
|             # Need to remove existing file on Windows, else os.rename raises | ||||
|             # WindowsError or FileExistsError. | ||||
|             try: | ||||
|                 os.unlink(fn) | ||||
|             except OSError: | ||||
|                 pass | ||||
|         os.rename(tf.name, fn) | ||||
|     except: | ||||
|         try: | ||||
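The net effect is that `write_json_file` still writes through a temporary file next to the target, decodes the path on Python 2 (outside Windows) so `NamedTemporaryFile` accepts non-ASCII names, and on Windows unlinks an existing target before `os.rename`. A small usage sketch, assuming `youtube_dl.utils` is importable and the filename is made up:

    import os
    from youtube_dl.utils import write_json_file

    info = {'id': 'abc123', 'title': 'Example video'}   # hypothetical metadata
    write_json_file(info, 'example.info.json')          # replaces the file atomically where the OS allows it
    print(os.path.exists('example.info.json'))          # True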
| @@ -203,7 +223,7 @@ def sanitize_open(filename, open_mode): | ||||
|     It returns the tuple (stream, definitive_file_name). | ||||
|     """ | ||||
|     try: | ||||
|         if filename == u'-': | ||||
|         if filename == '-': | ||||
|             if sys.platform == 'win32': | ||||
|                 import msvcrt | ||||
|                 msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) | ||||
| @@ -216,7 +236,7 @@ def sanitize_open(filename, open_mode): | ||||
|  | ||||
|         # In case of error, try to remove win32 forbidden chars | ||||
|         alt_filename = os.path.join( | ||||
|                         re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part) | ||||
|                         re.sub('[/<>:"\\|\\\\?\\*]', '#', path_part) | ||||
|                         for path_part in os.path.split(filename) | ||||
|                        ) | ||||
|         if alt_filename == filename: | ||||
| @@ -255,7 +275,7 @@ def sanitize_filename(s, restricted=False, is_id=False): | ||||
|             return '_' | ||||
|         return char | ||||
|  | ||||
|     result = u''.join(map(replace_insane, s)) | ||||
|     result = ''.join(map(replace_insane, s)) | ||||
|     if not is_id: | ||||
|         while '__' in result: | ||||
|             result = result.replace('__', '_') | ||||
| @@ -285,15 +305,15 @@ def _htmlentity_transform(entity): | ||||
|     mobj = re.match(r'#(x?[0-9]+)', entity) | ||||
|     if mobj is not None: | ||||
|         numstr = mobj.group(1) | ||||
|         if numstr.startswith(u'x'): | ||||
|         if numstr.startswith('x'): | ||||
|             base = 16 | ||||
|             numstr = u'0%s' % numstr | ||||
|             numstr = '0%s' % numstr | ||||
|         else: | ||||
|             base = 10 | ||||
|         return compat_chr(int(numstr, base)) | ||||
|  | ||||
|     # Unknown entity in name, return its literal representation | ||||
|     return (u'&%s;' % entity) | ||||
|     return ('&%s;' % entity) | ||||
|  | ||||
|  | ||||
| def unescapeHTML(s): | ||||
| @@ -317,7 +337,7 @@ def encodeFilename(s, for_subprocess=False): | ||||
|         return s | ||||
|  | ||||
|     if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: | ||||
|         # Pass u'' directly to use Unicode APIs on Windows 2000 and up | ||||
|         # Pass '' directly to use Unicode APIs on Windows 2000 and up | ||||
|         # (Detecting Windows NT 4 is tricky because 'major >= 4' would | ||||
|         # match Windows 9x series as well. Besides, NT 4 is obsolete.) | ||||
|         if not for_subprocess: | ||||
| @@ -400,6 +420,7 @@ def make_HTTPS_handler(opts_no_check_certificate, **kwargs): | ||||
|             pass  # Python < 3.4 | ||||
|         return compat_urllib_request.HTTPSHandler(context=context, **kwargs) | ||||
|  | ||||
|  | ||||
| class ExtractorError(Exception): | ||||
|     """Error during info extraction.""" | ||||
|     def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None): | ||||
| @@ -412,9 +433,15 @@ class ExtractorError(Exception): | ||||
|         if video_id is not None: | ||||
|             msg = video_id + ': ' + msg | ||||
|         if cause: | ||||
|             msg += u' (caused by %r)' % cause | ||||
|             msg += ' (caused by %r)' % cause | ||||
|         if not expected: | ||||
|             msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type  youtube-dl -U  to update.' | ||||
|             if ytdl_is_updateable(): | ||||
|                 update_cmd = 'type  youtube-dl -U  to update' | ||||
|             else: | ||||
|                 update_cmd = 'see  https://yt-dl.org/update  on how to update' | ||||
|             msg += '; please report this issue on https://yt-dl.org/bug .' | ||||
|             msg += ' Make sure you are using the latest version; %s.' % update_cmd | ||||
|             msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.' | ||||
|         super(ExtractorError, self).__init__(msg) | ||||
|  | ||||
|         self.traceback = tb | ||||
| @@ -425,7 +452,7 @@ class ExtractorError(Exception): | ||||
|     def format_traceback(self): | ||||
|         if self.traceback is None: | ||||
|             return None | ||||
|         return u''.join(traceback.format_tb(self.traceback)) | ||||
|         return ''.join(traceback.format_tb(self.traceback)) | ||||
|  | ||||
|  | ||||
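The report boilerplate is now split into shorter sentences and only suggests `youtube-dl -U` when `ytdl_is_updateable()` says the install can actually be updated that way. From an extractor's point of view nothing changes; a small sketch with a made-up video id, showing the `expected=True` path that skips the boilerplate entirely:

    from youtube_dl.utils import ExtractorError

    try:
        # expected=True marks an anticipated failure (geo-block, removed video, ...),
        # so the bug-report / update hint is not appended to the message.
        raise ExtractorError('This video is not available in your country',
                             expected=True, video_id='abc123')
    except ExtractorError as e:
        print(e)  # abc123: This video is not available in your country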
| class RegexNotFoundError(ExtractorError): | ||||
| @@ -653,17 +680,17 @@ def unified_strdate(date_str): | ||||
|             upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') | ||||
|     return upload_date | ||||
|  | ||||
| def determine_ext(url, default_ext=u'unknown_video'): | ||||
| def determine_ext(url, default_ext='unknown_video'): | ||||
|     if url is None: | ||||
|         return default_ext | ||||
|     guess = url.partition(u'?')[0].rpartition(u'.')[2] | ||||
|     guess = url.partition('?')[0].rpartition('.')[2] | ||||
|     if re.match(r'^[A-Za-z0-9]+$', guess): | ||||
|         return guess | ||||
|     else: | ||||
|         return default_ext | ||||
|  | ||||
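`determine_ext` only loses its `u` prefixes here; it still guesses an extension from the URL path and falls back to `default_ext` when the guess is not purely alphanumeric. A few illustrative calls with made-up URLs:

    from youtube_dl.utils import determine_ext

    print(determine_ext('http://example.com/video.mp4?dl=1'))   # 'mp4'
    print(determine_ext('http://example.com/watch'))            # 'unknown_video' (fallback)
    print(determine_ext(None, default_ext='mp4'))               # 'mp4'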
| def subtitles_filename(filename, sub_lang, sub_format): | ||||
|     return filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format | ||||
|     return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format | ||||
|  | ||||
| def date_from_str(date_str): | ||||
|     """ | ||||
| @@ -843,7 +870,7 @@ def bytes_to_intlist(bs): | ||||
| def intlist_to_bytes(xs): | ||||
|     if not xs: | ||||
|         return b'' | ||||
|     return struct.pack('%dB' % len(xs), *xs) | ||||
|     return struct_pack('%dB' % len(xs), *xs) | ||||
|  | ||||
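Routing `intlist_to_bytes` through the `struct_pack` shim keeps it working on Python 2.6, where `struct` rejects unicode format strings. A round-trip sketch, assuming `youtube_dl.utils` is importable:

    from youtube_dl.utils import bytes_to_intlist, intlist_to_bytes

    data = b'\x01\x02\xff'
    ints = bytes_to_intlist(data)          # [1, 2, 255]
    assert intlist_to_bytes(ints) == data  # round trip is lossless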
|  | ||||
| # Cross-platform file locking | ||||
| @@ -955,7 +982,7 @@ def shell_quote(args): | ||||
|             # We may get a filename encoded with 'encodeFilename' | ||||
|             a = a.decode(encoding) | ||||
|         quoted_args.append(pipes.quote(a)) | ||||
|     return u' '.join(quoted_args) | ||||
|     return ' '.join(quoted_args) | ||||
|  | ||||
|  | ||||
| def takewhile_inclusive(pred, seq): | ||||
| @@ -971,31 +998,31 @@ def smuggle_url(url, data): | ||||
|     """ Pass additional data in a URL for internal use. """ | ||||
|  | ||||
|     sdata = compat_urllib_parse.urlencode( | ||||
|         {u'__youtubedl_smuggle': json.dumps(data)}) | ||||
|     return url + u'#' + sdata | ||||
|         {'__youtubedl_smuggle': json.dumps(data)}) | ||||
|     return url + '#' + sdata | ||||
|  | ||||
|  | ||||
| def unsmuggle_url(smug_url, default=None): | ||||
|     if '#__youtubedl_smuggle' not in smug_url: | ||||
|         return smug_url, default | ||||
|     url, _, sdata = smug_url.rpartition(u'#') | ||||
|     jsond = compat_parse_qs(sdata)[u'__youtubedl_smuggle'][0] | ||||
|     url, _, sdata = smug_url.rpartition('#') | ||||
|     jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] | ||||
|     data = json.loads(jsond) | ||||
|     return url, data | ||||
|  | ||||
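`smuggle_url`/`unsmuggle_url` only lose their `u` prefixes; the round trip through the URL fragment is unchanged. Illustration with a made-up URL and payload:

    from youtube_dl.utils import smuggle_url, unsmuggle_url

    url = smuggle_url('http://example.com/embed/1',
                      {'referrer': 'http://example.com/page'})
    clean_url, data = unsmuggle_url(url)
    assert clean_url == 'http://example.com/embed/1'
    assert data == {'referrer': 'http://example.com/page'}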
|  | ||||
| def format_bytes(bytes): | ||||
|     if bytes is None: | ||||
|         return u'N/A' | ||||
|         return 'N/A' | ||||
|     if type(bytes) is str: | ||||
|         bytes = float(bytes) | ||||
|     if bytes == 0.0: | ||||
|         exponent = 0 | ||||
|     else: | ||||
|         exponent = int(math.log(bytes, 1024.0)) | ||||
|     suffix = [u'B', u'KiB', u'MiB', u'GiB', u'TiB', u'PiB', u'EiB', u'ZiB', u'YiB'][exponent] | ||||
|     suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent] | ||||
|     converted = float(bytes) / float(1024 ** exponent) | ||||
|     return u'%.2f%s' % (converted, suffix) | ||||
|     return '%.2f%s' % (converted, suffix) | ||||
|  | ||||
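`format_bytes` is likewise only touched cosmetically; it picks the largest binary unit and prints two decimals. For example (values chosen for illustration):

    from youtube_dl.utils import format_bytes

    print(format_bytes(1536))       # '1.50KiB'
    print(format_bytes(10 ** 9))    # '953.67MiB'
    print(format_bytes(None))       # 'N/A'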
|  | ||||
| def get_term_width(): | ||||
| @@ -1018,8 +1045,8 @@ def month_by_name(name): | ||||
|     """ Return the number of a month by its English name (locale-independently) """ | ||||
|  | ||||
|     ENGLISH_NAMES = [ | ||||
|         u'January', u'February', u'March', u'April', u'May', u'June', | ||||
|         u'July', u'August', u'September', u'October', u'November', u'December'] | ||||
|         'January', 'February', 'March', 'April', 'May', 'June', | ||||
|         'July', 'August', 'September', 'October', 'November', 'December'] | ||||
|     try: | ||||
|         return ENGLISH_NAMES.index(name) + 1 | ||||
|     except ValueError: | ||||
| @@ -1030,7 +1057,7 @@ def fix_xml_ampersands(xml_str): | ||||
|     """Replace all the '&' by '&amp;' in XML""" | ||||
|     return re.sub( | ||||
|         r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)', | ||||
|         u'&', | ||||
|         '&', | ||||
|         xml_str) | ||||
|  | ||||
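`fix_xml_ampersands` escapes bare ampersands without touching entities or numeric character references that are already present. A quick illustration with made-up markup:

    from youtube_dl.utils import fix_xml_ampersands

    print(fix_xml_ampersands('<t>Tom & Jerry &amp; friends &#38; co</t>'))
    # <t>Tom &amp; Jerry &amp; friends &#38; co</t>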
|  | ||||
| @@ -1063,7 +1090,7 @@ def remove_end(s, end): | ||||
|  | ||||
| def url_basename(url): | ||||
|     path = compat_urlparse.urlparse(url).path | ||||
|     return path.strip(u'/').split(u'/')[-1] | ||||
|     return path.strip('/').split('/')[-1] | ||||
|  | ||||
|  | ||||
| class HEADRequest(compat_urllib_request.Request): | ||||
| @@ -1088,7 +1115,7 @@ def str_to_int(int_str): | ||||
|     """ A more relaxed version of int_or_none """ | ||||
|     if int_str is None: | ||||
|         return None | ||||
|     int_str = re.sub(r'[,\.\+]', u'', int_str) | ||||
|     int_str = re.sub(r'[,\.\+]', '', int_str) | ||||
|     return int(int_str) | ||||
|  | ||||
|  | ||||
| @@ -1103,7 +1130,12 @@ def parse_duration(s): | ||||
|     s = s.strip() | ||||
|  | ||||
|     m = re.match( | ||||
|         r'(?i)(?:(?:(?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*)?(?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?$', s) | ||||
|         r'''(?ix)T? | ||||
|             (?: | ||||
|                 (?:(?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*)? | ||||
|                 (?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s* | ||||
|             )? | ||||
|             (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?$''', s) | ||||
|     if not m: | ||||
|         return None | ||||
|     res = int(m.group('secs')) | ||||
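The rewritten pattern keeps the same grammar, just spelled out with `(?x)` and tolerating a leading `T`, so ISO-8601-style fragments parse alongside the colon and word forms. A few values it should accept (untested sketch):

    from youtube_dl.utils import parse_duration

    print(parse_duration('1:30'))        # 90
    print(parse_duration('2h 3m 10s'))   # 7390
    print(parse_duration('T1H30M5S'))    # 5405
    print(parse_duration('not a time'))  # None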
| @@ -1118,7 +1150,7 @@ def parse_duration(s): | ||||
|  | ||||
| def prepend_extension(filename, ext): | ||||
|     name, real_ext = os.path.splitext(filename)  | ||||
|     return u'{0}.{1}{2}'.format(name, ext, real_ext) | ||||
|     return '{0}.{1}{2}'.format(name, ext, real_ext) | ||||
|  | ||||
|  | ||||
| def check_executable(exe, args=[]): | ||||
| @@ -1133,7 +1165,7 @@ def check_executable(exe, args=[]): | ||||
|  | ||||
| def get_exe_version(exe, args=['--version'], | ||||
|                     version_re=r'version\s+([-0-9._a-zA-Z]+)', | ||||
|                     unrecognized=u'present'): | ||||
|                     unrecognized='present'): | ||||
|     """ Returns the version of the specified executable, | ||||
|     or False if the executable is not present """ | ||||
|     try: | ||||
| @@ -1254,7 +1286,7 @@ def escape_url(url): | ||||
|     ).geturl() | ||||
|  | ||||
| try: | ||||
|     struct.pack(u'!I', 0) | ||||
|     struct.pack('!I', 0) | ||||
| except TypeError: | ||||
|     # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument | ||||
|     def struct_pack(spec, *args): | ||||
| @@ -1275,7 +1307,7 @@ def read_batch_urls(batch_fd): | ||||
|     def fixup(url): | ||||
|         if not isinstance(url, compat_str): | ||||
|             url = url.decode('utf-8', 'replace') | ||||
|         BOM_UTF8 = u'\xef\xbb\xbf' | ||||
|         BOM_UTF8 = '\xef\xbb\xbf' | ||||
|         if url.startswith(BOM_UTF8): | ||||
|             url = url[len(BOM_UTF8):] | ||||
|         url = url.strip() | ||||
| @@ -1331,7 +1363,8 @@ def parse_age_limit(s): | ||||
|  | ||||
|  | ||||
| def strip_jsonp(code): | ||||
|     return re.sub(r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?\s*$', r'\1', code) | ||||
|     return re.sub( | ||||
|         r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code) | ||||
|  | ||||
|  | ||||
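The added `(?://[^\n]*)*` tail lets `strip_jsonp` also drop a trailing line comment after the callback invocation, which some JSONP endpoints append. Sketch with a made-up payload:

    import json
    from youtube_dl.utils import strip_jsonp

    payload = 'callback({"status": "ok"}); // served by cdn-03'
    print(json.loads(strip_jsonp(payload)))   # {'status': 'ok'}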
| def js_to_json(code): | ||||
| @@ -1393,3 +1426,10 @@ def is_outdated_version(version, limit, assume_new=True): | ||||
|         return version_tuple(version) < version_tuple(limit) | ||||
|     except ValueError: | ||||
|         return not assume_new | ||||
|  | ||||
|  | ||||
| def ytdl_is_updateable(): | ||||
|     """ Returns True if youtube-dl can be updated with -U """ | ||||
|     from zipimport import zipimporter | ||||
|  | ||||
|     return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen') | ||||
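The check is deliberately coarse: only the self-contained zip build (where `__loader__` is a `zipimporter`) and the frozen Windows exe can update themselves with `-U`; pip installs and git checkouts return False, which is what the new ExtractorError wording keys off. Usage sketch:

    from youtube_dl.utils import ytdl_is_updateable

    if ytdl_is_updateable():
        print('run  youtube-dl -U  to update')
    else:
        print('update through your package manager, pip, or git instead')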
|   | ||||
| @@ -1,2 +1,2 @@ | ||||
|  | ||||
| __version__ = '2014.11.13.1' | ||||
| __version__ = '2014.11.23' | ||||
|   | ||||