mirror of
				https://github.com/ytdl-org/youtube-dl.git
				synced 2025-10-29 09:26:20 -07:00 
			
		
		
		
	[compat] Add compat_urllib_parse_urlencode and eliminate encode_dict
encode_dict functionality has been improved and moved directly into compat_urllib_parse_urlencode. All occurrences of compat_urllib_parse.urlencode throughout the codebase have been replaced by compat_urllib_parse_urlencode. Closes #8974
This commit is contained in:
		| @@ -169,6 +169,31 @@ except ImportError:  # Python 2 | ||||
|         string = string.replace('+', ' ') | ||||
|         return compat_urllib_parse_unquote(string, encoding, errors) | ||||
|  | ||||
| try: | ||||
|     from urllib.parse import urlencode as compat_urllib_parse_urlencode | ||||
| except ImportError:  # Python 2 | ||||
|     # Python 2 will choke in urlencode on mixture of byte and unicode strings. | ||||
|     # Possible solutions are to either port it from python 3 with all | ||||
|     # the friends or manually ensure input query contains only byte strings. | ||||
|     # We will stick with latter thus recursively encoding the whole query. | ||||
|     def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'): | ||||
|         def encode_elem(e): | ||||
|             if isinstance(e, dict): | ||||
|                 e = encode_dict(e) | ||||
|             elif isinstance(e, (list, tuple,)): | ||||
|                 e = encode_list(e) | ||||
|             elif isinstance(e, compat_str): | ||||
|                 e = e.encode(encoding) | ||||
|             return e | ||||
|  | ||||
|         def encode_dict(d): | ||||
|             return dict((encode_elem(k), encode_elem(v)) for k, v in d.items()) | ||||
|  | ||||
|         def encode_list(l): | ||||
|             return [encode_elem(e) for e in l] | ||||
|  | ||||
|         return compat_urllib_parse.urlencode(encode_elem(query), doseq=doseq) | ||||
|  | ||||
| try: | ||||
|     from urllib.request import DataHandler as compat_urllib_request_DataHandler | ||||
| except ImportError:  # Python < 3.4 | ||||
| @@ -588,6 +613,7 @@ __all__ = [ | ||||
|     'compat_urllib_parse_unquote', | ||||
|     'compat_urllib_parse_unquote_plus', | ||||
|     'compat_urllib_parse_unquote_to_bytes', | ||||
|     'compat_urllib_parse_urlencode', | ||||
|     'compat_urllib_parse_urlparse', | ||||
|     'compat_urllib_request', | ||||
|     'compat_urllib_request_DataHandler', | ||||
|   | ||||
| @@ -6,7 +6,7 @@ from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_HTTPError, | ||||
|     compat_str, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urllib_parse_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -60,7 +60,7 @@ class AddAnimeIE(InfoExtractor): | ||||
|             confirm_url = ( | ||||
|                 parsed_url.scheme + '://' + parsed_url.netloc + | ||||
|                 action + '?' + | ||||
|                 compat_urllib_parse.urlencode({ | ||||
|                 compat_urllib_parse_urlencode({ | ||||
|                     'jschl_vc': vc, 'jschl_answer': compat_str(av_val)})) | ||||
|             self._download_webpage( | ||||
|                 confirm_url, video_id, | ||||
|   | ||||
| @@ -9,7 +9,6 @@ from ..compat import ( | ||||
| ) | ||||
| from ..utils import ( | ||||
|     determine_ext, | ||||
|     encode_dict, | ||||
|     extract_attributes, | ||||
|     ExtractorError, | ||||
|     sanitized_Request, | ||||
| @@ -71,7 +70,7 @@ class AnimeOnDemandIE(InfoExtractor): | ||||
|             post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url) | ||||
|  | ||||
|         request = sanitized_Request( | ||||
|             post_url, urlencode_postdata(encode_dict(login_form))) | ||||
|             post_url, urlencode_postdata(login_form)) | ||||
|         request.add_header('Referer', self._LOGIN_URL) | ||||
|  | ||||
|         response = self._download_webpage( | ||||
|   | ||||
| @@ -8,7 +8,7 @@ import re | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_str, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     int_or_none, | ||||
| @@ -86,7 +86,7 @@ class AtresPlayerIE(InfoExtractor): | ||||
|         } | ||||
|  | ||||
|         request = sanitized_Request( | ||||
|             self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) | ||||
|             self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8')) | ||||
|         request.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|         response = self._download_webpage( | ||||
|             request, None, 'Logging in as %s' % username) | ||||
|   | ||||
| @@ -5,7 +5,7 @@ import itertools | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_str, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -58,7 +58,7 @@ class BambuserIE(InfoExtractor): | ||||
|         } | ||||
|  | ||||
|         request = sanitized_Request( | ||||
|             self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) | ||||
|             self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8')) | ||||
|         request.add_header('Referer', self._LOGIN_URL) | ||||
|         response = self._download_webpage( | ||||
|             request, None, 'Logging in as %s' % username) | ||||
|   | ||||
| @@ -6,7 +6,7 @@ import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -139,7 +139,7 @@ class CamdemyFolderIE(InfoExtractor): | ||||
|         parsed_url = list(compat_urlparse.urlparse(url)) | ||||
|         query = dict(compat_urlparse.parse_qsl(parsed_url[4])) | ||||
|         query.update({'displayMode': 'list'}) | ||||
|         parsed_url[4] = compat_urllib_parse.urlencode(query) | ||||
|         parsed_url[4] = compat_urllib_parse_urlencode(query) | ||||
|         final_url = compat_urlparse.urlunparse(parsed_url) | ||||
|  | ||||
|         page = self._download_webpage(final_url, folder_id) | ||||
|   | ||||
| @@ -5,8 +5,8 @@ import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_unquote, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urllib_parse_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -102,7 +102,7 @@ class CeskaTelevizeIE(InfoExtractor): | ||||
|  | ||||
|         req = sanitized_Request( | ||||
|             'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist', | ||||
|             data=compat_urllib_parse.urlencode(data)) | ||||
|             data=compat_urllib_parse_urlencode(data)) | ||||
|  | ||||
|         req.add_header('Content-type', 'application/x-www-form-urlencoded') | ||||
|         req.add_header('x-addr', '127.0.0.1') | ||||
|   | ||||
| @@ -6,7 +6,7 @@ import re | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_parse_qs, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_HTTPError, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -64,7 +64,7 @@ class CloudyIE(InfoExtractor): | ||||
|                 'errorUrl': error_url, | ||||
|             }) | ||||
|  | ||||
|         data_url = self._API_URL % (video_host, compat_urllib_parse.urlencode(form)) | ||||
|         data_url = self._API_URL % (video_host, compat_urllib_parse_urlencode(form)) | ||||
|         player_data = self._download_webpage( | ||||
|             data_url, video_id, 'Downloading player data') | ||||
|         data = compat_parse_qs(player_data) | ||||
|   | ||||
| @@ -5,7 +5,7 @@ import re | ||||
| from .mtv import MTVServicesInfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_str, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
| @@ -201,7 +201,7 @@ class ComedyCentralShowsIE(MTVServicesInfoExtractor): | ||||
|         # Correct cc.com in uri | ||||
|         uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.com', uri) | ||||
|  | ||||
|         index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri})) | ||||
|         index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse_urlencode({'uri': uri})) | ||||
|         idoc = self._download_xml( | ||||
|             index_url, epTitle, | ||||
|             'Downloading show index', 'Unable to download episode index') | ||||
|   | ||||
| @@ -21,7 +21,7 @@ from ..compat import ( | ||||
|     compat_os_name, | ||||
|     compat_str, | ||||
|     compat_urllib_error, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -1300,7 +1300,7 @@ class InfoExtractor(object): | ||||
|                         'plugin': 'flowplayer-3.2.0.1', | ||||
|                     } | ||||
|                 f4m_url += '&' if '?' in f4m_url else '?' | ||||
|                 f4m_url += compat_urllib_parse.urlencode(f4m_params) | ||||
|                 f4m_url += compat_urllib_parse_urlencode(f4m_params) | ||||
|                 formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False)) | ||||
|                 continue | ||||
|  | ||||
|   | ||||
| @@ -5,7 +5,7 @@ import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urllib_parse_urlparse, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| @@ -97,7 +97,7 @@ class CondeNastIE(InfoExtractor): | ||||
|         video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, 'video id') | ||||
|         player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, 'player id') | ||||
|         target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, 'target') | ||||
|         data = compat_urllib_parse.urlencode({'videoId': video_id, | ||||
|         data = compat_urllib_parse_urlencode({'videoId': video_id, | ||||
|                                               'playerId': player_id, | ||||
|                                               'target': target, | ||||
|                                               }) | ||||
|   | ||||
| @@ -11,8 +11,8 @@ from math import pow, sqrt, floor | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_etree_fromstring, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_unquote, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urllib_request, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| @@ -78,7 +78,7 @@ class CrunchyrollBaseIE(InfoExtractor): | ||||
|         # See https://github.com/rg3/youtube-dl/issues/7202. | ||||
|         qs['skip_wall'] = ['1'] | ||||
|         return compat_urlparse.urlunparse( | ||||
|             parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True))) | ||||
|             parsed_url._replace(query=compat_urllib_parse_urlencode(qs, True))) | ||||
|  | ||||
|  | ||||
| class CrunchyrollIE(CrunchyrollBaseIE): | ||||
| @@ -308,7 +308,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text | ||||
|  | ||||
|         playerdata_url = compat_urllib_parse_unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url')) | ||||
|         playerdata_req = sanitized_Request(playerdata_url) | ||||
|         playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url}) | ||||
|         playerdata_req.data = compat_urllib_parse_urlencode({'current_page': webpage_url}) | ||||
|         playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|         playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info') | ||||
|  | ||||
| @@ -322,7 +322,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text | ||||
|             streamdata_req = sanitized_Request( | ||||
|                 'http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=%s&video_quality=%s' | ||||
|                 % (stream_id, stream_format, stream_quality), | ||||
|                 compat_urllib_parse.urlencode({'current_page': url}).encode('utf-8')) | ||||
|                 compat_urllib_parse_urlencode({'current_page': url}).encode('utf-8')) | ||||
|             streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|             streamdata = self._download_xml( | ||||
|                 streamdata_req, video_id, | ||||
|   | ||||
| @@ -8,8 +8,8 @@ import itertools | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_parse_qs, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_unquote, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -70,7 +70,7 @@ class DaumIE(InfoExtractor): | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         video_id = compat_urllib_parse_unquote(self._match_id(url)) | ||||
|         query = compat_urllib_parse.urlencode({'vid': video_id}) | ||||
|         query = compat_urllib_parse_urlencode({'vid': video_id}) | ||||
|         movie_data = self._download_json( | ||||
|             'http://videofarm.daum.net/controller/api/closed/v1_2/IntegratedMovieData.json?' + query, | ||||
|             video_id, 'Downloading video formats info') | ||||
| @@ -86,7 +86,7 @@ class DaumIE(InfoExtractor): | ||||
|         formats = [] | ||||
|         for format_el in movie_data['output_list']['output_list']: | ||||
|             profile = format_el['profile'] | ||||
|             format_query = compat_urllib_parse.urlencode({ | ||||
|             format_query = compat_urllib_parse_urlencode({ | ||||
|                 'vid': video_id, | ||||
|                 'profile': profile, | ||||
|             }) | ||||
|   | ||||
| @@ -6,7 +6,7 @@ import base64 | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_str, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -106,7 +106,7 @@ class DCNVideoIE(DCNBaseIE): | ||||
|  | ||||
|         webpage = self._download_webpage( | ||||
|             'http://admin.mangomolo.com/analytics/index.php/customers/embed/video?' + | ||||
|             compat_urllib_parse.urlencode({ | ||||
|             compat_urllib_parse_urlencode({ | ||||
|                 'id': video_data['id'], | ||||
|                 'user_id': video_data['user_id'], | ||||
|                 'signature': video_data['signature'], | ||||
| @@ -133,7 +133,7 @@ class DCNLiveIE(DCNBaseIE): | ||||
|  | ||||
|         webpage = self._download_webpage( | ||||
|             'http://admin.mangomolo.com/analytics/index.php/customers/embed/index?' + | ||||
|             compat_urllib_parse.urlencode({ | ||||
|             compat_urllib_parse_urlencode({ | ||||
|                 'id': base64.b64encode(channel_data['user_id'].encode()).decode(), | ||||
|                 'channelid': base64.b64encode(channel_data['id'].encode()).decode(), | ||||
|                 'signature': channel_data['signature'], | ||||
| @@ -174,7 +174,7 @@ class DCNSeasonIE(InfoExtractor): | ||||
|         data['show_id'] = show_id | ||||
|         request = sanitized_Request( | ||||
|             'http://admin.mangomolo.com/analytics/index.php/plus/show', | ||||
|             compat_urllib_parse.urlencode(data), | ||||
|             compat_urllib_parse_urlencode(data), | ||||
|             { | ||||
|                 'Origin': 'http://www.dcndigital.ae', | ||||
|                 'Content-Type': 'application/x-www-form-urlencoded' | ||||
|   | ||||
| @@ -6,7 +6,7 @@ import itertools | ||||
| from .amp import AMPIE | ||||
| from ..compat import ( | ||||
|     compat_HTTPError, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -50,7 +50,7 @@ class DramaFeverBaseIE(AMPIE): | ||||
|         } | ||||
|  | ||||
|         request = sanitized_Request( | ||||
|             self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) | ||||
|             self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8')) | ||||
|         response = self._download_webpage( | ||||
|             request, None, 'Logging in as %s' % username) | ||||
|  | ||||
|   | ||||
| @@ -3,7 +3,7 @@ from __future__ import unicode_literals | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     unescapeHTML | ||||
| @@ -43,7 +43,7 @@ class EroProfileIE(InfoExtractor): | ||||
|         if username is None: | ||||
|             return | ||||
|  | ||||
|         query = compat_urllib_parse.urlencode({ | ||||
|         query = compat_urllib_parse_urlencode({ | ||||
|             'username': username, | ||||
|             'password': password, | ||||
|             'url': 'http://www.eroprofile.com/', | ||||
|   | ||||
| @@ -5,12 +5,11 @@ import hashlib | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urllib_request, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     encode_dict, | ||||
|     ExtractorError, | ||||
|     sanitized_Request, | ||||
| ) | ||||
| @@ -57,7 +56,7 @@ class FC2IE(InfoExtractor): | ||||
|             'Submit': ' Login ', | ||||
|         } | ||||
|  | ||||
|         login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8') | ||||
|         login_data = compat_urllib_parse_urlencode(login_form_strs).encode('utf-8') | ||||
|         request = sanitized_Request( | ||||
|             'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data) | ||||
|  | ||||
|   | ||||
| @@ -4,8 +4,8 @@ import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_parse_qs, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urllib_parse_urlparse, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| @@ -109,7 +109,7 @@ class FiveMinIE(InfoExtractor): | ||||
|  | ||||
|         response = self._download_json( | ||||
|             'https://syn.5min.com/handlers/SenseHandler.ashx?' + | ||||
|             compat_urllib_parse.urlencode({ | ||||
|             compat_urllib_parse_urlencode({ | ||||
|                 'func': 'GetResults', | ||||
|                 'playlist': video_id, | ||||
|                 'sid': sid, | ||||
|   | ||||
| @@ -1,7 +1,7 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     int_or_none, | ||||
| @@ -42,7 +42,7 @@ class FlickrIE(InfoExtractor): | ||||
|         } | ||||
|         if secret: | ||||
|             query['secret'] = secret | ||||
|         data = self._download_json(self._API_BASE_URL + compat_urllib_parse.urlencode(query), video_id, note) | ||||
|         data = self._download_json(self._API_BASE_URL + compat_urllib_parse_urlencode(query), video_id, note) | ||||
|         if data['stat'] != 'ok': | ||||
|             raise ExtractorError(data['message']) | ||||
|         return data | ||||
|   | ||||
| @@ -5,7 +5,6 @@ from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
|     clean_html, | ||||
|     determine_ext, | ||||
|     encode_dict, | ||||
|     int_or_none, | ||||
|     sanitized_Request, | ||||
|     ExtractorError, | ||||
| @@ -54,10 +53,10 @@ class FunimationIE(InfoExtractor): | ||||
|         (username, password) = self._get_login_info() | ||||
|         if username is None: | ||||
|             return | ||||
|         data = urlencode_postdata(encode_dict({ | ||||
|         data = urlencode_postdata({ | ||||
|             'email_field': username, | ||||
|             'password_field': password, | ||||
|         })) | ||||
|         }) | ||||
|         login_request = sanitized_Request('http://www.funimation.com/login', data, headers={ | ||||
|             'User-Agent': 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0', | ||||
|             'Content-Type': 'application/x-www-form-urlencoded' | ||||
|   | ||||
| @@ -3,7 +3,7 @@ from __future__ import unicode_literals | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     remove_end, | ||||
|     HEADRequest, | ||||
| @@ -123,7 +123,7 @@ class GDCVaultIE(InfoExtractor): | ||||
|             'password': password, | ||||
|         } | ||||
|  | ||||
|         request = sanitized_Request(login_url, compat_urllib_parse.urlencode(login_form)) | ||||
|         request = sanitized_Request(login_url, compat_urllib_parse_urlencode(login_form)) | ||||
|         request.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|         self._download_webpage(request, display_id, 'Logging in') | ||||
|         start_page = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page') | ||||
|   | ||||
| @@ -3,7 +3,7 @@ from __future__ import unicode_literals | ||||
| import base64 | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     HEADRequest, | ||||
| @@ -35,7 +35,7 @@ class HotNewHipHopIE(InfoExtractor): | ||||
|                 r'"contentUrl" content="(.*?)"', webpage, 'content URL') | ||||
|             return self.url_result(video_url, ie='Youtube') | ||||
|  | ||||
|         reqdata = compat_urllib_parse.urlencode([ | ||||
|         reqdata = compat_urllib_parse_urlencode([ | ||||
|             ('mediaType', 's'), | ||||
|             ('mediaId', video_id), | ||||
|         ]) | ||||
|   | ||||
| @@ -4,7 +4,7 @@ import json | ||||
| import time | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     sanitized_Request, | ||||
| @@ -28,7 +28,7 @@ class HypemIE(InfoExtractor): | ||||
|         track_id = self._match_id(url) | ||||
|  | ||||
|         data = {'ax': 1, 'ts': time.time()} | ||||
|         request = sanitized_Request(url + '?' + compat_urllib_parse.urlencode(data)) | ||||
|         request = sanitized_Request(url + '?' + compat_urllib_parse_urlencode(data)) | ||||
|         response, urlh = self._download_webpage_handle( | ||||
|             request, track_id, 'Downloading webpage with the url') | ||||
|  | ||||
|   | ||||
| @@ -5,7 +5,7 @@ import re | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urlparse, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     xpath_with_ns, | ||||
| @@ -38,7 +38,7 @@ class InternetVideoArchiveIE(InfoExtractor): | ||||
|         # Other player ids return m3u8 urls | ||||
|         cleaned_dic['playerid'] = '247' | ||||
|         cleaned_dic['videokbrate'] = '100000' | ||||
|         return compat_urllib_parse.urlencode(cleaned_dic) | ||||
|         return compat_urllib_parse_urlencode(cleaned_dic) | ||||
|  | ||||
|     def _real_extract(self, url): | ||||
|         query = compat_urlparse.urlparse(url).query | ||||
|   | ||||
| @@ -14,7 +14,7 @@ from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_parse_qs, | ||||
|     compat_str, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urllib_parse_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -322,7 +322,7 @@ class IqiyiIE(InfoExtractor): | ||||
|             'bird_t': timestamp, | ||||
|         } | ||||
|         validation_result = self._download_json( | ||||
|             'http://kylin.iqiyi.com/validate?' + compat_urllib_parse.urlencode(validation_params), None, | ||||
|             'http://kylin.iqiyi.com/validate?' + compat_urllib_parse_urlencode(validation_params), None, | ||||
|             note='Validate credentials', errnote='Unable to validate credentials') | ||||
|  | ||||
|         MSG_MAP = { | ||||
| @@ -456,7 +456,7 @@ class IqiyiIE(InfoExtractor): | ||||
|                         'QY00001': auth_result['data']['u'], | ||||
|                     }) | ||||
|                 api_video_url += '?' if '?' not in api_video_url else '&' | ||||
|                 api_video_url += compat_urllib_parse.urlencode(param) | ||||
|                 api_video_url += compat_urllib_parse_urlencode(param) | ||||
|                 js = self._download_json( | ||||
|                     api_video_url, video_id, | ||||
|                     note='Download video info of segment %d for format %s' % (segment_index + 1, format_id)) | ||||
| @@ -494,7 +494,7 @@ class IqiyiIE(InfoExtractor): | ||||
|         } | ||||
|  | ||||
|         api_url = 'http://cache.video.qiyi.com/vms' + '?' + \ | ||||
|             compat_urllib_parse.urlencode(param) | ||||
|             compat_urllib_parse_urlencode(param) | ||||
|         raw_data = self._download_json(api_url, video_id) | ||||
|         return raw_data | ||||
|  | ||||
|   | ||||
| @@ -5,7 +5,7 @@ import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import qualities | ||||
| @@ -62,7 +62,7 @@ class IvideonIE(InfoExtractor): | ||||
|         quality = qualities(self._QUALITIES) | ||||
|  | ||||
|         formats = [{ | ||||
|             'url': 'https://streaming.ivideon.com/flv/live?%s' % compat_urllib_parse.urlencode({ | ||||
|             'url': 'https://streaming.ivideon.com/flv/live?%s' % compat_urllib_parse_urlencode({ | ||||
|                 'server': server_id, | ||||
|                 'camera': camera_id, | ||||
|                 'sessionId': 'demo', | ||||
|   | ||||
| @@ -6,7 +6,7 @@ import base64 | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
|     compat_parse_qs, | ||||
| ) | ||||
| @@ -71,7 +71,7 @@ class KalturaIE(InfoExtractor): | ||||
|                 for k, v in a.items(): | ||||
|                     params['%d:%s' % (i, k)] = v | ||||
|  | ||||
|         query = compat_urllib_parse.urlencode(params) | ||||
|         query = compat_urllib_parse_urlencode(params) | ||||
|         url = self._API_BASE + query | ||||
|         data = self._download_json(url, video_id, *args, **kwargs) | ||||
|  | ||||
|   | ||||
| @@ -5,7 +5,7 @@ import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -90,7 +90,7 @@ class Laola1TvIE(InfoExtractor): | ||||
|  | ||||
|         hd_doc = self._download_xml( | ||||
|             'http://www.laola1.tv/server/hd_video.php?%s' | ||||
|             % compat_urllib_parse.urlencode({ | ||||
|             % compat_urllib_parse_urlencode({ | ||||
|                 'play': video_id, | ||||
|                 'partner': partner_id, | ||||
|                 'portal': portal, | ||||
| @@ -108,7 +108,7 @@ class Laola1TvIE(InfoExtractor): | ||||
|  | ||||
|         req = sanitized_Request( | ||||
|             'https://club.laola1.tv/sp/laola1/api/v3/user/session/premium/player/stream-access?%s' % | ||||
|             compat_urllib_parse.urlencode({ | ||||
|             compat_urllib_parse_urlencode({ | ||||
|                 'videoId': video_id, | ||||
|                 'target': VS_TARGETS.get(kind, '2'), | ||||
|                 'label': _v('label'), | ||||
|   | ||||
| @@ -11,7 +11,7 @@ from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_ord, | ||||
|     compat_str, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     determine_ext, | ||||
| @@ -122,7 +122,7 @@ class LeIE(InfoExtractor): | ||||
|             'domain': 'www.le.com' | ||||
|         } | ||||
|         play_json_req = sanitized_Request( | ||||
|             'http://api.le.com/mms/out/video/playJson?' + compat_urllib_parse.urlencode(params) | ||||
|             'http://api.le.com/mms/out/video/playJson?' + compat_urllib_parse_urlencode(params) | ||||
|         ) | ||||
|         cn_verification_proxy = self._downloader.params.get('cn_verification_proxy') | ||||
|         if cn_verification_proxy: | ||||
| @@ -151,7 +151,7 @@ class LeIE(InfoExtractor): | ||||
|         for format_id in formats: | ||||
|             if format_id in dispatch: | ||||
|                 media_url = playurl['domain'][0] + dispatch[format_id][0] | ||||
|                 media_url += '&' + compat_urllib_parse.urlencode({ | ||||
|                 media_url += '&' + compat_urllib_parse_urlencode({ | ||||
|                     'm3v': 1, | ||||
|                     'format': 1, | ||||
|                     'expect': 3, | ||||
| @@ -305,7 +305,7 @@ class LetvCloudIE(InfoExtractor): | ||||
|             } | ||||
|             self.sign_data(data) | ||||
|             return self._download_json( | ||||
|                 'http://api.letvcloud.com/gpc.php?' + compat_urllib_parse.urlencode(data), | ||||
|                 'http://api.letvcloud.com/gpc.php?' + compat_urllib_parse_urlencode(data), | ||||
|                 media_id, 'Downloading playJson data for type %s' % cf) | ||||
|  | ||||
|         play_json = get_play_json(cf, time.time()) | ||||
|   | ||||
| @@ -6,7 +6,7 @@ import json | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_str, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
| @@ -36,7 +36,7 @@ class LyndaBaseIE(InfoExtractor): | ||||
|             'stayPut': 'false' | ||||
|         } | ||||
|         request = sanitized_Request( | ||||
|             self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) | ||||
|             self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8')) | ||||
|         login_page = self._download_webpage( | ||||
|             request, None, 'Logging in as %s' % username) | ||||
|  | ||||
| @@ -65,7 +65,7 @@ class LyndaBaseIE(InfoExtractor): | ||||
|                     'stayPut': 'false', | ||||
|                 } | ||||
|                 request = sanitized_Request( | ||||
|                     self._LOGIN_URL, compat_urllib_parse.urlencode(confirm_form).encode('utf-8')) | ||||
|                     self._LOGIN_URL, compat_urllib_parse_urlencode(confirm_form).encode('utf-8')) | ||||
|                 login_page = self._download_webpage( | ||||
|                     request, None, | ||||
|                     'Confirming log in and log out from another device') | ||||
|   | ||||
| @@ -4,7 +4,7 @@ from __future__ import unicode_literals | ||||
| import random | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     sanitized_Request, | ||||
|     xpath_text, | ||||
| @@ -29,7 +29,7 @@ class MatchTVIE(InfoExtractor): | ||||
|     def _real_extract(self, url): | ||||
|         video_id = 'matchtv-live' | ||||
|         request = sanitized_Request( | ||||
|             'http://player.matchtv.ntvplus.tv/player/smil?%s' % compat_urllib_parse.urlencode({ | ||||
|             'http://player.matchtv.ntvplus.tv/player/smil?%s' % compat_urllib_parse_urlencode({ | ||||
|                 'ts': '', | ||||
|                 'quality': 'SD', | ||||
|                 'contentId': '561d2c0df7159b37178b4567', | ||||
|   | ||||
| @@ -5,8 +5,8 @@ import re | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_parse_qs, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_unquote, | ||||
|     compat_urllib_parse_urlencode, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     determine_ext, | ||||
| @@ -117,7 +117,7 @@ class MetacafeIE(InfoExtractor): | ||||
|             'filters': '0', | ||||
|             'submit': "Continue - I'm over 18", | ||||
|         } | ||||
|         request = sanitized_Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form)) | ||||
|         request = sanitized_Request(self._FILTER_POST, compat_urllib_parse_urlencode(disclaimer_form)) | ||||
|         request.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|         self.report_age_confirmation() | ||||
|         self._download_webpage(request, None, False, 'Unable to confirm age') | ||||
|   | ||||
| @@ -2,7 +2,7 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     int_or_none, | ||||
|     parse_duration, | ||||
| @@ -39,7 +39,7 @@ class MinhatecaIE(InfoExtractor): | ||||
|         ] | ||||
|         req = sanitized_Request( | ||||
|             'http://minhateca.com.br/action/License/Download', | ||||
|             data=compat_urllib_parse.urlencode(token_data)) | ||||
|             data=compat_urllib_parse_urlencode(token_data)) | ||||
|         req.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|         data = self._download_json( | ||||
|             req, video_id, note='Downloading metadata') | ||||
|   | ||||
| @@ -2,11 +2,10 @@ from __future__ import unicode_literals | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     encode_dict, | ||||
|     get_element_by_attribute, | ||||
|     int_or_none, | ||||
| ) | ||||
| @@ -60,7 +59,7 @@ class MiTeleIE(InfoExtractor): | ||||
|                 'sta': '0', | ||||
|             } | ||||
|             media = self._download_json( | ||||
|                 '%s/?%s' % (gat, compat_urllib_parse.urlencode(encode_dict(token_data))), | ||||
|                 '%s/?%s' % (gat, compat_urllib_parse_urlencode(token_data)), | ||||
|                 display_id, 'Downloading %s JSON' % location['loc']) | ||||
|             file_ = media.get('file') | ||||
|             if not file_: | ||||
|   | ||||
| @@ -5,7 +5,7 @@ import json | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     int_or_none, | ||||
| @@ -77,7 +77,7 @@ class MoeVideoIE(InfoExtractor): | ||||
|             ], | ||||
|         ] | ||||
|         r_json = json.dumps(r) | ||||
|         post = compat_urllib_parse.urlencode({'r': r_json}) | ||||
|         post = compat_urllib_parse_urlencode({'r': r_json}) | ||||
|         req = sanitized_Request(self._API_URL, post) | ||||
|         req.add_header('Content-type', 'application/x-www-form-urlencoded') | ||||
|  | ||||
|   | ||||
| @@ -5,7 +5,7 @@ import os.path | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     remove_start, | ||||
| @@ -88,7 +88,7 @@ class MonikerIE(InfoExtractor): | ||||
|             fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage) | ||||
|             data = dict(fields) | ||||
|  | ||||
|             post = compat_urllib_parse.urlencode(data) | ||||
|             post = compat_urllib_parse_urlencode(data) | ||||
|             headers = { | ||||
|                 b'Content-Type': b'application/x-www-form-urlencoded', | ||||
|             } | ||||
|   | ||||
| @@ -3,7 +3,7 @@ from __future__ import unicode_literals | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     sanitized_Request, | ||||
| @@ -58,7 +58,7 @@ class MooshareIE(InfoExtractor): | ||||
|         } | ||||
|  | ||||
|         request = sanitized_Request( | ||||
|             'http://mooshare.biz/%s' % video_id, compat_urllib_parse.urlencode(download_form)) | ||||
|             'http://mooshare.biz/%s' % video_id, compat_urllib_parse_urlencode(download_form)) | ||||
|         request.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|  | ||||
|         self._sleep(5, video_id) | ||||
|   | ||||
| @@ -4,7 +4,7 @@ import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_str, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -171,7 +171,7 @@ class MTVServicesInfoExtractor(InfoExtractor): | ||||
|         data = {'uri': uri} | ||||
|         if self._LANG: | ||||
|             data['lang'] = self._LANG | ||||
|         return compat_urllib_parse.urlencode(data) | ||||
|         return compat_urllib_parse_urlencode(data) | ||||
|  | ||||
|     def _get_videos_info(self, uri): | ||||
|         video_id = self._id_from_uri(uri) | ||||
|   | ||||
| @@ -1,9 +1,7 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
| ) | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
|  | ||||
|  | ||||
| class MuzuTVIE(InfoExtractor): | ||||
| @@ -25,7 +23,7 @@ class MuzuTVIE(InfoExtractor): | ||||
|     def _real_extract(self, url): | ||||
|         video_id = self._match_id(url) | ||||
|  | ||||
|         info_data = compat_urllib_parse.urlencode({ | ||||
|         info_data = compat_urllib_parse_urlencode({ | ||||
|             'format': 'json', | ||||
|             'url': url, | ||||
|         }) | ||||
| @@ -41,7 +39,7 @@ class MuzuTVIE(InfoExtractor): | ||||
|             if video_info.get('v%s' % quality): | ||||
|                 break | ||||
|  | ||||
|         data = compat_urllib_parse.urlencode({ | ||||
|         data = compat_urllib_parse_urlencode({ | ||||
|             'ai': video_id, | ||||
|             # Even if each time you watch a video the hash changes, | ||||
|             # it seems to work for different videos, and it will work | ||||
|   | ||||
| @@ -9,8 +9,8 @@ import json | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_ord, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_unquote, | ||||
|     compat_urllib_parse_urlencode, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
| @@ -112,7 +112,7 @@ class MyVideoIE(InfoExtractor): | ||||
|                 encxml = compat_urllib_parse_unquote(b) | ||||
|         if not params.get('domain'): | ||||
|             params['domain'] = 'www.myvideo.de' | ||||
|         xmldata_url = '%s?%s' % (encxml, compat_urllib_parse.urlencode(params)) | ||||
|         xmldata_url = '%s?%s' % (encxml, compat_urllib_parse_urlencode(params)) | ||||
|         if 'flash_playertype=MTV' in xmldata_url: | ||||
|             self._downloader.report_warning('avoiding MTV player') | ||||
|             xmldata_url = ( | ||||
|   | ||||
| @@ -5,7 +5,7 @@ import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -53,8 +53,8 @@ class NaverIE(InfoExtractor): | ||||
|             raise ExtractorError('couldn\'t extract vid and key') | ||||
|         vid = m_id.group(1) | ||||
|         key = m_id.group(2) | ||||
|         query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key, }) | ||||
|         query_urls = compat_urllib_parse.urlencode({ | ||||
|         query = compat_urllib_parse_urlencode({'vid': vid, 'inKey': key, }) | ||||
|         query_urls = compat_urllib_parse_urlencode({ | ||||
|             'masterVid': vid, | ||||
|             'protocol': 'p2p', | ||||
|             'inKey': key, | ||||
|   | ||||
| @@ -6,7 +6,7 @@ import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -97,7 +97,7 @@ class NBAIE(InfoExtractor): | ||||
|     _PAGE_SIZE = 30 | ||||
|  | ||||
|     def _fetch_page(self, team, video_id, page): | ||||
|         search_url = 'http://searchapp2.nba.com/nba-search/query.jsp?' + compat_urllib_parse.urlencode({ | ||||
|         search_url = 'http://searchapp2.nba.com/nba-search/query.jsp?' + compat_urllib_parse_urlencode({ | ||||
|             'type': 'teamvideo', | ||||
|             'start': page * self._PAGE_SIZE + 1, | ||||
|             'npp': (page + 1) * self._PAGE_SIZE + 1, | ||||
|   | ||||
| @@ -8,7 +8,7 @@ import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_str, | ||||
|     compat_itertools_count, | ||||
| ) | ||||
| @@ -153,7 +153,7 @@ class NetEaseMusicIE(NetEaseMusicBaseIE): | ||||
|             'ids': '[%s]' % song_id | ||||
|         } | ||||
|         info = self.query_api( | ||||
|             'song/detail?' + compat_urllib_parse.urlencode(params), | ||||
|             'song/detail?' + compat_urllib_parse_urlencode(params), | ||||
|             song_id, 'Downloading song info')['songs'][0] | ||||
|  | ||||
|         formats = self.extract_formats(info) | ||||
|   | ||||
| @@ -2,7 +2,7 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .mtv import MTVServicesInfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
|  | ||||
|  | ||||
| class NextMovieIE(MTVServicesInfoExtractor): | ||||
| @@ -20,7 +20,7 @@ class NextMovieIE(MTVServicesInfoExtractor): | ||||
|     }] | ||||
|  | ||||
|     def _get_feed_query(self, uri): | ||||
|         return compat_urllib_parse.urlencode({ | ||||
|         return compat_urllib_parse_urlencode({ | ||||
|             'feed': '1505', | ||||
|             'mgid': uri, | ||||
|         }) | ||||
|   | ||||
| @@ -1,7 +1,7 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import sanitized_Request | ||||
|  | ||||
|  | ||||
| @@ -40,7 +40,7 @@ class NFBIE(InfoExtractor): | ||||
|  | ||||
|         request = sanitized_Request( | ||||
|             'https://www.nfb.ca/film/%s/player_config' % video_id, | ||||
|             compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii')) | ||||
|             compat_urllib_parse_urlencode({'getConfig': 'true'}).encode('ascii')) | ||||
|         request.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|         request.add_header('X-NFB-Referer', 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf') | ||||
|  | ||||
|   | ||||
| @@ -7,7 +7,7 @@ import os | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urlparse, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urllib_parse_urlparse | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -38,7 +38,7 @@ class NHLBaseInfoExtractor(InfoExtractor): | ||||
|             parsed_url = compat_urllib_parse_urlparse(initial_video_url) | ||||
|             filename, ext = os.path.splitext(parsed_url.path) | ||||
|             path = '%s_sd%s' % (filename, ext) | ||||
|             data = compat_urllib_parse.urlencode({ | ||||
|             data = compat_urllib_parse_urlencode({ | ||||
|                 'type': 'fvod', | ||||
|                 'path': compat_urlparse.urlunparse(parsed_url[:2] + (path,) + parsed_url[3:]) | ||||
|             }) | ||||
| @@ -211,7 +211,7 @@ class NHLVideocenterIE(NHLBaseInfoExtractor): | ||||
|             r'tab0"[^>]*?>(.*?)</td>', | ||||
|             webpage, 'playlist title', flags=re.DOTALL).lower().capitalize() | ||||
|  | ||||
|         data = compat_urllib_parse.urlencode({ | ||||
|         data = compat_urllib_parse_urlencode({ | ||||
|             'cid': cat_id, | ||||
|             # This is the default value | ||||
|             'count': 12, | ||||
|   | ||||
| @@ -2,7 +2,7 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .mtv import MTVServicesInfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
|  | ||||
|  | ||||
| class NickIE(MTVServicesInfoExtractor): | ||||
| @@ -54,7 +54,7 @@ class NickIE(MTVServicesInfoExtractor): | ||||
|     }] | ||||
|  | ||||
|     def _get_feed_query(self, uri): | ||||
|         return compat_urllib_parse.urlencode({ | ||||
|         return compat_urllib_parse_urlencode({ | ||||
|             'feed': 'nick_arc_player_prime', | ||||
|             'mgid': uri, | ||||
|         }) | ||||
|   | ||||
| @@ -7,11 +7,10 @@ import datetime | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     encode_dict, | ||||
|     ExtractorError, | ||||
|     int_or_none, | ||||
|     parse_duration, | ||||
| @@ -101,7 +100,7 @@ class NiconicoIE(InfoExtractor): | ||||
|             'mail': username, | ||||
|             'password': password, | ||||
|         } | ||||
|         login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8') | ||||
|         login_data = compat_urllib_parse_urlencode(login_form_strs).encode('utf-8') | ||||
|         request = sanitized_Request( | ||||
|             'https://secure.nicovideo.jp/secure/login', login_data) | ||||
|         login_results = self._download_webpage( | ||||
| @@ -141,7 +140,7 @@ class NiconicoIE(InfoExtractor): | ||||
|                 r'\'thumbPlayKey\'\s*:\s*\'(.*?)\'', ext_player_info, 'thumbPlayKey') | ||||
|  | ||||
|             # Get flv info | ||||
|             flv_info_data = compat_urllib_parse.urlencode({ | ||||
|             flv_info_data = compat_urllib_parse_urlencode({ | ||||
|                 'k': thumb_play_key, | ||||
|                 'v': video_id | ||||
|             }) | ||||
|   | ||||
| @@ -8,7 +8,7 @@ import hashlib | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_str, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -75,7 +75,7 @@ class NocoIE(InfoExtractor): | ||||
|             'username': username, | ||||
|             'password': password, | ||||
|         } | ||||
|         request = sanitized_Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form)) | ||||
|         request = sanitized_Request(self._LOGIN_URL, compat_urllib_parse_urlencode(login_form)) | ||||
|         request.add_header('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8') | ||||
|  | ||||
|         login = self._download_json(request, None, 'Logging in as %s' % username) | ||||
|   | ||||
| @@ -7,7 +7,6 @@ from ..compat import compat_urlparse | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     NO_DEFAULT, | ||||
|     encode_dict, | ||||
|     sanitized_Request, | ||||
|     urlencode_postdata, | ||||
| ) | ||||
| @@ -73,7 +72,7 @@ class NovaMovIE(InfoExtractor): | ||||
|             if not post_url.startswith('http'): | ||||
|                 post_url = compat_urlparse.urljoin(url, post_url) | ||||
|             request = sanitized_Request( | ||||
|                 post_url, urlencode_postdata(encode_dict(fields))) | ||||
|                 post_url, urlencode_postdata(fields)) | ||||
|             request.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|             request.add_header('Referer', post_url) | ||||
|             webpage = self._download_webpage( | ||||
|   | ||||
| @@ -1,7 +1,7 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     int_or_none, | ||||
|     qualities, | ||||
| @@ -38,7 +38,7 @@ class NprIE(InfoExtractor): | ||||
|         playlist_id = self._match_id(url) | ||||
|  | ||||
|         config = self._download_json( | ||||
|             'http://api.npr.org/query?%s' % compat_urllib_parse.urlencode({ | ||||
|             'http://api.npr.org/query?%s' % compat_urllib_parse_urlencode({ | ||||
|                 'id': playlist_id, | ||||
|                 'fields': 'titles,audio,show', | ||||
|                 'format': 'json', | ||||
|   | ||||
| @@ -9,7 +9,7 @@ from ..utils import ( | ||||
|     ExtractorError, | ||||
|     unsmuggle_url, | ||||
| ) | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
|  | ||||
|  | ||||
| class OoyalaBaseIE(InfoExtractor): | ||||
| @@ -35,7 +35,7 @@ class OoyalaBaseIE(InfoExtractor): | ||||
|         for supported_format in ('mp4', 'm3u8', 'hds', 'rtmp'): | ||||
|             auth_data = self._download_json( | ||||
|                 self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code) + | ||||
|                 compat_urllib_parse.urlencode({ | ||||
|                 compat_urllib_parse_urlencode({ | ||||
|                     'domain': domain, | ||||
|                     'supportedFormats': supported_format | ||||
|                 }), | ||||
|   | ||||
| @@ -65,7 +65,7 @@ class PatreonIE(InfoExtractor): | ||||
|  | ||||
|         request = sanitized_Request( | ||||
|             'https://www.patreon.com/processLogin', | ||||
|             compat_urllib_parse.urlencode(login_form).encode('utf-8') | ||||
|             compat_urllib_parse_urlencode(login_form).encode('utf-8') | ||||
|         ) | ||||
|         login_page = self._download_webpage(request, None, note='Logging in as %s' % username) | ||||
|  | ||||
|   | ||||
| @@ -5,7 +5,7 @@ import re | ||||
| import os.path | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     sanitized_Request, | ||||
| @@ -40,7 +40,7 @@ class PlayedIE(InfoExtractor): | ||||
|  | ||||
|         self._sleep(2, video_id) | ||||
|  | ||||
|         post = compat_urllib_parse.urlencode(data) | ||||
|         post = compat_urllib_parse_urlencode(data) | ||||
|         headers = { | ||||
|             b'Content-Type': b'application/x-www-form-urlencoded', | ||||
|         } | ||||
|   | ||||
| @@ -4,7 +4,7 @@ from __future__ import unicode_literals | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urlparse, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
| @@ -106,7 +106,7 @@ class PlaytvakIE(InfoExtractor): | ||||
|         }) | ||||
|  | ||||
|         info_url = compat_urlparse.urlunparse( | ||||
|             parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True))) | ||||
|             parsed_url._replace(query=compat_urllib_parse_urlencode(qs, True))) | ||||
|  | ||||
|         json_info = self._download_json( | ||||
|             info_url, video_id, | ||||
|   | ||||
| @@ -8,7 +8,7 @@ import collections | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_str, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -76,7 +76,7 @@ class PluralsightIE(PluralsightBaseIE): | ||||
|             post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url) | ||||
|  | ||||
|         request = sanitized_Request( | ||||
|             post_url, compat_urllib_parse.urlencode(login_form).encode('utf-8')) | ||||
|             post_url, compat_urllib_parse_urlencode(login_form).encode('utf-8')) | ||||
|         request.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|  | ||||
|         response = self._download_webpage( | ||||
|   | ||||
| @@ -2,8 +2,8 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_unquote, | ||||
|     compat_urllib_parse_urlencode, | ||||
| ) | ||||
| from .common import InfoExtractor | ||||
| from ..utils import ( | ||||
| @@ -50,7 +50,7 @@ class Porn91IE(InfoExtractor): | ||||
|             r'so.addVariable\(\'seccode\',\'([^\']+)\'', webpage, 'sec code') | ||||
|         max_vid = self._search_regex( | ||||
|             r'so.addVariable\(\'max_vid\',\'(\d+)\'', webpage, 'max vid') | ||||
|         url_params = compat_urllib_parse.urlencode({ | ||||
|         url_params = compat_urllib_parse_urlencode({ | ||||
|             'VID': file_id, | ||||
|             'mp4': '1', | ||||
|             'seccode': sec_code, | ||||
|   | ||||
| @@ -1,7 +1,7 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     sanitized_Request, | ||||
| @@ -42,7 +42,7 @@ class PrimeShareTVIE(InfoExtractor): | ||||
|         self._sleep(wait_time, video_id) | ||||
|  | ||||
|         req = sanitized_Request( | ||||
|             url, compat_urllib_parse.urlencode(fields), headers) | ||||
|             url, compat_urllib_parse_urlencode(fields), headers) | ||||
|         video_page = self._download_webpage( | ||||
|             req, video_id, 'Downloading video page') | ||||
|  | ||||
|   | ||||
| @@ -4,7 +4,7 @@ from __future__ import unicode_literals | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     determine_ext, | ||||
|     ExtractorError, | ||||
| @@ -34,7 +34,7 @@ class PromptFileIE(InfoExtractor): | ||||
|                                  expected=True) | ||||
|  | ||||
|         fields = self._hidden_inputs(webpage) | ||||
|         post = compat_urllib_parse.urlencode(fields) | ||||
|         post = compat_urllib_parse_urlencode(fields) | ||||
|         req = sanitized_Request(url, post) | ||||
|         req.add_header('Content-type', 'application/x-www-form-urlencoded') | ||||
|         webpage = self._download_webpage( | ||||
|   | ||||
| @@ -5,9 +5,7 @@ import re | ||||
|  | ||||
| from hashlib import sha1 | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
| ) | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     determine_ext, | ||||
| @@ -235,7 +233,7 @@ class ProSiebenSat1IE(InfoExtractor): | ||||
|         client_name = 'kolibri-2.0.19-splec4' | ||||
|         client_location = url | ||||
|  | ||||
|         videos_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos?%s' % compat_urllib_parse.urlencode({ | ||||
|         videos_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos?%s' % compat_urllib_parse_urlencode({ | ||||
|             'access_token': access_token, | ||||
|             'client_location': client_location, | ||||
|             'client_name': client_name, | ||||
| @@ -256,7 +254,7 @@ class ProSiebenSat1IE(InfoExtractor): | ||||
|         client_id = g[:2] + sha1(''.join([clip_id, g, access_token, client_location, g, client_name]) | ||||
|                                  .encode('utf-8')).hexdigest() | ||||
|  | ||||
|         sources_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources?%s' % (clip_id, compat_urllib_parse.urlencode({ | ||||
|         sources_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources?%s' % (clip_id, compat_urllib_parse_urlencode({ | ||||
|             'access_token': access_token, | ||||
|             'client_id': client_id, | ||||
|             'client_location': client_location, | ||||
| @@ -270,7 +268,7 @@ class ProSiebenSat1IE(InfoExtractor): | ||||
|                                           client_location, source_ids_str, g, client_name]) | ||||
|                                  .encode('utf-8')).hexdigest() | ||||
|  | ||||
|         url_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url?%s' % (clip_id, compat_urllib_parse.urlencode({ | ||||
|         url_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url?%s' % (clip_id, compat_urllib_parse_urlencode({ | ||||
|             'access_token': access_token, | ||||
|             'client_id': client_id, | ||||
|             'client_location': client_location, | ||||
|   | ||||
| @@ -2,7 +2,7 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     int_or_none, | ||||
| @@ -81,7 +81,7 @@ class ShahidIE(InfoExtractor): | ||||
|         video = self._download_json( | ||||
|             '%s/%s/%s?%s' % ( | ||||
|                 api_vars['url'], api_vars['playerType'], api_vars['id'], | ||||
|                 compat_urllib_parse.urlencode({ | ||||
|                 compat_urllib_parse_urlencode({ | ||||
|                     'apiKey': 'sh@hid0nlin3', | ||||
|                     'hash': 'b2wMCTHpSmyxGqQjJFOycRmLSex+BpTK/ooxy6vHaqs=', | ||||
|                 })), | ||||
|   | ||||
| @@ -3,7 +3,7 @@ from __future__ import unicode_literals | ||||
| import base64 | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     int_or_none, | ||||
| @@ -45,7 +45,7 @@ class SharedIE(InfoExtractor): | ||||
|  | ||||
|         download_form = self._hidden_inputs(webpage) | ||||
|         request = sanitized_Request( | ||||
|             url, compat_urllib_parse.urlencode(download_form)) | ||||
|             url, compat_urllib_parse_urlencode(download_form)) | ||||
|         request.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|  | ||||
|         video_page = self._download_webpage( | ||||
|   | ||||
| @@ -4,7 +4,7 @@ from __future__ import unicode_literals | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     parse_duration, | ||||
|     sanitized_Request, | ||||
| @@ -47,7 +47,7 @@ class ShareSixIE(InfoExtractor): | ||||
|         fields = { | ||||
|             'method_free': 'Free' | ||||
|         } | ||||
|         post = compat_urllib_parse.urlencode(fields) | ||||
|         post = compat_urllib_parse_urlencode(fields) | ||||
|         req = sanitized_Request(url, post) | ||||
|         req.add_header('Content-type', 'application/x-www-form-urlencoded') | ||||
|  | ||||
|   | ||||
| @@ -4,7 +4,7 @@ from __future__ import unicode_literals | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import sanitized_Request | ||||
|  | ||||
|  | ||||
| @@ -39,7 +39,7 @@ class SinaIE(InfoExtractor): | ||||
|     ] | ||||
|  | ||||
|     def _extract_video(self, video_id): | ||||
|         data = compat_urllib_parse.urlencode({'vid': video_id}) | ||||
|         data = compat_urllib_parse_urlencode({'vid': video_id}) | ||||
|         url_doc = self._download_xml('http://v.iask.com/v_play.php?%s' % data, | ||||
|                                      video_id, 'Downloading video url') | ||||
|         image_page = self._download_webpage( | ||||
|   | ||||
| @@ -7,7 +7,7 @@ import hashlib | ||||
| import uuid | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     int_or_none, | ||||
| @@ -175,7 +175,7 @@ class SmotriIE(InfoExtractor): | ||||
|             video_form['pass'] = hashlib.md5(video_password.encode('utf-8')).hexdigest() | ||||
|  | ||||
|         request = sanitized_Request( | ||||
|             'http://smotri.com/video/view/url/bot/', compat_urllib_parse.urlencode(video_form)) | ||||
|             'http://smotri.com/video/view/url/bot/', compat_urllib_parse_urlencode(video_form)) | ||||
|         request.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|  | ||||
|         video = self._download_json(request, video_id, 'Downloading video JSON') | ||||
| @@ -338,7 +338,7 @@ class SmotriBroadcastIE(InfoExtractor): | ||||
|             } | ||||
|  | ||||
|             request = sanitized_Request( | ||||
|                 broadcast_url + '/?no_redirect=1', compat_urllib_parse.urlencode(login_form)) | ||||
|                 broadcast_url + '/?no_redirect=1', compat_urllib_parse_urlencode(login_form)) | ||||
|             request.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|             broadcast_page = self._download_webpage( | ||||
|                 request, broadcast_id, 'Logging in and confirming age') | ||||
|   | ||||
| @@ -6,7 +6,7 @@ import re | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_str, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
| @@ -170,7 +170,7 @@ class SohuIE(InfoExtractor): | ||||
|                     if retries > 0: | ||||
|                         download_note += ' (retry #%d)' % retries | ||||
|                     part_info = self._parse_json(self._download_webpage( | ||||
|                         'http://%s/?%s' % (allot, compat_urllib_parse.urlencode(params)), | ||||
|                         'http://%s/?%s' % (allot, compat_urllib_parse_urlencode(params)), | ||||
|                         video_id, download_note), video_id) | ||||
|  | ||||
|                     video_url = part_info['url'] | ||||
|   | ||||
| @@ -11,10 +11,9 @@ from .common import ( | ||||
| from ..compat import ( | ||||
|     compat_str, | ||||
|     compat_urlparse, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     encode_dict, | ||||
|     ExtractorError, | ||||
|     int_or_none, | ||||
|     unified_strdate, | ||||
| @@ -393,7 +392,7 @@ class SoundcloudUserIE(SoundcloudIE): | ||||
|         query = COMMON_QUERY.copy() | ||||
|         query['offset'] = 0 | ||||
|  | ||||
|         next_href = base_url + '?' + compat_urllib_parse.urlencode(query) | ||||
|         next_href = base_url + '?' + compat_urllib_parse_urlencode(query) | ||||
|  | ||||
|         entries = [] | ||||
|         for i in itertools.count(): | ||||
| @@ -424,7 +423,7 @@ class SoundcloudUserIE(SoundcloudIE): | ||||
|             qs = compat_urlparse.parse_qs(parsed_next_href.query) | ||||
|             qs.update(COMMON_QUERY) | ||||
|             next_href = compat_urlparse.urlunparse( | ||||
|                 parsed_next_href._replace(query=compat_urllib_parse.urlencode(qs, True))) | ||||
|                 parsed_next_href._replace(query=compat_urllib_parse_urlencode(qs, True))) | ||||
|  | ||||
|         return { | ||||
|             '_type': 'playlist', | ||||
| @@ -460,7 +459,7 @@ class SoundcloudPlaylistIE(SoundcloudIE): | ||||
|         if token: | ||||
|             data_dict['secret_token'] = token | ||||
|  | ||||
|         data = compat_urllib_parse.urlencode(data_dict) | ||||
|         data = compat_urllib_parse_urlencode(data_dict) | ||||
|         data = self._download_json( | ||||
|             base_url + data, playlist_id, 'Downloading playlist') | ||||
|  | ||||
| @@ -500,7 +499,8 @@ class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE): | ||||
|         query['client_id'] = self._CLIENT_ID | ||||
|         query['linked_partitioning'] = '1' | ||||
|         query['offset'] = 0 | ||||
|         data = compat_urllib_parse.urlencode(encode_dict(query)) | ||||
|         data = compat_urllib_parse_urlencode(query) | ||||
|         next_url = '{0}{1}?{2}'.format(self._API_V2_BASE, endpoint, data) | ||||
|  | ||||
|         collected_results = 0 | ||||
|   | ||||
| @@ -4,7 +4,7 @@ from __future__ import unicode_literals | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import sanitized_Request | ||||
|  | ||||
|  | ||||
| @@ -35,7 +35,7 @@ class StreamcloudIE(InfoExtractor): | ||||
|             (?:id="[^"]+"\s+)? | ||||
|             value="([^"]*)" | ||||
|             ''', orig_webpage) | ||||
|         post = compat_urllib_parse.urlencode(fields) | ||||
|         post = compat_urllib_parse_urlencode(fields) | ||||
|  | ||||
|         self._sleep(12, video_id) | ||||
|         headers = { | ||||
|   | ||||
| @@ -5,8 +5,8 @@ import json | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_unquote, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -74,7 +74,7 @@ class TelecincoIE(InfoExtractor): | ||||
|         info_el = self._download_xml(info_url, episode).find('./video/info') | ||||
|  | ||||
|         video_link = info_el.find('videoUrl/link').text | ||||
|         token_query = compat_urllib_parse.urlencode({'id': video_link}) | ||||
|         token_query = compat_urllib_parse_urlencode({'id': video_link}) | ||||
|         token_info = self._download_json( | ||||
|             embed_data['flashvars']['ov_tk'] + '?' + token_query, | ||||
|             episode, | ||||
|   | ||||
| @@ -5,7 +5,7 @@ import codecs | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     int_or_none, | ||||
| @@ -41,7 +41,7 @@ class TubiTvIE(InfoExtractor): | ||||
|             'username': username, | ||||
|             'password': password, | ||||
|         } | ||||
|         payload = compat_urllib_parse.urlencode(form_data).encode('utf-8') | ||||
|         payload = compat_urllib_parse_urlencode(form_data).encode('utf-8') | ||||
|         request = sanitized_Request(self._LOGIN_URL, payload) | ||||
|         request.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|         login_page = self._download_webpage( | ||||
|   | ||||
| @@ -9,12 +9,11 @@ from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_parse_qs, | ||||
|     compat_str, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urllib_parse_urlparse, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     encode_dict, | ||||
|     ExtractorError, | ||||
|     int_or_none, | ||||
|     orderedSet, | ||||
| @@ -82,7 +81,7 @@ class TwitchBaseIE(InfoExtractor): | ||||
|             post_url = compat_urlparse.urljoin(redirect_url, post_url) | ||||
|  | ||||
|         request = sanitized_Request( | ||||
|             post_url, compat_urllib_parse.urlencode(encode_dict(login_form)).encode('utf-8')) | ||||
|             post_url, compat_urllib_parse_urlencode(login_form).encode('utf-8')) | ||||
|         request.add_header('Referer', redirect_url) | ||||
|         response = self._download_webpage( | ||||
|             request, None, 'Logging in as %s' % username) | ||||
| @@ -250,7 +249,7 @@ class TwitchVodIE(TwitchItemBaseIE): | ||||
|         formats = self._extract_m3u8_formats( | ||||
|             '%s/vod/%s?%s' % ( | ||||
|                 self._USHER_BASE, item_id, | ||||
|                 compat_urllib_parse.urlencode({ | ||||
|                 compat_urllib_parse_urlencode({ | ||||
|                     'allow_source': 'true', | ||||
|                     'allow_audio_only': 'true', | ||||
|                     'allow_spectre': 'true', | ||||
| @@ -442,7 +441,7 @@ class TwitchStreamIE(TwitchBaseIE): | ||||
|         } | ||||
|         formats = self._extract_m3u8_formats( | ||||
|             '%s/api/channel/hls/%s.m3u8?%s' | ||||
|             % (self._USHER_BASE, channel_id, compat_urllib_parse.urlencode(query)), | ||||
|             % (self._USHER_BASE, channel_id, compat_urllib_parse_urlencode(query)), | ||||
|             channel_id, 'mp4') | ||||
|         self._prefer_source(formats) | ||||
|  | ||||
|   | ||||
| @@ -3,7 +3,7 @@ from __future__ import unicode_literals | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_HTTPError, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urllib_request, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| @@ -71,7 +71,7 @@ class UdemyIE(InfoExtractor): | ||||
|     def _download_lecture(self, course_id, lecture_id): | ||||
|         return self._download_json( | ||||
|             'https://www.udemy.com/api-2.0/users/me/subscribed-courses/%s/lectures/%s?%s' % ( | ||||
|                 course_id, lecture_id, compat_urllib_parse.urlencode({ | ||||
|                 course_id, lecture_id, compat_urllib_parse_urlencode({ | ||||
|                     'video_only': '', | ||||
|                     'auto_play': '', | ||||
|                     'fields[lecture]': 'title,description,asset', | ||||
| @@ -139,7 +139,7 @@ class UdemyIE(InfoExtractor): | ||||
|         }) | ||||
|  | ||||
|         request = sanitized_Request( | ||||
|             self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8')) | ||||
|             self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8')) | ||||
|         request.add_header('Referer', self._ORIGIN_URL) | ||||
|         request.add_header('Origin', self._ORIGIN_URL) | ||||
|  | ||||
|   | ||||
| @@ -3,7 +3,7 @@ from __future__ import unicode_literals | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -48,7 +48,7 @@ class Vbox7IE(InfoExtractor): | ||||
|                                         webpage, 'title').split('/')[0].strip() | ||||
|  | ||||
|         info_url = 'http://vbox7.com/play/magare.do' | ||||
|         data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id}) | ||||
|         data = compat_urllib_parse_urlencode({'as3': '1', 'vid': video_id}) | ||||
|         info_request = sanitized_Request(info_url, data) | ||||
|         info_request.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|         info_response = self._download_webpage(info_request, video_id, 'Downloading info webpage') | ||||
|   | ||||
| @@ -2,7 +2,7 @@ from __future__ import unicode_literals | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -93,7 +93,7 @@ class ViddlerIE(InfoExtractor): | ||||
|         headers = {'Referer': 'http://static.cdn-ec.viddler.com/js/arpeggio/v2/embed.html'} | ||||
|         request = sanitized_Request( | ||||
|             'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json?%s' | ||||
|             % compat_urllib_parse.urlencode(query), None, headers) | ||||
|             % compat_urllib_parse_urlencode(query), None, headers) | ||||
|         data = self._download_json(request, video_id)['video'] | ||||
|  | ||||
|         formats = [] | ||||
|   | ||||
| @@ -12,7 +12,6 @@ from ..compat import ( | ||||
| ) | ||||
| from ..utils import ( | ||||
|     determine_ext, | ||||
|     encode_dict, | ||||
|     ExtractorError, | ||||
|     InAdvancePagedList, | ||||
|     int_or_none, | ||||
| @@ -42,13 +41,13 @@ class VimeoBaseInfoExtractor(InfoExtractor): | ||||
|         self.report_login() | ||||
|         webpage = self._download_webpage(self._LOGIN_URL, None, False) | ||||
|         token, vuid = self._extract_xsrft_and_vuid(webpage) | ||||
|         data = urlencode_postdata(encode_dict({ | ||||
|         data = urlencode_postdata({ | ||||
|             'action': 'login', | ||||
|             'email': username, | ||||
|             'password': password, | ||||
|             'service': 'vimeo', | ||||
|             'token': token, | ||||
|         })) | ||||
|         }) | ||||
|         login_request = sanitized_Request(self._LOGIN_URL, data) | ||||
|         login_request.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
|         login_request.add_header('Referer', self._LOGIN_URL) | ||||
| @@ -255,10 +254,10 @@ class VimeoIE(VimeoBaseInfoExtractor): | ||||
|         if password is None: | ||||
|             raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True) | ||||
|         token, vuid = self._extract_xsrft_and_vuid(webpage) | ||||
|         data = urlencode_postdata(encode_dict({ | ||||
|         data = urlencode_postdata({ | ||||
|             'password': password, | ||||
|             'token': token, | ||||
|         })) | ||||
|         }) | ||||
|         if url.startswith('http://'): | ||||
|             # vimeo only supports https now, but the user can give an http url | ||||
|             url = url.replace('http://', 'https://') | ||||
| @@ -274,7 +273,7 @@ class VimeoIE(VimeoBaseInfoExtractor): | ||||
|         password = self._downloader.params.get('videopassword') | ||||
|         if password is None: | ||||
|             raise ExtractorError('This video is protected by a password, use the --video-password option') | ||||
|         data = urlencode_postdata(encode_dict({'password': password})) | ||||
|         data = urlencode_postdata({'password': password}) | ||||
|         pass_url = url + '/check-password' | ||||
|         password_request = sanitized_Request(pass_url, data) | ||||
|         password_request.add_header('Content-Type', 'application/x-www-form-urlencoded') | ||||
| @@ -575,7 +574,7 @@ class VimeoChannelIE(VimeoBaseInfoExtractor): | ||||
|         token, vuid = self._extract_xsrft_and_vuid(webpage) | ||||
|         fields['token'] = token | ||||
|         fields['password'] = password | ||||
|         post = urlencode_postdata(encode_dict(fields)) | ||||
|         post = urlencode_postdata(fields) | ||||
|         password_path = self._search_regex( | ||||
|             r'action="([^"]+)"', login_form, 'password URL') | ||||
|         password_url = compat_urlparse.urljoin(page_url, password_path) | ||||
|   | ||||
| @@ -7,7 +7,7 @@ import json | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_str, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
| @@ -204,7 +204,7 @@ class VKIE(InfoExtractor): | ||||
|  | ||||
|         request = sanitized_Request( | ||||
|             'https://login.vk.com/?act=login', | ||||
|             compat_urllib_parse.urlencode(login_form).encode('utf-8')) | ||||
|             compat_urllib_parse_urlencode(login_form).encode('utf-8')) | ||||
|         login_page = self._download_webpage( | ||||
|             request, None, note='Logging in as %s' % username) | ||||
|  | ||||
|   | ||||
| @@ -7,7 +7,7 @@ from ..utils import ( | ||||
|     float_or_none, | ||||
|     int_or_none, | ||||
| ) | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
|  | ||||
|  | ||||
| class VLiveIE(InfoExtractor): | ||||
| @@ -43,7 +43,7 @@ class VLiveIE(InfoExtractor): | ||||
|  | ||||
|         playinfo = self._download_json( | ||||
|             'http://global.apis.naver.com/rmcnmv/rmcnmv/vod_play_videoInfo.json?%s' | ||||
|             % compat_urllib_parse.urlencode({ | ||||
|             % compat_urllib_parse_urlencode({ | ||||
|                 'videoId': long_video_id, | ||||
|                 'key': key, | ||||
|                 'ptc': 'http', | ||||
|   | ||||
| @@ -2,7 +2,7 @@ | ||||
| from __future__ import unicode_literals | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     NO_DEFAULT, | ||||
| @@ -38,7 +38,7 @@ class VodlockerIE(InfoExtractor): | ||||
|  | ||||
|         if fields['op'] == 'download1': | ||||
|             self._sleep(3, video_id)  # they do detect when requests happen too fast! | ||||
|             post = compat_urllib_parse.urlencode(fields) | ||||
|             post = compat_urllib_parse_urlencode(fields) | ||||
|             req = sanitized_Request(url, post) | ||||
|             req.add_header('Content-type', 'application/x-www-form-urlencoded') | ||||
|             webpage = self._download_webpage( | ||||
|   | ||||
| @@ -4,10 +4,9 @@ from __future__ import unicode_literals | ||||
| import re | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import compat_urllib_parse | ||||
| from ..compat import compat_urllib_parse_urlencode | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
|     encode_dict, | ||||
|     int_or_none, | ||||
|     sanitized_Request, | ||||
| ) | ||||
| @@ -109,7 +108,7 @@ class XFileShareIE(InfoExtractor): | ||||
|             if countdown: | ||||
|                 self._sleep(countdown, video_id) | ||||
|  | ||||
|             post = compat_urllib_parse.urlencode(encode_dict(fields)) | ||||
|             post = compat_urllib_parse_urlencode(fields) | ||||
|  | ||||
|             req = sanitized_Request(url, post) | ||||
|             req.add_header('Content-type', 'application/x-www-form-urlencoded') | ||||
|   | ||||
| @@ -8,6 +8,7 @@ import re | ||||
| from .common import InfoExtractor, SearchInfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urlparse, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -303,7 +304,7 @@ class YahooIE(InfoExtractor): | ||||
|         region = self._search_regex( | ||||
|             r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"', | ||||
|             webpage, 'region', fatal=False, default='US') | ||||
|         data = compat_urllib_parse.urlencode({ | ||||
|         data = compat_urllib_parse_urlencode({ | ||||
|             'protocol': 'http', | ||||
|             'region': region, | ||||
|         }) | ||||
|   | ||||
| @@ -7,7 +7,7 @@ import hashlib | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_str, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     ExtractorError, | ||||
| @@ -170,7 +170,7 @@ class YandexMusicPlaylistIE(YandexMusicPlaylistBaseIE): | ||||
|             missing_track_ids = set(map(compat_str, track_ids)) - set(present_track_ids) | ||||
|             request = sanitized_Request( | ||||
|                 'https://music.yandex.ru/handlers/track-entries.jsx', | ||||
|                 compat_urllib_parse.urlencode({ | ||||
|                 compat_urllib_parse_urlencode({ | ||||
|                     'entries': ','.join(missing_track_ids), | ||||
|                     'lang': mu.get('settings', {}).get('lang', 'en'), | ||||
|                     'external-domain': 'music.yandex.ru', | ||||
|   | ||||
| @@ -8,7 +8,7 @@ import time | ||||
|  | ||||
| from .common import InfoExtractor | ||||
| from ..compat import ( | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_ord, | ||||
| ) | ||||
| from ..utils import ( | ||||
| @@ -138,7 +138,7 @@ class YoukuIE(InfoExtractor): | ||||
|                     '_00' + \ | ||||
|                     '/st/' + self.parse_ext_l(format) + \ | ||||
|                     '/fileid/' + get_fileid(format, n) + '?' + \ | ||||
|                     compat_urllib_parse.urlencode(param) | ||||
|                     compat_urllib_parse_urlencode(param) | ||||
|                 video_urls.append(video_url) | ||||
|             video_urls_dict[format] = video_urls | ||||
|  | ||||
|   | ||||
| @@ -17,16 +17,15 @@ from ..swfinterp import SWFInterpreter | ||||
| from ..compat import ( | ||||
|     compat_chr, | ||||
|     compat_parse_qs, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_unquote, | ||||
|     compat_urllib_parse_unquote_plus, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urllib_parse_urlparse, | ||||
|     compat_urlparse, | ||||
|     compat_str, | ||||
| ) | ||||
| from ..utils import ( | ||||
|     clean_html, | ||||
|     encode_dict, | ||||
|     error_to_compat_str, | ||||
|     ExtractorError, | ||||
|     float_or_none, | ||||
| @@ -116,7 +115,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor): | ||||
|             'hl': 'en_US', | ||||
|         } | ||||
|  | ||||
|         login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii') | ||||
|         login_data = compat_urllib_parse_urlencode(login_form_strs).encode('ascii') | ||||
|  | ||||
|         req = sanitized_Request(self._LOGIN_URL, login_data) | ||||
|         login_results = self._download_webpage( | ||||
| @@ -149,7 +148,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor): | ||||
|                 'TrustDevice': 'on', | ||||
|             }) | ||||
|  | ||||
|             tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii') | ||||
|             tfa_data = compat_urllib_parse_urlencode(tfa_form_strs).encode('ascii') | ||||
|  | ||||
|             tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data) | ||||
|             tfa_results = self._download_webpage( | ||||
| @@ -1007,7 +1006,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): | ||||
|                 continue | ||||
|             sub_formats = [] | ||||
|             for ext in self._SUBTITLE_FORMATS: | ||||
|                 params = compat_urllib_parse.urlencode({ | ||||
|                 params = compat_urllib_parse_urlencode({ | ||||
|                     'lang': lang, | ||||
|                     'v': video_id, | ||||
|                     'fmt': ext, | ||||
| @@ -1056,7 +1055,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): | ||||
|             if caption_url: | ||||
|                 timestamp = args['timestamp'] | ||||
|                 # We get the available subtitles | ||||
|                 list_params = compat_urllib_parse.urlencode({ | ||||
|                 list_params = compat_urllib_parse_urlencode({ | ||||
|                     'type': 'list', | ||||
|                     'tlangs': 1, | ||||
|                     'asrs': 1, | ||||
| @@ -1075,7 +1074,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): | ||||
|                     sub_lang = lang_node.attrib['lang_code'] | ||||
|                     sub_formats = [] | ||||
|                     for ext in self._SUBTITLE_FORMATS: | ||||
|                         params = compat_urllib_parse.urlencode({ | ||||
|                         params = compat_urllib_parse_urlencode({ | ||||
|                             'lang': original_lang, | ||||
|                             'tlang': sub_lang, | ||||
|                             'fmt': ext, | ||||
| @@ -1094,7 +1093,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): | ||||
|             caption_tracks = args['caption_tracks'] | ||||
|             caption_translation_languages = args['caption_translation_languages'] | ||||
|             caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0] | ||||
|             parsed_caption_url = compat_urlparse.urlparse(caption_url) | ||||
|             parsed_caption_url = compat_urllib_parse_urlparse(caption_url) | ||||
|             caption_qs = compat_parse_qs(parsed_caption_url.query) | ||||
|  | ||||
|             sub_lang_list = {} | ||||
| @@ -1110,7 +1109,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): | ||||
|                         'fmt': [ext], | ||||
|                     }) | ||||
|                     sub_url = compat_urlparse.urlunparse(parsed_caption_url._replace( | ||||
|                         query=compat_urllib_parse.urlencode(caption_qs, True))) | ||||
|                         query=compat_urllib_parse_urlencode(caption_qs, True))) | ||||
|                     sub_formats.append({ | ||||
|                         'url': sub_url, | ||||
|                         'ext': ext, | ||||
| @@ -1140,7 +1139,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): | ||||
|             'cpn': [cpn], | ||||
|         }) | ||||
|         playback_url = compat_urlparse.urlunparse( | ||||
|             parsed_playback_url._replace(query=compat_urllib_parse.urlencode(qs, True))) | ||||
|             parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True))) | ||||
|  | ||||
|         self._download_webpage( | ||||
|             playback_url, video_id, 'Marking watched', | ||||
| @@ -1225,7 +1224,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): | ||||
|             # this can be viewed without login into Youtube | ||||
|             url = proto + '://www.youtube.com/embed/%s' % video_id | ||||
|             embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage') | ||||
|             data = compat_urllib_parse.urlencode({ | ||||
|             data = compat_urllib_parse_urlencode({ | ||||
|                 'video_id': video_id, | ||||
|                 'eurl': 'https://youtube.googleapis.com/v/' + video_id, | ||||
|                 'sts': self._search_regex( | ||||
| @@ -2085,7 +2084,7 @@ class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE): | ||||
|                 'spf': 'navigate', | ||||
|             } | ||||
|             url_query.update(self._EXTRA_QUERY_ARGS) | ||||
|             result_url = 'https://www.youtube.com/results?' + compat_urllib_parse.urlencode(url_query) | ||||
|             result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query) | ||||
|             data = self._download_json( | ||||
|                 result_url, video_id='query "%s"' % query, | ||||
|                 note='Downloading page %s' % pagenum, | ||||
|   | ||||
| @@ -47,6 +47,7 @@ from .compat import ( | ||||
|     compat_str, | ||||
|     compat_urllib_error, | ||||
|     compat_urllib_parse, | ||||
|     compat_urllib_parse_urlencode, | ||||
|     compat_urllib_parse_urlparse, | ||||
|     compat_urllib_request, | ||||
|     compat_urlparse, | ||||
| @@ -1315,7 +1316,7 @@ def shell_quote(args): | ||||
| def smuggle_url(url, data): | ||||
|     """ Pass additional data in a URL for internal use. """ | ||||
|  | ||||
|     sdata = compat_urllib_parse.urlencode( | ||||
|     sdata = compat_urllib_parse_urlencode( | ||||
|         {'__youtubedl_smuggle': json.dumps(data)}) | ||||
|     return url + '#' + sdata | ||||
|  | ||||
| @@ -1789,22 +1790,15 @@ def read_batch_urls(batch_fd): | ||||
|  | ||||
|  | ||||
| def urlencode_postdata(*args, **kargs): | ||||
|     return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii') | ||||
|     return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') | ||||
|  | ||||
|  | ||||
| def update_url_query(url, query): | ||||
|     parsed_url = compat_urlparse.urlparse(url) | ||||
|     qs = compat_parse_qs(parsed_url.query) | ||||
|     qs.update(query) | ||||
|     qs = encode_dict(qs) | ||||
|     return compat_urlparse.urlunparse(parsed_url._replace( | ||||
|         query=compat_urllib_parse.urlencode(qs, True))) | ||||
|  | ||||
|  | ||||
| def encode_dict(d, encoding='utf-8'): | ||||
|     def encode(v): | ||||
|         return v.encode(encoding) if isinstance(v, compat_basestring) else v | ||||
|     return dict((encode(k), encode(v)) for k, v in d.items()) | ||||
|         query=compat_urllib_parse_urlencode(qs, True))) | ||||
|  | ||||
|  | ||||
| def dict_get(d, key_or_keys, default=None, skip_false_values=True): | ||||
|   | ||||
		Reference in New Issue
	
	Block a user