diff --git a/plugin.video.viwx/addon.xml b/plugin.video.viwx/addon.xml
index bd97d283b..f6ec59f8e 100644
--- a/plugin.video.viwx/addon.xml
+++ b/plugin.video.viwx/addon.xml
@@ -1,13 +1,13 @@
-
+
-
-
+
+
-
+
 video
@@ -32,18 +32,11 @@
 [B]v1.4.1[/B]
 [B]Fixes:[/B]
-* Yet again sign-in errors. Now affecting both new users and existing users who had signed out.
-
-[B]v1.4.0[/B]
-[B]Fixes:[/B]
-* Collection ITV sport failed with KeyError('collection')
-* Again occasional sign-in errors for new users. Hopefully the final fix.
-* A rare failure to list live channels.
-* Adapt to changes at ITVX causing submenu 'My ITVX' to fail sometimes.
+* ViwX failed to start with 'FetchError: Forbidden'. An issue only experienced by users of OSMC and possibly some other systems that still use OpenSSL v1.1.1.

 [B]New Features:[/B]
-* Support for signed programmes. (Enable the new setting 'Use signed programmes whenever available').
-* Live hero items (those in orange on the main menu) now have a context menu 'Watch from the start'.
+* Episodes in 'Continue Watching' now have a context menu item 'View all episodes', which opens the programme folder with all series and episodes.
+* Trending now shows programmes with episodes as a folder, rather than playing the first episode of series 1.

 true
diff --git a/plugin.video.viwx/changelog.txt b/plugin.video.viwx/changelog.txt
index 33f9aa663..ac7c0c54b 100644
--- a/plugin.video.viwx/changelog.txt
+++ b/plugin.video.viwx/changelog.txt
@@ -1,3 +1,11 @@
+v1.5.0
+Fixes:
+- ViwX failed to start with 'FetchError: Forbidden'. An issue only experienced by users of OSMC and possibly some other systems that still use OpenSSL v1.1.1.
+
+New Features:
+- Episodes in 'Continue Watching' now have a context menu item 'View all episodes', which opens the programme folder with all series and episodes.
+- Trending now shows programmes with episodes as a folder, rather than playing the first episode of series 1.
+
 v1.4.1
 Fixes:
 - Yet again sign-in errors. Now affecting both new users and existing users who had signed out.
diff --git a/plugin.video.viwx/resources/language/resource.language.en_gb/strings.po b/plugin.video.viwx/resources/language/resource.language.en_gb/strings.po
index 7a7a9faf5..25db222ca 100644
--- a/plugin.video.viwx/resources/language/resource.language.en_gb/strings.po
+++ b/plugin.video.viwx/resources/language/resource.language.en_gb/strings.po
@@ -235,15 +235,15 @@ msgstr ""

 # Menu texts
 msgctxt "#30801"
-msgid ""
+msgid "Add to My List"
 msgstr ""

 msgctxt "#30802"
-msgid ""
+msgid "Remove from My List"
 msgstr ""

 msgctxt "#30803"
-msgid ""
+msgid "View all episodes"
 msgstr ""

 msgctxt "#30804"
diff --git a/plugin.video.viwx/resources/lib/fetch.py b/plugin.video.viwx/resources/lib/fetch.py
index ec6132c15..97fc13510 100644
--- a/plugin.video.viwx/resources/lib/fetch.py
+++ b/plugin.video.viwx/resources/lib/fetch.py
@@ -10,7 +10,9 @@
 import requests
 import pickle
 import time
+
 from requests.cookies import RequestsCookieJar
+from requests.adapters import HTTPAdapter
 import json

 from codequick import Script
@@ -58,6 +60,27 @@ def clear(self, domain=None, path=None, name=None) -> None:
         pass


+class CustomHttpAdapter(HTTPAdapter):
+    """A custom HTTP adapter to work around the issue that www.itv.com returns
+    403 FORBIDDEN on OSMC 2024.05-1 and probably other systems running openssl 1.1.1.
+
+    It looks like the use of ssl.OP_NO_TICKET with openssl 1.1.1 causes trouble
+    with ITVX's servers. Since urllib3 v2+ sets this option by default, we create
+    our own SSLContext for use in HTTPS connections.
+    Apart from the OP_NO_TICKET option, ssl's default context appears to be very
+    much like that created by urllib3, so I guess it's safe for general use here.
+
+    """
+    def init_poolmanager(self, *args, **kwargs):
+        import urllib3
+        import ssl
+        logger.info('Urllib3 version %s', urllib3.__version__)
+        logger.info(ssl.OPENSSL_VERSION)
+
+        ctx = ssl.create_default_context()
+        super().init_poolmanager(*args, **kwargs, ssl_context=ctx)
+
+
 class HttpSession(requests.sessions.Session):
     instance = None

@@ -84,6 +107,7 @@ def __init__(self):
             'Pragma': 'no-cache',
         })
         self.cookies = _create_cookiejar()
+        self.mount('https://', CustomHttpAdapter())

     # noinspection PyShadowingNames
     def request(
diff --git a/plugin.video.viwx/resources/lib/itvx.py b/plugin.video.viwx/resources/lib/itvx.py
index 3b81355e8..3985ac3f0 100644
--- a/plugin.video.viwx/resources/lib/itvx.py
+++ b/plugin.video.viwx/resources/lib/itvx.py
@@ -216,7 +216,7 @@ def collection_content(url=None, slider=None, hide_paid=False):
             # Only found on main page
             items_list = page_data['trendingSliderContent']['items']
             for trending_item in items_list:
-                yield parsex.parse_trending_collection_item(trending_item, hide_paid)
+                yield parsex.parse_collection_item(trending_item, hide_paid)
             return

         else:
@@ -280,11 +280,7 @@ def episodes(url, use_cache=False, prefer_bsl=False):
             return cached_data['series_map'], cached_data['programme_id']

     page_data = get_page_data(url, cache_time=0)
-    try:
-        programme = page_data['programme']
-    except KeyError:
-        logger.warning("Trying to parse episodes in legacy format for programme %s", url)
-        return legacy_episodes(url)
+    programme = page_data['programme']
     programme_id = programme.get('encodedProgrammeId', {}).get('underscore')
     programme_title = programme['title']
     programme_thumb = programme['image'].format(**parsex.IMG_PROPS_THUMB)
@@ -329,53 +325,6 @@
     return series_map, programme_id


-def legacy_episodes(url):
-    """Get a listing of series and their episodes
-
-    Use legacy data structure that was in use before 2-8-23.
-
-    """
-    brand_data = get_page_data(url, cache_time=0)['title']['brand']
-    brand_title = brand_data['title']
-    brand_thumb = brand_data['imageUrl'].format(**parsex.IMG_PROPS_THUMB)
-    brand_fanart = brand_data['imageUrl'].format(**parsex.IMG_PROPS_FANART)
-    if 'FREE' in brand_data['tier']:
-        brand_description = brand_data['synopses'].get('ninety', '')
-    else:
-        brand_description = parsex.premium_plot(brand_data['synopses'].get('ninety', ''))
-    series_data = brand_data['series']
-
-    if not series_data:
-        return {}
-
-    # The field 'seriesNumber' is not guaranteed to be unique - and not guaranteed an integer either.
-    # Midsummer murder for instance has 2 series with seriesNumber 4
-    # By using this mapping, setdefault() and extend() on the episode list, series with the same
-    # seriesNumber are automatically merged.
-    series_map = {}
-    for series in series_data:
-        title = series['title']
-        series_idx = series['seriesNumber']
-        series_obj = series_map.setdefault(
-            series_idx, {
-                'series': {
-                    'label': title,
-                    'art': {'thumb': brand_thumb, 'fanart': brand_fanart},
-                    # TODO: add more info, like series number, number of episodes
-                    'info': {'title': '[B]{} - {}[/B]'.format(brand_title, series['title']),
-                             'plot': '{}\n\n{} - {} episodes'.format(
-                                 brand_description, title, series['seriesAvailableEpisodeCount'])},
-
-                    'params': {'url': url, 'series_idx': series_idx}
-                },
-                'episodes': []
-            })
-        series_obj['episodes'].extend(
-            [parsex.parse_legacy_episode_title(episode, brand_fanart) for episode in series['episodes']])
-    cache.set_item(url, {'programme_id': None, 'series_map': series_map}, expire_time=1800)
-    return series_map, None
-
-
 def categories():
     """Return all available categorie names."""
     data = get_page_data('https://www.itv.com/watch/categories', cache_time=86400)
diff --git a/plugin.video.viwx/resources/lib/main.py b/plugin.video.viwx/resources/lib/main.py
index d3b673744..02854e491 100644
--- a/plugin.video.viwx/resources/lib/main.py
+++ b/plugin.video.viwx/resources/lib/main.py
@@ -42,6 +42,8 @@
 TXT_NO_ITEMS_FOUND = 30608
 TXT_PLAY_FROM_START = 30620
 TXT_PREMIUM_CONTENT = 30622
+TXT_ADD_TO_MYLIST = 30801
+TXT_REMOVE_FROM_MYLIST = 30802


 def empty_folder():
@@ -158,6 +160,7 @@ def _generate_page(self):
         for show in shows_list:
             try:
                 li = Listitem.from_dict(callb_map[show['type']], **show['show'])
+                li.context.extend(show.get('ctx_mnu', []))
                 # Create 'My List' add/remove context menu entries here, so as to be able to update these
                 # entries after adding/removing an item, even when the underlying data is cached.
                 _my_list_context_mnu(li, show.get('programme_id'))
@@ -223,10 +226,10 @@ def _my_list_context_mnu(list_item, programme_id, refresh=True, retry=True):
     try:
         if programme_id in cache.my_list_programmes:
-            list_item.context.script(update_mylist, "Remove from My List",
+            list_item.context.script(update_mylist, utils.addon_info.localise(TXT_REMOVE_FROM_MYLIST),
                                      progr_id=programme_id, operation='remove', refresh=refresh)
         else:
-            list_item.context.script(update_mylist, "Add to My List",
+            list_item.context.script(update_mylist, utils.addon_info.localise(TXT_ADD_TO_MYLIST),
                                      progr_id=programme_id, operation='add', refresh=refresh)
     except TypeError:
         if retry and cache.my_list_programmes is None:
@@ -591,7 +594,7 @@ def play_stream_catchup(plugin, url, name, set_resume_point=False):
                             'subtitles.translate.orig_lang': 'en',
                             'subtitles.translate.type': 'srt'})
     if set_resume_point:
-        resume_time = itvx.get_resume_point(production_id)
+        resume_time = itvx.get_resume_point(production_id)
     if resume_time:
         list_item.setProperties({
             'ResumeTime': str(resume_time),
diff --git a/plugin.video.viwx/resources/lib/parsex.py b/plugin.video.viwx/resources/lib/parsex.py
index f70a3a857..fbe1bb0e9 100644
--- a/plugin.video.viwx/resources/lib/parsex.py
+++ b/plugin.video.viwx/resources/lib/parsex.py
@@ -10,6 +10,7 @@
 import logging
 import pytz
 from datetime import datetime
+from urllib.parse import urlencode

 from codequick.support import logger_id
 from codequick import Script
@@ -18,7 +19,7 @@
 from .errors import ParseError

 TXT_PLAY_FROM_START = 30620
-
+TXT_VIEW_ALL_EPISODES = 30803

 logger = logging.getLogger(logger_id + '.parse')

@@ -272,10 +273,11 @@ def parse_collection_item(show_data, hide_paid=False):
     else:
         plot = show_data['description']

+    img = show_data.get('imageTemplate') or show_data.get('imageUrl', '')
     programme_item = {
         'label': title,
-        'art': {'thumb': show_data['imageTemplate'].format(**IMG_PROPS_THUMB),
-                'fanart': show_data['imageTemplate'].format(**IMG_PROPS_FANART)},
+        'art': {'thumb': img.format(**IMG_PROPS_THUMB),
+                'fanart': img.format(**IMG_PROPS_FANART)},
         'info': {'title': title if is_playable else '[B]{}[/B] {}'.format(title, content_info),
                  'plot': plot,
                  'sorttitle': sort_title(title)},
@@ -358,43 +360,6 @@ def parse_shortform_item(item_data, time_zone, time_fmt, hide_paid=False):
     return None


-def parse_trending_collection_item(trending_item, hide_paid=False):
-    """Parse an item in the collection 'Trending'
-    The only real difference with the regular parse_collection_item() is
-    adding field `contentInfo` to plot and the fact that all items are being
-    treated as playable.
-
-    """
-    try:
-        # No idea if premium content can be trending, but just to be sure.
-        plot = '\n'.join((trending_item['description'], trending_item['contentInfo']))
-        if trending_item.get('isPaid'):
-            if hide_paid:
-                return None
-            plot = premium_plot(plot)
-
-        # NOTE:
-        # Especially titles of type 'special' may lack a field encodedEpisodeID. For those titles it
-        # should not be necessary, but for episodes they are a requirement otherwise the page
-        # will always return the first episode.
-
-        return {
-            'type': 'title',
-            'programme_id': trending_item['encodedProgrammeId']['underscore'],
-            'show': {
-                'label': trending_item['title'],
-                'art': {'thumb': trending_item['imageUrl'].format(**IMG_PROPS_THUMB)},
-                'info': {'plot': plot, 'sorttitle': sort_title(trending_item['title'])},
-                'params': {'url': build_url(trending_item['titleSlug'],
-                                            trending_item['encodedProgrammeId']['letterA'],
-                                            trending_item.get('encodedEpisodeId', {}).get('letterA'))}
-            }
-        }
-    except Exception:
-        logger.warning("Failed to parse trending_collection_item:\n%s", json.dumps(trending_item, indent=4))
-        return None
-
-
 def parse_category_item(prog, category_id):
     # At least all items without an encodedEpisodeId are playable.
     # Unfortunately there are items that do have an episodeId, but are in fact single
@@ -521,40 +486,6 @@ def parse_episode_title(title_data, brand_fanart=None, prefer_bsl=False):
     return title_obj


-def parse_legacy_episode_title(title_data, brand_fanart=None):
-    """Parse a title from episodes listing in old format"""
-    # Note: episodeTitle may be None
-    title = title_data['episodeTitle'] or title_data['numberedEpisodeTitle']
-    img_url = title_data['imageUrl']
-    plot = '\n\n'.join((title_data['synopsis'], title_data['guidance'] or ''))
-    if 'PAID' in title_data.get('tier', []):
-        plot = premium_plot(plot)
-
-    title_obj = {
-        'label': title,
-        'art': {'thumb': img_url.format(**IMG_PROPS_THUMB),
-                'fanart': brand_fanart,
-                # 'poster': img_url.format(**IMG_PROPS_POSTER)
-                },
-        'info': {'title': title_data['numberedEpisodeTitle'],
-                 'plot': plot,
-                 'duration': utils.duration_2_seconds(title_data['duration']),
-                 'date': title_data['broadcastDateTime']},
-        'params': {'url': title_data['playlistUrl'], 'name': title}
-    }
-    if title_data['titleType'] == 'EPISODE':
-        try:
-            episode_nr = int(title_data['episodeNumber'])
-        except ValueError:
-            episode_nr = None
-        try:
-            series_nr = int(title_data['seriesNumber'])
-        except ValueError:
-            series_nr = None
-        title_obj['info'].update(episode=episode_nr, season=series_nr)
-    return title_obj
-
-
 def parse_search_result(search_data):
     entity_type = search_data['entityType']
     result_data = search_data['data']
@@ -652,6 +583,7 @@ def parse_my_list_item(item, hide_paid=False):

 def parse_last_watched_item(item, utc_now):
     progr_name = item.get('programmeTitle', '')
+    progr_id = item.get('programmeId', '').replace('/', '_')
     episode_name = item.get('episodeTitle')
     series_nr = item.get('seriesNumber')
     episode_nr = item.get('episodeNumber')
@@ -686,9 +618,10 @@
     else:
         title = '{} - [I]{}% watched[/I]'.format(progr_name, int(item['percentageWatched'] * 100))

+
     item_dict = {
         'type': 'vodstream',
-        'programme_id': item['programmeId'].replace('/', '_'),
+        'programme_id': progr_id,
         'show': {
             'label': episode_name or progr_name,
             'art': {'thumb': img_link.format(**IMG_PROPS_THUMB),
@@ -715,6 +648,15 @@
     }
     if item['contentType'] == 'FILM':
         item_dict['show']['art']['poster'] = img_link.format(**IMG_PROPS_POSTER)
+    elif item['contentType'] == 'EPISODE' and progr_id:
+        ctx_mnu = (utils.addon_info.localise(TXT_VIEW_ALL_EPISODES),
+                   ''.join(('Container.Update(plugin://',
+                            utils.addon_info.id,
+                            '/resources/lib/main/wrapper.list_productions?',
+                            urlencode({'url': '/watch/undefined/' + progr_id}),
+                            ')'))
+                   )
+        item_dict['ctx_mnu'] = [ctx_mnu]
     return item_dict
diff --git a/plugin.video.viwx/resources/lib/utils.py b/plugin.video.viwx/resources/lib/utils.py
index f9dba3951..ead88813a 100644
--- a/plugin.video.viwx/resources/lib/utils.py
+++ b/plugin.video.viwx/resources/lib/utils.py
@@ -1,5 +1,5 @@
 # ----------------------------------------------------------------------------------------------------------------------
-# Copyright (c) 2022-2023 Dimitri Kroon.
+# Copyright (c) 2022-2024 Dimitri Kroon.
 # This file is part of plugin.video.viwx.
 # SPDX-License-Identifier: GPL-2.0-or-later
 # See LICENSE.txt
@@ -49,73 +49,6 @@ def random_string(length: int) -> str:
     return result


-def ttml_to_srt(ttml_data, outfile):
-    """Convert subtitles in XML format to a format that kodi accepts"""
-    from xml.etree import ElementTree
-    import re
-
-    # Get XML namespace
-    match = re.search(r'xmlns="(.*?)" ', ttml_data, re.DOTALL)
-    if match:
-        xmlns = ''.join(('{', match.group(1), '}'))
-    else:
-        xmlns = ''
-
-    FONT_COL_WHITE = '<font color="white">'
-    FONT_END_TAG = '</font>\n'
-
-    root = ElementTree.fromstring(ttml_data)
-
-    dflt_styles = {}
-    path = ''.join(('./', xmlns, 'head', '/', xmlns, 'styling', '/', xmlns, 'style'))
-    styles = root.findall(path)
-    for style_def in styles:
-        style_id = style_def.get(xmlns + 'id')
-        colors = [value for tag, value in style_def.items() if tag.endswith('color')]
-        if colors:
-            col = colors[0]
-            # strip possible alpha value if color is a HTML encoded RBGA value
-            if col.startswith('#'):
-                col = col[:7]
-            dflt_styles[style_id] = ''.join(('<font color="', col, '">'))
-
-    body = root.find(xmlns + 'body')
-    if body is None:
-        return
-
-    index = 0
-    # lines = []
-    color_tag = "{http://www.w3.org/ns/ttml#styling}" + 'color'
-
-    for paragraph in body.iter(xmlns + 'p'):
-        index += 1
-
-        t_start = paragraph.get('begin')
-        t_end = paragraph.get('end')
-        if not (t_start and t_end):
-            continue
-        outfile.write(str(index) + '\n')
-        # convert xml time format: begin="00:03:33:14" end="00:03:36:06"
-        # to srt format: 00:03:33,140 --> 00:03:36,060
-        outfile.write(''.join((t_start[0:-3], ',', t_start[-2:], '0', ' --> ', t_end[0:-3], ',', t_end[-2:], '0\n')))
-
-        p_style = paragraph.get('style')
-        p_col = dflt_styles.get(p_style, FONT_COL_WHITE)
-        if paragraph.text:
-            outfile.write(''.join((p_col, paragraph.text, FONT_END_TAG)))
-        for el in paragraph:
-            if el.tag.endswith('span') and el.text:
-                col = el.get(color_tag, 'white')
-                # col = [v for k, v in el.items() if k.endswith('color')]
-                # if col:
-                outfile.write(''.join(('<font color="', col, '">', el.text, FONT_END_TAG)))
-                # else:
-                #     lines.append(''.join((FONT_COL_WHITE, el.text, FONT_END_TAG)))
-            if el.tail:
-                outfile.write(''.join((p_col, el.tail, FONT_END_TAG)))
-        outfile.write('\n')
-
-
 def vtt_to_srt(vtt_doc: str, colourize=True) -> str:
     """Convert a string containing subtitles in vtt format into a format kodi accepts.