Skip to content

Commit

Permalink
[plugin.video.viwx] v1.5.0
Browse files Browse the repository at this point in the history
  • Loading branch information
dimkroon authored Aug 1, 2024
1 parent 4387220 commit d7876b7
Show file tree
Hide file tree
Showing 8 changed files with 68 additions and 216 deletions.
21 changes: 7 additions & 14 deletions plugin.video.viwx/addon.xml
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.viwx" name="viwX" version="1.4.1" provider-name="Dimitri Kroon">
<addon id="plugin.video.viwx" name="viwX" version="1.5.0" provider-name="Dimitri Kroon">
<requires>
<import addon="xbmc.python" version="3.0.0"/>
<import addon="inputstream.adaptive" version="19.0.5"/>
<import addon="script.module.requests" version="2.31.0"/>
<import addon="script.module.pytz"/>
<import addon="script.module.tzlocal"/>
<import addon="script.module.pytz" version="2023.3"/>
<import addon="script.module.tzlocal" version="5.0.1"/>
<import addon="script.module.codequick" version="1.0.3"/>
<import addon="script.module.inputstreamhelper"/>
<import addon="script.module.inputstreamhelper" version="0.6.1"/>
</requires>
<extension point="xbmc.python.pluginsource" library="addon.py">
<provides>video</provides>
Expand All @@ -32,18 +32,11 @@
<news>
[B]v1.4.1[/B]
[B]Fixes:[/B]
* Yet again sign-in errors. Now affecting both new users and existing users who had signed out.

[B]v1.4.0[/B]
[B]Fixes:[/B]
* Collection ITV sport failed with KeyError('collection')
* Again occasional sign-in errors for new users. Hopefully the final fix.
* A rare failure to list live channels.
* Adapt to changes at ITVX causing submenu 'My ITVX' to fail sometimes.
* ViwX failed to start with 'FetchError: Forbidden'. An issue only experienced by users of OSMC and possibly some other systems that still use OpenSSL v1.1.1.

[B]New Features:[/B]
* Support for signed programmes. (Enable the new setting 'Use signed programmes whenever available').
* Live hero items (those in orange on the main menu) now have a context menu 'Watch from the start'.
* Episodes in 'Continue Watching' now have context menu item 'Show all episodes', which opens the programme folder with all series and episodes.
* Trending now shows programmes with episodes as folder, rather than playing the first episode of series 1.
</news>
<reuselanguageinvoker>true</reuselanguageinvoker>
</extension>
Expand Down
8 changes: 8 additions & 0 deletions plugin.video.viwx/changelog.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,11 @@
v1.5.0
Fixes:
- ViwX failed to start with 'FetchError: Forbidden'. An issue only experienced by users of OSMC and possibly some other systems that still use OpenSSL v1.1.1.

New Features:
- Episodes in 'Continue Watching' now have context menu item 'Show all episodes', which opens the programme folder with all series and episodes.
- Trending now shows programmes with episodes as folder, rather than playing the first episode of series 1.

v1.4.1
Fixes:
- Yet again sign-in errors. Now affecting both new users and existing users who had signed out.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -235,15 +235,15 @@ msgstr ""

# Menu texts
msgctxt "#30801"
msgid ""
msgid "Add to My List"
msgstr ""

msgctxt "#30802"
msgid ""
msgid "Remove from My List"
msgstr ""

msgctxt "#30803"
msgid ""
msgid "View all episodes"
msgstr ""

msgctxt "#30804"
Expand Down
24 changes: 24 additions & 0 deletions plugin.video.viwx/resources/lib/fetch.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,9 @@
import requests
import pickle
import time

from requests.cookies import RequestsCookieJar
from requests.adapters import HTTPAdapter
import json

from codequick import Script
Expand Down Expand Up @@ -58,6 +60,27 @@ def clear(self, domain=None, path=None, name=None) -> None:
pass


class CustomHttpAdapter(HTTPAdapter):
    """HTTP adapter that installs its own SSLContext on the connection pool.

    Works around www.itv.com returning 403 FORBIDDEN on OSMC 2024.05-1 and
    probably other systems still running OpenSSL 1.1.1. The ssl.OP_NO_TICKET
    option, which urllib3 v2+ sets by default, appears to upset ITVX's servers
    when combined with OpenSSL 1.1.1, so an SSLContext of our own is passed to
    the pool manager instead. Apart from the OP_NO_TICKET option, ssl's default
    context appears to be very much like the one urllib3 creates, so it should
    be safe for general use here.
    """
    def init_poolmanager(self, *args, **kwargs):
        import ssl
        import urllib3

        # Log library versions to ease diagnosing TLS trouble from user reports.
        logger.info('Urllib3 version %s', urllib3.__version__)
        logger.info(ssl.OPENSSL_VERSION)
        super().init_poolmanager(*args, **kwargs, ssl_context=ssl.create_default_context())


class HttpSession(requests.sessions.Session):
instance = None

Expand All @@ -84,6 +107,7 @@ def __init__(self):
'Pragma': 'no-cache',
})
self.cookies = _create_cookiejar()
self.mount('https://', CustomHttpAdapter())

# noinspection PyShadowingNames
def request(
Expand Down
55 changes: 2 additions & 53 deletions plugin.video.viwx/resources/lib/itvx.py
Original file line number Diff line number Diff line change
Expand Up @@ -216,7 +216,7 @@ def collection_content(url=None, slider=None, hide_paid=False):
# Only found on main page
items_list = page_data['trendingSliderContent']['items']
for trending_item in items_list:
yield parsex.parse_trending_collection_item(trending_item, hide_paid)
yield parsex.parse_collection_item(trending_item, hide_paid)
return

else:
Expand Down Expand Up @@ -280,11 +280,7 @@ def episodes(url, use_cache=False, prefer_bsl=False):
return cached_data['series_map'], cached_data['programme_id']

page_data = get_page_data(url, cache_time=0)
try:
programme = page_data['programme']
except KeyError:
logger.warning("Trying to parse episodes in legacy format for programme %s", url)
return legacy_episodes(url)
programme = page_data['programme']
programme_id = programme.get('encodedProgrammeId', {}).get('underscore')
programme_title = programme['title']
programme_thumb = programme['image'].format(**parsex.IMG_PROPS_THUMB)
Expand Down Expand Up @@ -329,53 +325,6 @@ def episodes(url, use_cache=False, prefer_bsl=False):
return series_map, programme_id


def legacy_episodes(url):
    """Get a listing of series and their episodes.

    Uses the legacy data structure that was in use before 2-8-23.
    Returns a tuple (series_map, programme_id). The legacy format has no
    programme ID, so the second element is always None.
    """
    brand_data = get_page_data(url, cache_time=0)['title']['brand']
    brand_title = brand_data['title']
    brand_thumb = brand_data['imageUrl'].format(**parsex.IMG_PROPS_THUMB)
    brand_fanart = brand_data['imageUrl'].format(**parsex.IMG_PROPS_FANART)
    if 'FREE' in brand_data['tier']:
        brand_description = brand_data['synopses'].get('ninety', '')
    else:
        brand_description = parsex.premium_plot(brand_data['synopses'].get('ninety', ''))
    series_data = brand_data['series']

    if not series_data:
        # Return a 2-tuple like the non-empty path does; callers unpack
        # (series_map, programme_id), so returning a bare dict would raise
        # ValueError at the unpack site.
        return {}, None

    # The field 'seriesNumber' is not guaranteed to be unique - and not guaranteed an integer either.
    # Midsummer murder for instance has 2 series with seriesNumber 4
    # By using this mapping, setdefault() and extend() on the episode list, series with the same
    # seriesNumber are automatically merged.
    series_map = {}
    for series in series_data:
        title = series['title']
        series_idx = series['seriesNumber']
        series_obj = series_map.setdefault(
            series_idx, {
                'series': {
                    'label': title,
                    'art': {'thumb': brand_thumb, 'fanart': brand_fanart},
                    # TODO: add more info, like series number, number of episodes
                    'info': {'title': '[B]{} - {}[/B]'.format(brand_title, series['title']),
                             'plot': '{}\n\n{} - {} episodes'.format(
                                 brand_description, title, series['seriesAvailableEpisodeCount'])},

                    'params': {'url': url, 'series_idx': series_idx}
                },
                'episodes': []
            })
        series_obj['episodes'].extend(
            [parsex.parse_legacy_episode_title(episode, brand_fanart) for episode in series['episodes']])
    # Cache for 30 minutes so repeated listings of the same programme are cheap.
    cache.set_item(url, {'programme_id': None, 'series_map': series_map}, expire_time=1800)
    return series_map, None


def categories():
"""Return all available categorie names."""
data = get_page_data('https://www.itv.com/watch/categories', cache_time=86400)
Expand Down
9 changes: 6 additions & 3 deletions plugin.video.viwx/resources/lib/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,8 @@
TXT_NO_ITEMS_FOUND = 30608
TXT_PLAY_FROM_START = 30620
TXT_PREMIUM_CONTENT = 30622
TXT_ADD_TO_MYLIST = 30801
TXT_REMOVE_FROM_MYLIST = 30802


def empty_folder():
Expand Down Expand Up @@ -158,6 +160,7 @@ def _generate_page(self):
for show in shows_list:
try:
li = Listitem.from_dict(callb_map[show['type']], **show['show'])
li.context.extend(show.get('ctx_mnu', []))
# Create 'My List' add/remove context menu entries here, so as to be able to update these
# entries after adding/removing an item, even when the underlying data is cached.
_my_list_context_mnu(li, show.get('programme_id'))
Expand Down Expand Up @@ -223,10 +226,10 @@ def _my_list_context_mnu(list_item, programme_id, refresh=True, retry=True):

try:
if programme_id in cache.my_list_programmes:
list_item.context.script(update_mylist, "Remove from My List",
list_item.context.script(update_mylist, utils.addon_info.localise(TXT_REMOVE_FROM_MYLIST),
progr_id=programme_id, operation='remove', refresh=refresh)
else:
list_item.context.script(update_mylist, "Add to My List",
list_item.context.script(update_mylist, utils.addon_info.localise(TXT_ADD_TO_MYLIST),
progr_id=programme_id, operation='add', refresh=refresh)
except TypeError:
if retry and cache.my_list_programmes is None:
Expand Down Expand Up @@ -591,7 +594,7 @@ def play_stream_catchup(plugin, url, name, set_resume_point=False):
'subtitles.translate.orig_lang': 'en',
'subtitles.translate.type': 'srt'})
if set_resume_point:
resume_time = itvx.get_resume_point(production_id)
resume_time = itvx.get_resume_point(production_id)
if resume_time:
list_item.setProperties({
'ResumeTime': str(resume_time),
Expand Down
92 changes: 17 additions & 75 deletions plugin.video.viwx/resources/lib/parsex.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
import logging
import pytz
from datetime import datetime
from urllib.parse import urlencode

from codequick.support import logger_id
from codequick import Script
Expand All @@ -18,7 +19,7 @@
from .errors import ParseError

TXT_PLAY_FROM_START = 30620

TXT_VIEW_ALL_EPISODES = 30803

logger = logging.getLogger(logger_id + '.parse')

Expand Down Expand Up @@ -272,10 +273,11 @@ def parse_collection_item(show_data, hide_paid=False):
else:
plot = show_data['description']

img = show_data.get('imageTemplate') or show_data.get('imageUrl', '')
programme_item = {
'label': title,
'art': {'thumb': show_data['imageTemplate'].format(**IMG_PROPS_THUMB),
'fanart': show_data['imageTemplate'].format(**IMG_PROPS_FANART)},
'art': {'thumb': img.format(**IMG_PROPS_THUMB),
'fanart': img.format(**IMG_PROPS_FANART)},
'info': {'title': title if is_playable else '[B]{}[/B] {}'.format(title, content_info),
'plot': plot,
'sorttitle': sort_title(title)},
Expand Down Expand Up @@ -358,43 +360,6 @@ def parse_shortform_item(item_data, time_zone, time_fmt, hide_paid=False):
return None


def parse_trending_collection_item(trending_item, hide_paid=False):
    """Parse an item in the collection 'Trending'.

    The only real difference with the regular parse_collection_item() is that
    the field `contentInfo` is appended to the plot and that every item is
    treated as playable.
    """
    try:
        # No idea if premium content can be trending, but just to be sure.
        synopsis = '\n'.join((trending_item['description'], trending_item['contentInfo']))
        if trending_item.get('isPaid'):
            if hide_paid:
                return None
            synopsis = premium_plot(synopsis)

        # NOTE:
        # Especially titles of type 'special' may lack a field encodedEpisodeID. For those titles it
        # should not be necessary, but for episodes they are a requirement otherwise the page
        # will always return the first episode.
        prog_title = trending_item['title']
        episode_id = trending_item.get('encodedEpisodeId', {}).get('letterA')
        page_url = build_url(trending_item['titleSlug'],
                             trending_item['encodedProgrammeId']['letterA'],
                             episode_id)
        return {
            'type': 'title',
            'programme_id': trending_item['encodedProgrammeId']['underscore'],
            'show': {
                'label': prog_title,
                'art': {'thumb': trending_item['imageUrl'].format(**IMG_PROPS_THUMB)},
                'info': {'plot': synopsis, 'sorttitle': sort_title(prog_title)},
                'params': {'url': page_url}
            }
        }
    except Exception:
        # Best-effort parsing: log the offending item and skip it.
        logger.warning("Failed to parse trending_collection_item:\n%s", json.dumps(trending_item, indent=4))
        return None


def parse_category_item(prog, category_id):
# At least all items without an encodedEpisodeId are playable.
# Unfortunately there are items that do have an episodeId, but are in fact single
Expand Down Expand Up @@ -521,40 +486,6 @@ def parse_episode_title(title_data, brand_fanart=None, prefer_bsl=False):
return title_obj


def parse_legacy_episode_title(title_data, brand_fanart=None):
    """Parse a title from an episodes listing in the old (pre 2-8-23) format.

    Returns a dict with label, art, info and params fields suitable for
    building a playable list item.
    """
    # Note: episodeTitle may be None
    title = title_data['episodeTitle'] or title_data['numberedEpisodeTitle']
    img_url = title_data['imageUrl']
    plot = '\n\n'.join((title_data['synopsis'], title_data['guidance'] or ''))
    if 'PAID' in title_data.get('tier', []):
        plot = premium_plot(plot)

    title_obj = {
        'label': title,
        'art': {'thumb': img_url.format(**IMG_PROPS_THUMB),
                'fanart': brand_fanart,
                # 'poster': img_url.format(**IMG_PROPS_POSTER)
                },
        'info': {'title': title_data['numberedEpisodeTitle'],
                 'plot': plot,
                 'duration': utils.duration_2_seconds(title_data['duration']),
                 'date': title_data['broadcastDateTime']},
        'params': {'url': title_data['playlistUrl'], 'name': title}
    }
    if title_data['titleType'] == 'EPISODE':
        # Episode/series numbers may be non-numeric, or absent (None) like
        # episodeTitle above; int() raises ValueError for the former and
        # TypeError for the latter — treat both as 'unknown'.
        try:
            episode_nr = int(title_data['episodeNumber'])
        except (ValueError, TypeError):
            episode_nr = None
        try:
            series_nr = int(title_data['seriesNumber'])
        except (ValueError, TypeError):
            series_nr = None
        title_obj['info'].update(episode=episode_nr, season=series_nr)
    return title_obj


def parse_search_result(search_data):
entity_type = search_data['entityType']
result_data = search_data['data']
Expand Down Expand Up @@ -652,6 +583,7 @@ def parse_my_list_item(item, hide_paid=False):

def parse_last_watched_item(item, utc_now):
progr_name = item.get('programmeTitle', '')
progr_id = item.get('programmeId', '').replace('/', '_')
episode_name = item.get('episodeTitle')
series_nr = item.get('seriesNumber')
episode_nr = item.get('episodeNumber')
Expand Down Expand Up @@ -686,9 +618,10 @@ def parse_last_watched_item(item, utc_now):
else:
title = '{} - [I]{}% watched[/I]'.format(progr_name, int(item['percentageWatched'] * 100))


item_dict = {
'type': 'vodstream',
'programme_id': item['programmeId'].replace('/', '_'),
'programme_id': progr_id,
'show': {
'label': episode_name or progr_name,
'art': {'thumb': img_link.format(**IMG_PROPS_THUMB),
Expand All @@ -715,6 +648,15 @@ def parse_last_watched_item(item, utc_now):
}
if item['contentType'] == 'FILM':
item_dict['show']['art']['poster'] = img_link.format(**IMG_PROPS_POSTER)
elif item['contentType'] == 'EPISODE' and progr_id:
ctx_mnu = (utils.addon_info.localise(TXT_VIEW_ALL_EPISODES),
''.join(('Container.Update(plugin://',
utils.addon_info.id,
'/resources/lib/main/wrapper.list_productions?',
urlencode({'url': '/watch/undefined/' + progr_id}),
')'))
)
item_dict['ctx_mnu'] = [ctx_mnu]
return item_dict


Expand Down
Loading

0 comments on commit d7876b7

Please sign in to comment.