# -*- coding: utf-8 -*-
import re
import os
import base64
import json
import time
import six
import traceback
import sys
from kodi_six import xbmcplugin, xbmcgui, xbmcaddon, xbmcvfs, xbmc
from six.moves import urllib_request, urllib_parse, urllib_error, http_cookiejar, html_parser
from xml.sax.saxutils import escape
from xml.etree import ElementTree
class NoRedirection(urllib_request.HTTPErrorProcessor):
    """Opener processor that disables automatic redirect/error handling.

    Passed as a handler class to ``urllib_request.build_opener`` (see the
    ``'noredirect'`` branches of getRegexParsed) so 3xx responses are returned
    to the caller instead of being followed.

    BUGFIX: this previously subclassed ``urllib_error.HTTPError``, which is an
    exception type, not a handler — ``build_opener`` instantiates handler
    classes with no arguments, so that crashed. ``HTTPErrorProcessor`` is the
    correct base whose ``http_response``/``https_response`` hooks we override.
    """

    def http_response(self, request, response):
        # Return the response untouched: no redirect following, no raise on
        # non-2xx status codes.
        return response

    https_response = http_response
# ---- module-level state and addon configuration -----------------------------
global gLSProDynamicCodeNumber  # no-op at module scope; kept for parity with the original
viewmode = None
tsdownloader = False
hlsretry = False
# Kodi 19 (Python 3) moved translatePath to xbmcvfs and renamed LOGNOTICE.
TRANSLATEPATH = xbmc.translatePath if six.PY2 else xbmcvfs.translatePath
LOGINFO = xbmc.LOGNOTICE if six.PY2 else xbmc.LOGINFO
# Hosts whose links get routed through the URL resolver (mode 19).
resolve_url = ['180upload.com', 'allmyvideos.net', 'bestreams.net', 'clicknupload.com', 'cloudzilla.to', 'movshare.net', 'novamov.com', 'nowvideo.sx', 'videoweed.es', 'daclips.in', 'datemule.com', 'fastvideo.in', 'faststream.in', 'filehoot.com', 'filenuke.com', 'sharesix.com', 'plus.google.com', 'picasaweb.google.com', 'gorillavid.com', 'gorillavid.in', 'grifthost.com', 'hugefiles.net', 'ipithos.to', 'ishared.eu', 'kingfiles.net', 'mail.ru', 'my.mail.ru', 'videoapi.my.mail.ru', 'mightyupload.com', 'mooshare.biz', 'movdivx.com', 'movpod.net', 'movpod.in', 'movreel.com', 'mrfile.me', 'nosvideo.com', 'openload.io', 'played.to', 'bitshare.com', 'filefactory.com', 'k2s.cc', 'oboom.com', 'rapidgator.net', 'primeshare.tv', 'bitshare.com', 'filefactory.com', 'k2s.cc', 'oboom.com', 'rapidgator.net', 'sharerepo.com', 'stagevu.com', 'streamcloud.eu', 'streamin.to', 'thefile.me', 'thevideo.me', 'tusfiles.net', 'uploadc.com', 'zalaa.com', 'uploadrocket.net', 'uptobox.com', 'v-vids.com', 'veehd.com', 'vidbull.com', 'videomega.tv', 'vidplay.net', 'vidspot.net', 'vidto.me', 'vidzi.tv', 'vimeo.com', 'vk.com', 'vodlocker.com', 'xfileload.com', 'xvidstage.com', 'zettahost.tv']
# Plugins for which setResolvedUrl must not be called.
g_ignoreSetResolved = ['plugin.video.f4mTester', 'plugin.video.SportsDevil', 'plugin.video.sportsdevil', 'plugin.video.ZemTV-shani']
gLSProDynamicCodeNumber = 0
addon = xbmcaddon.Addon()
addon_name = addon.getAddonInfo('name')
addon_version = addon.getAddonInfo('version')
profile = TRANSLATEPATH(addon.getAddonInfo('profile'))
if not os.path.exists(profile):
    try:
        os.makedirs(profile)
    except:
        pass
home = TRANSLATEPATH(addon.getAddonInfo('path'))
sys.path.append(os.path.join(home, 'resources', 'lib'))
favorites = os.path.join(profile, 'favorites')
history = os.path.join(profile, 'history')
REV = os.path.join(profile, 'list_revision')
icon = os.path.join(home, 'icon.png')
FANART = os.path.join(home, 'fanart.jpg')
source_file = os.path.join(home, 'source_file')
functions_dir = profile
debug = addon.getSetting('debug')
origem = 'https://gitlab.com/afonsocosta/magellan_matrix/-/raw/main/magellan.txt'
if os.path.exists(favorites):
    # BUGFIX: the favorites file holds JSON; it was read as a raw string while
    # the fallback value is a list — decode it so both branches agree.
    FAV = json.loads(open(favorites).read())
else:
    FAV = []
if os.path.exists(source_file):
    # BUGFIX: same as FAV — getSources() iterates SOURCES expecting dict
    # entries (and rmSource/addSource read this file with json.loads).
    SOURCES = json.loads(open(source_file).read())
else:
    SOURCES = [{"url": origem, "fanart": FANART}]
def addon_log(string, level=xbmc.LOGDEBUG):
    """Write *string* to the Kodi log, tagged with the addon id and version.

    When the 'debug' setting is 'false' the message is forced to LOGINFO
    (always visible); otherwise the caller-supplied *level* is used.
    """
    message = "[plugin.video.Magellan_Matrix-{0}]: {1}".format(addon_version, string)
    xbmc.log(message, LOGINFO if debug == 'false' else level)
def makeRequest(url, headers=None):
    """Fetch *url* and return the response body decoded to text.

    A trailing ``|name=value&name=value`` segment in *url* is split off and
    used as extra request headers. The charset is taken from the
    Content-Type header, falling back to sniffing a ``<meta ... charset=``
    declaration in the body, then latin-1. On URLError a notification is
    shown and None is returned.
    """
    # BUGFIX: pre-initialize so the URLError path returns None instead of
    # raising NameError on the final `return result`.
    result = None
    try:
        if headers is None:
            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'}
        if '|' in url:
            url, header_in_page = url.split('|')
            header_in_page = header_in_page.split('&')
            for h in header_in_page:
                if len(h.split('=')) == 2:
                    n, v = h.split('=')
                else:
                    # Value itself contains '=': re-join everything after the name.
                    vals = h.split('=')
                    n = vals[0]
                    v = '='.join(vals[1:])
                headers[n] = v
        req = urllib_request.Request(url, None, headers)
        response = urllib_request.urlopen(req)
        result = response.read()
        encoding = None
        content_type = response.headers.get('content-type', '')
        if 'charset=' in content_type:
            encoding = content_type.split('charset=')[-1]
        if encoding is None:
            # BUGFIX: the original pattern literal was corrupted (its HTML had
            # been stripped, leaving an invalid regex). Sniff a
            # <meta ... charset=...> declaration from the body instead.
            epattern = r'''<meta\s+[^>]*charset=["']?([^"'>\s]+)'''
            epattern = epattern.encode('utf8') if six.PY3 else epattern
            r = re.search(epattern, result, re.IGNORECASE)
            if r:
                encoding = r.group(1).decode('utf8') if six.PY3 else r.group(1)
        if encoding is not None:
            result = result.decode(encoding.lower(), errors='ignore')
            result = result.encode('utf8') if six.PY2 else result
        else:
            result = result.decode('latin-1', errors='ignore') if six.PY3 else result.encode('utf-8')
        response.close()
    except urllib_error.URLError as e:
        addon_log('URL: {0}'.format(url))
        if hasattr(e, 'code'):
            msg = 'We failed with error code - {0}'.format(e.code)
            addon_log(msg)
            xbmcgui.Dialog().notification(addon_name, msg, icon, 10000, False)
        elif hasattr(e, 'reason'):
            addon_log('We failed to reach a server.')
            addon_log('Reason: {0}'.format(e.reason))
            msg = 'We failed to reach a server. - {0}'.format(e.reason)
            xbmcgui.Dialog().notification(addon_name, msg, icon, 10000, False)
    return result
def getSources():
    """Build the root listing: favorites, community files, plugin search and
    every stored source. A single stored source is opened directly via
    getData() instead of being listed."""
    try:
        if os.path.exists(favorites):
            addDir('Favorites', 'url', 4, os.path.join(home, 'resources', 'favorite.png'), FANART, '', '', '', '')
        if addon.getSetting("browse_community") == "true":
            addDir('Community Files', 'community_files', 16, icon, FANART, '', '', '', '')
        if addon.getSetting("searchotherplugins") == "true":
            addDir('Search Other Plugins', 'Search Plugins', 25, icon, FANART, '', '', '', '')
        if os.path.exists(source_file):
            sources = SOURCES
            if len(sources) > 1:
                for i in sources:
                    try:
                        # Optional metadata with per-field fallbacks.
                        thumb = icon
                        fanart = FANART
                        desc = ''
                        date = ''
                        credits = ''
                        genre = ''
                        if 'thumbnail' in i:
                            thumb = i['thumbnail']
                        if 'fanart' in i:
                            fanart = i['fanart']
                        if 'description' in i:
                            desc = i['description']
                        if 'date' in i:
                            date = i['date']
                        if 'genre' in i:
                            genre = i['genre']
                        if 'credits' in i:
                            credits = i['credits']
                        title = i['title'].encode('utf-8') if six.PY2 else i['title']
                        url = i['url'].encode('utf-8') if six.PY2 else i['url']
                        addDir(title, url, 1, thumb, fanart, desc, genre, date, credits, 'source')
                    except:
                        traceback.print_exc()
            elif len(sources) == 1:
                if isinstance(sources[0], list):
                    # BUGFIX: on Python 3 the old conditional expression
                    # evaluated sources[0][1] WITHOUT calling getData(), so a
                    # single legacy (list-format) source was never opened.
                    getData(sources[0][1].encode('utf-8') if six.PY2 else sources[0][1], FANART)
                else:
                    getData(sources[0]['url'], sources[0]['fanart'])
    except:
        traceback.print_exc()
def index():
    """Top-level entry: show the favorites folder (when one exists) and then
    render the default remote source list."""
    fav_thumb = os.path.join(home, 'resources', 'favorite.png')
    if os.path.exists(favorites):
        addDir('FAVORITOS', 'url', 4, fav_thumb, FANART, '', '', '', '')
    getData(origem, FANART)
def addSource(url=None):
    """Add a new source to source_file.

    With *url* omitted, the URL comes from the 'new_file_source' /
    'new_url_source' addon settings. The source document is fetched and its
    <channels_info>/<items_info> metadata is stored when present; otherwise
    the user is asked to name the source via the keyboard.
    """
    # BUGFIX: initialize up front — previously source_url could be unbound
    # when url was None and both settings were empty.
    source_url = url
    if source_url is None:
        if addon.getSetting("new_file_source") != "":
            source_url = addon.getSetting('new_file_source')
        elif addon.getSetting("new_url_source") != "":
            source_url = addon.getSetting('new_url_source')
    if source_url == '' or source_url is None:
        return
    addon_log('Adding New Source: {0}'.format(source_url))
    media_info = None
    data = getSoup(source_url)
    if isinstance(data, (ElementTree.ElementTree, ElementTree.Element)):
        if data.find('channels_info') is not None:
            media_info = data.find('channels_info')
        elif data.find('items_info') is not None:
            media_info = data.find('items_info')
    if media_info:
        # Document describes itself: copy each optional metadata tag.
        source_media = {}
        source_media['url'] = source_url
        for tag in ('title', 'thumbnail', 'fanart', 'genre', 'description', 'date', 'credits'):
            try:
                source_media[tag] = media_info.find(tag).text
            except:
                pass
    else:
        # No metadata: derive a name from the file part of the URL/path and
        # let the user edit it.
        nameStr = source_url  # BUGFIX: was unbound when no '/' or '\\' present
        if '/' in source_url:
            nameStr = source_url.split('/')[-1].split('.')[0]
        if '\\' in source_url:
            nameStr = source_url.split('\\')[-1].split('.')[0]
        if '%' in nameStr:
            nameStr = urllib_parse.unquote_plus(nameStr)
        keyboard = xbmc.Keyboard(nameStr, 'Displayed Name, Rename?')
        keyboard.doModal()
        if keyboard.isConfirmed() is False:
            return
        newStr = keyboard.getText()
        if len(newStr) == 0:
            return
        source_media = {}
        source_media['title'] = newStr
        source_media['url'] = source_url
        # BUGFIX: 'fanart' was an undefined local name here; use the addon
        # default fanart constant.
        source_media['fanart'] = FANART
    if os.path.exists(source_file) is False:
        source_list = [source_media]
        with open(source_file, "w") as b:
            b.write(json.dumps(source_list))
    else:
        sources = json.loads(open(source_file, "r").read())
        sources.append(source_media)
        with open(source_file, "w") as b:
            b.write(json.dumps(sources))
    addon.setSetting('new_url_source', "")
    addon.setSetting('new_file_source', "")
    xbmcgui.Dialog().notification(addon_name, 'New source added', icon, 5000, False)
    if url is not None:
        if 'community-links' in url:
            xbmc.executebuiltin("XBMC.Container.Update({0}?mode=10,replace)".format(sys.argv[0]))
    else:
        addon.openSettings()
def rmSource(name):
    """Delete the first stored source whose title (dict format) or first
    element (legacy list format) equals *name*, rewrite source_file and
    refresh the Kodi container."""
    sources = json.loads(open(source_file, "r").read())
    for pos, entry in enumerate(sources):
        label = entry[0] if isinstance(entry, list) else entry['title']
        if label == name:
            del sources[pos]
            with open(source_file, "w") as fh:
                fh.write(json.dumps(sources))
            break
    xbmc.executebuiltin("XBMC.Container.Refresh")
def getSoup(url, data=None):
    """Fetch a source document and return it parsed.

    http(s) URLs are downloaded (handling the $$TSDOWNLOADER$$, $$HLSRETRY$$
    and $$LSProEncKey=...$$ markers plus the gzip/hex obfuscation wrapper);
    other URLs are read as local/SMB/NFS files. m3u content is returned as
    raw text, everything else as an ElementTree Element (or None on parse
    failure, with a notification).
    """
    global viewmode, tsdownloader, hlsretry
    tsdownloader = False
    hlsretry = False
    if url.startswith('http://') or url.startswith('https://'):
        enckey = False
        if '$$TSDOWNLOADER$$' in url:
            tsdownloader = True
            url = url.replace("$$TSDOWNLOADER$$", "")
        if '$$HLSRETRY$$' in url:
            hlsretry = True
            url = url.replace("$$HLSRETRY$$", "")
        if '$$LSProEncKey=' in url:
            enckey = url.split('$$LSProEncKey=')[1].split('$$')[0]
            rp = '$$LSProEncKey={0}$$'.format(enckey)
            url = url.replace(rp, "")
        try:
            data = makeRequest(url)
            import gzip, binascii
            # Obfuscated payloads are "<hex-gzip><marker><junk>".
            k = 'tbbfgu5q3am5oxcfn7wl37'
            try:
                from StringIO import StringIO as BytesIO  # Python 2
            except ImportError:
                from io import BytesIO  # Python 3
            if k in data:
                data = data.split(k)
                buf = BytesIO(binascii.unhexlify(data[0]))
                f = gzip.GzipFile(fileobj=buf)
                data = f.read()
                data = data.decode('utf-8')
        except:
            data = makeRequest(url)
        if enckey:
            # AES/ECB with the key zero-padded to 16 bytes.
            from Cryptodome.Cipher import AES
            from Cryptodome.Util.Padding import unpad
            missingbytes = 16 - len(enckey)
            enckey = enckey + (chr(0) * missingbytes)
            data = base64.b64decode(data)
            cipher = AES.new(enckey.encode(), AES.MODE_ECB)
            data = unpad(cipher.decrypt(data), AES.block_size).decode()
        if re.search("#EXTM3U", data) or 'm3u' in url:
            return data
    elif data is None:
        if xbmcvfs.exists(url):
            if url.startswith("smb://") or url.startswith("nfs://"):
                # Network file: stage a local copy, read it, then delete it.
                temp_path = os.path.join(profile, 'temp', 'source_temp.txt')
                copy = xbmcvfs.copy(url, temp_path)
                if copy:
                    if six.PY2:
                        data = open(temp_path, "r").read()
                    else:
                        data = open(temp_path, "r", encoding='utf-8').read()
                    xbmcvfs.delete(temp_path)
                else:
                    addon_log("failed to copy from smb:")
            else:
                if six.PY2:
                    data = open(url, 'r').read()
                else:
                    data = open(url, 'r', encoding='utf-8').read()
            if re.match("#EXTM3U", data) or 'm3u' in url:
                return data
        else:
            addon_log("Soup Data not found!")
            return
    # BUGFIX: the <viewmode> tag had been stripped out of the check and the
    # regex by HTML-entity mangling, leaving `if '' in data` (always true)
    # and a pattern matching from the start of the document.
    if '<viewmode>' in data:
        try:
            viewmode = re.findall('<viewmode>(.*?)<', data)[0]
            xbmc.executebuiltin("Container.SetViewMode({0})".format(viewmode))
        except:
            pass
    xml = None
    try:
        xml = ElementTree.fromstring(data)
    except ElementTree.ParseError as err:
        xbmcgui.Dialog().notification(addon_name, 'Failed to parse xml: {0}'.format(err.msg), icon, 10000, False)
    except Exception as err:
        xbmcgui.Dialog().notification(addon_name, 'An error occurred: {0}'.format(err), icon, 10000, False)
    return xml
def processPyFunction(data):
    """Resolve a '$pyFunction:' dynamic value.

    A string starting with '$pyFunction:' has the remainder evaluated via
    doEval(); any other value — including None or '' — is returned unchanged.
    Evaluation errors are swallowed and the original value returned.
    """
    prefix = '$pyFunction:'
    try:
        if data and data.startswith(prefix):
            return doEval(data.split(prefix)[1], '', None, None)
    except:
        pass
    return data
def getData(url, fanart, data=None):
    """Fetch a source document and render it.

    XML <channels>/<items> documents are walked and each <channel> becomes a
    directory entry; when there are no channels the document's <item> nodes
    go to getItems(). Non-Element results from getSoup (raw m3u text) are
    handed to parse_m3u().
    """
    def _text(node, tag, default=''):
        # Text of child <tag>, or *default* when the child is absent.
        el = node.find(tag)
        return el.text if el is not None else default

    soup = getSoup(url, data)
    found_channels = None
    if isinstance(soup, ElementTree.Element):
        by_channels = soup.tag == 'channels' and len(soup) > 0 and addon.getSetting('donotshowbychannels') == 'false'
        if by_channels or (soup.tag == 'items' and len(soup) > 0):
            found_channels = soup.findall('channel')
            page_epg = None
            info_node = soup.find('channels_info')
            if info_node is None:
                info_node = soup.find('items_info')
            if info_node:
                # Page-level EPG: resolve it once and cache to LSProPageEPG.txt
                # for item-level regexes to reference.
                try:
                    if info_node.find('epg') is not None:
                        epg_src = info_node.find('epg').text
                        page_regexs = parse_regex(info_node.find('epg_regex'))
                        if '$doregex' in epg_src and getRegexParsed is not None:
                            page_epg, _setres = getRegexParsed(page_regexs, epg_src)
                        if page_epg:
                            try:
                                page_epg = json.dumps(page_epg)
                            except:
                                page_epg = str(page_epg)
                            if functions_dir not in sys.path:
                                sys.path.append(functions_dir)
                            epg_path = os.path.join(functions_dir, 'LSProPageEPG.txt')
                            with open(epg_path, 'w') as fh:
                                fh.write(page_epg)
                except BaseException as err:
                    addon_log('error getting EPG page data: {0}'.format(str(err)))
            for ch in found_channels:
                linked = ''
                link_count = 0
                if ch.findall('externallink'):
                    linked = ch.findall('externallink')[0].text
                    try:
                        # External links may be stored reversed+base32-encoded.
                        linked = base64.b32decode(linked[::-1]).decode('utf-8')
                    except:
                        pass
                if link_count > 1:
                    linked = ''
                ch_name = processPyFunction(ch.find('name').text)
                thumb = processPyFunction(_text(ch, 'thumbnail'))
                if ch.find('fanart') is not None:
                    art = ch.find('fanart').text
                elif addon.getSetting('use_thumb') == "true":
                    art = thumb
                else:
                    art = fanart
                desc = _text(ch, 'info')
                genre = _text(ch, 'genre')
                date = _text(ch, 'date')
                creds = _text(ch, 'credits')
                try:
                    ch_name = ch_name.encode('utf-8') if six.PY2 else ch_name
                    if linked == '':
                        url = url.encode('utf-8') if six.PY2 else url
                        addDir(ch_name, url, 2, thumb, art, desc, genre, date, creds, True)
                    else:
                        linked = linked.encode('utf-8') if six.PY2 else linked
                        addDir(ch_name, linked, 1, thumb, art, desc, genre, date, None, 'source')
                except:
                    addon_log('There was a problem adding directory from getData(): {0}'.format(ch_name))
        if found_channels is None or len(found_channels) == 0:
            addon_log('No Channels: getItems')
            getItems(soup.findall('item'), fanart)
    else:
        parse_m3u(soup)
# borrow from https://github.com/enen92/P2P-Streams-XBMC/blob/master/plugin.video.p2p-streams/resources/core/livestreams.py
# This will not go through the getItems functions ( means you must have ready to play url, no regex)
def parse_m3u(data):
    """Parse a raw #EXTM3U playlist body and add one link per entry.

    Streams must be ready to play (no regex resolution happens here), except
    entries tagged type="regex", whose URL carries an inline '&regexs='
    payload. Borrowed from P2P-Streams' livestreams.py.
    """
    if data:
        content = data.rstrip()
    else:
        content = ''
    match = re.compile(r'#EXTINF:(.+?),(.*?)[\n\r]+([^\r\n]+)').findall(content)
    total = len(match)
    for other, channel_name, stream_url in match:
        if 'tvg-logo' in other:
            thumbnail = re_me(other, 'tvg-logo=[\'"](.*?)[\'"]')
            if thumbnail:
                # Relative logo names are resolved against the configured folder.
                if not thumbnail.startswith('http'):
                    logo_url = addon.getSetting('logo-folderPath')
                    if logo_url != "":
                        thumbnail = logo_url + thumbnail
        else:
            thumbnail = ''
        if 'type' in other:
            mode_type = re_me(other, 'type=[\'"](.*?)[\'"]')
            if mode_type == 'yt-dl':
                stream_url = stream_url + "&mode=18"
            elif mode_type == 'regex':
                # BUGFIX: the separator had been mangled to '®exs='
                # ('&reg' swallowed as the HTML entity); restore '&regexs='.
                url = stream_url.split('&regexs=')
                regexs = parse_regex(getSoup('', data=url[1]))
                addLink(url[0], channel_name, thumbnail, '', '', '', '', '', None, regexs, total)
                continue
            elif tsdownloader and '.ts' in stream_url:
                stream_url = 'plugin://plugin.video.f4mTester/?url={0}&streamtype=TSDOWNLOADER&name={1}'.format(urllib_parse.quote_plus(stream_url), urllib_parse.quote(channel_name))
            elif hlsretry and '.m3u8' in stream_url:
                stream_url = 'plugin://plugin.video.f4mTester/?url={0}&streamtype=HLSRETRY&name={1}'.format(urllib_parse.quote_plus(stream_url), urllib_parse.quote(channel_name))
        addLink(stream_url, channel_name, thumbnail, '', '', '', '', '', None, '', total)
def getChannelItems(name, url, fanart):
    """List the subchannels of the channel called *name* from the document at
    *url*, then render the channel's own items."""
    soup = getSoup(url)
    channel_list = soup.find('./channel/[name="{0}"]'.format(name))
    items_node = channel_list.find('items')
    if items_node is not None:
        items = items_node.findall('item')
    else:
        items = channel_list.findall('item')
    if channel_list.find('fanart') is not None:
        fanArt = channel_list.find('fanart').text
    else:
        fanArt = fanart
    for sub in channel_list.findall('subchannel'):
        sub_name = processPyFunction(sub.find('name').text)
        thumb_node = sub.find('thumbnail')
        if thumb_node is not None:
            thumbnail = processPyFunction(thumb_node.text)
        else:
            thumbnail = ''
        # Note: fanArt is rebound per subchannel; the last value also feeds
        # the final getItems() call, matching the original flow.
        if sub.find('fanart') is not None:
            fanArt = sub.find('fanart').text
        elif addon.getSetting('use_thumb') == "true":
            fanArt = thumbnail
        else:
            fanArt = ''
        desc = sub.find('info').text if sub.find('info') is not None else ''
        genre = sub.find('genre').text if sub.find('genre') is not None else ''
        date = sub.find('date').text if sub.find('date') is not None else ''
        credits = sub.find('credits').text if sub.find('credits') is not None else ''
        try:
            if six.PY2:
                sub_name = sub_name.encode('utf-8')
                url = url.encode('utf-8')
            # NOTE(review): trailing arguments are (genre, credits, date) here,
            # while every other addDir call in this file passes
            # (genre, date, credits) — confirm against addDir's signature.
            addDir(sub_name, url, 3, thumbnail, fanArt, desc, genre, credits, date)
        except:
            addon_log('There was a problem adding directory - {0}'.format(sub_name))
    getItems(items, fanArt)
def getSubChannelItems(name, url, fanart):
    """Render the <subitem> entries of the subchannel called *name* found in
    the document at *url*."""
    document = getSoup(url)
    subchannel = document.find('./channel/subchannel/[name="{0}"]'.format(name))
    subitems = subchannel.find('subitems').findall('subitem')
    getItems(subitems, fanart)
def getItems(items, fanart, dontLink=False):
    """Turn a list of <item> elements into playable links / directories.

    Handles the per-item link flavours (<link>, <sportsdevil>, <yt-dl>, <dm>,
    <dmlive>, <utube>, <f4m>, <urlsolve>, <inputstream>, <slproxy>), optional
    per-item regex resolution, EPG decoration, parental blocking, playlists
    for multi-link items, and external/jsonrpc sub-sources. With
    dontLink=True the first single-link item's (name, url, regexs) tuple is
    returned instead of being added to the listing.
    """
    total = len(items)
    add_playlist = addon.getSetting('add_playlist')
    ask_playlist_items = addon.getSetting('ask_playlist_items')
    parentalblock = addon.getSetting('parentalblocked')
    parentalblock = parentalblock == "true"
    for item in items:
        isXMLSource = False
        isJsonrpc = False
        # Skip items flagged for parental block when the addon-level block is on.
        if isinstance(item.find('parentalblock'), ElementTree.Element):
            applyblock = item.find('parentalblock').text
        else:
            applyblock = 'false'
        if applyblock == 'true' and parentalblock:
            continue
        if isinstance(item.find('title'), ElementTree.Element):
            name = item.find('title').text
            if name == '':
                name = 'unknown?'
            name = processPyFunction(name)
        else:
            addon_log('Name Error')
            name = ''
        regexs = None
        if isinstance(item.find('regex'), ElementTree.Element):
            regexs = parse_regex(item.findall('regex'))
        iepg = None
        try:
            if isinstance(item.find('epg'), ElementTree.Element):
                # ** basic regex on epg_url tag for epg added to item name ** #
                if isinstance(item.find('epg_url'), ElementTree.Element) and item.find('epg_url').text is not None:
                    try:
                        epg_url = item.find('epg_url').text
                        epg_regex = item.find('epg_regex').text
                        epg_name = get_epg(epg_url, epg_regex)
                        if epg_name:
                            name += ' - ' + epg_name
                    except:
                        pass
                # ** py function block regex to generate epg for item plot ** #
                elif item.find('epg').text:
                    epg = item.find('epg').text
                    if '$doregex' in epg:
                        reg_item = item.find('epg_regex')
                        # if page tag is not provided use epg generated in
                        # channel info or items info
                        if isinstance(reg_item.find('page'), ElementTree.Element):
                            if reg_item.find('page').text is None or reg_item.find('page').text == "":
                                filename = 'LSProPageEPG.txt'
                                filenamewithpath = os.path.join(functions_dir, filename)
                                reg_item.find('page').text = filenamewithpath
                        regexs = parse_regex(reg_item)
                        iepg, setres = getRegexParsed(regexs, epg)
                    # ** or add static epg to item name ** #
                    else:
                        name += getepg(item.find('epg').text)
        except BaseException as err:
            addon_log('Error getting item EPG: {0}'.format(str(err)))
        try:
            url = []
            if len(item.findall('link')) > 0:
                for i in item.findall('link'):
                    if i.text is not None:
                        url.append(i.text)
            elif len(item.findall('sportsdevil')) > 0:
                for i in item.findall('sportsdevil'):
                    if i.text is not None:
                        sd_plugin = "plugin://plugin.video.SportsDevil" if six.PY2 else "plugin://plugin.video.sportsdevil"
                        sportsdevil = sd_plugin + '/?mode=1&item=catcher%3dstreams%26url=' + i.text + '%26videoTitle=' + name
                        # BUGFIX: Element truthiness is False for childless
                        # nodes — test for presence explicitly.
                        if item.find('referer') is not None:
                            sportsdevil = sportsdevil + '%26referer=' + item.find('referer').text
                        url.append(sportsdevil)
            elif len(item.findall('yt-dl')) > 0:
                for i in item.findall('yt-dl'):
                    if i.text is not None:
                        url.append(i.text + '&mode=18')
            elif len(item.findall('dm')) > 0:
                for i in item.findall('dm'):
                    if i.text is not None:
                        url.append("plugin://plugin.video.dailymotion_com/?mode=playVideo&url=" + i.text)
            elif len(item.findall('dmlive')) > 0:
                for i in item.findall('dmlive'):
                    if i.text is not None:
                        url.append("plugin://plugin.video.dailymotion_com/?mode=playLiveVideo&url=" + i.text)
            elif len(item.findall('utube')) > 0:
                for i in item.findall('utube'):
                    if i.text is not None:
                        # Map the tag's text to the matching youtube plugin path:
                        # search phrase, video id, playlist, channel or user.
                        if ' ' in i.text:
                            utube = 'plugin://plugin.video.youtube/search/?q=' + urllib_parse.quote_plus(i.text)
                            isJsonrpc = utube
                        elif len(i.text) == 11:
                            utube = 'plugin://plugin.video.youtube/play/?video_id=' + i.text
                        elif (i.text.startswith('PL') and '&order=' not in i.text) or i.text.startswith('UU'):
                            utube = 'plugin://plugin.video.youtube/play/?&order=default&playlist_id=' + i.text
                        elif i.text.startswith('PL') or i.text.startswith('UU'):
                            utube = 'plugin://plugin.video.youtube/play/?playlist_id=' + i.text
                        elif i.text.startswith('UC') and len(i.text) > 12:
                            utube = 'plugin://plugin.video.youtube/channel/' + i.text + '/'
                            isJsonrpc = utube
                        elif not i.text.startswith('UC') and not (i.text.startswith('PL')):
                            utube = 'plugin://plugin.video.youtube/user/' + i.text + '/'
                            isJsonrpc = utube
                        url.append(utube)
            elif len(item.findall('f4m')) > 0:
                for i in item.findall('f4m'):
                    if i.text is not None:
                        if '.f4m' in i.text:
                            f4m = 'plugin://plugin.video.f4mTester/?url=' + urllib_parse.quote_plus(i.text)
                        elif '.m3u8' in i.text:
                            f4m = 'plugin://plugin.video.f4mTester/?url=' + urllib_parse.quote_plus(i.text) + '&streamtype=HLS'
                        else:
                            f4m = 'plugin://plugin.video.f4mTester/?url=' + urllib_parse.quote_plus(i.text) + '&streamtype=SIMPLE'
                        url.append(f4m)
            elif len(item.findall('urlsolve')) > 0:
                for i in item.findall('urlsolve'):
                    if i.text is not None:
                        url.append(i.text + '&mode=19')
            elif len(item.findall('inputstream')) > 0:
                for i in item.findall('inputstream'):
                    if i.text is not None:
                        url.append(i.text + '&mode=20')
            elif len(item.findall('slproxy')) > 0:
                for i in item.findall('slproxy'):
                    if i.text is not None:
                        url.append(i.text + '&mode=22')
            if len(url) < 1:
                raise Exception()
        except:
            addon_log('Error element, Passing: {0}'.format(name.encode('utf-8') if six.PY2 else name))
            traceback.print_exc()
            continue
        if isinstance(item.find('externallink'), ElementTree.Element):
            isXMLSource = item.find('externallink').text
        if isXMLSource:
            ext_url = [isXMLSource]
            isXMLSource = True
        if isinstance(item.find('jsonrpc'), ElementTree.Element):
            isJsonrpc = item.find('jsonrpc').text
        if isJsonrpc:
            ext_url = [isJsonrpc]
            isJsonrpc = True
        if isinstance(item.find('thumbnail'), ElementTree.Element):
            thumbnail = processPyFunction(item.find('thumbnail').text)
        else:
            thumbnail = ''
        if isinstance(item.find('fanart'), ElementTree.Element):
            fanArt = item.find('fanart').text
        elif addon.getSetting('use_thumb') == "true":
            fanArt = thumbnail
        else:
            fanArt = fanart
        if isinstance(item.find('info'), ElementTree.Element):
            desc = item.find('info').text
        else:
            # ** use item epg in plot if present ** #
            desc = iepg if iepg else ''
        if isinstance(item.find('genre'), ElementTree.Element):
            genre = item.find('genre').text
        else:
            genre = ''
        if isinstance(item.find('date'), ElementTree.Element):
            date = item.find('date').text
        else:
            date = ''
        try:
            if len(url) > 1:
                # Multi-link item: either list each alternative or build a playlist.
                alt = 0
                playlist = []
                ignorelistsetting = True if '$$LSPlayOnlyOne$$' in url[0] else False
                for i in url:
                    if add_playlist == "false" and not ignorelistsetting:
                        alt += 1
                        addLink(i, '{0}) {1}'.format(alt, name.encode('utf-8', 'ignore') if six.PY2 else name), thumbnail, fanArt, desc, genre, date, True, playlist, regexs, total)
                    elif (add_playlist == "true" and ask_playlist_items == 'true') or ignorelistsetting:
                        if regexs:
                            # BUGFIX: '&regexs=' had been mangled to '®exs='
                            # ('&reg' swallowed as an HTML entity).
                            playlist.append(i + '&regexs=' + regexs)
                        elif any(x in i for x in resolve_url) and i.startswith('http'):
                            playlist.append(i + '&mode=19')
                        else:
                            playlist.append(i)
                    else:
                        playlist.append(i)
                if len(playlist) > 1:
                    addLink('', name.encode('utf-8') if six.PY2 else name, thumbnail, fanArt, desc, genre, date, True, playlist, regexs, total)
            else:
                if dontLink:
                    return name, url[0], regexs
                if isXMLSource:
                    if six.PY2:
                        name = name.encode('utf-8')
                        ext_url[0] = ext_url[0].encode('utf-8')
                        url[0] = url[0].encode('utf-8')
                    if regexs is not None:
                        addDir(name, ext_url[0], 1, thumbnail, fanArt, desc, genre, date, None, '!!update', regexs, url[0])
                    else:
                        addDir(name, ext_url[0], 1, thumbnail, fanArt, desc, genre, date, None, 'source', None, None)
                elif isJsonrpc:
                    addDir(name.encode('utf-8') if six.PY2 else name, ext_url[0], 53, thumbnail, fanArt, desc, genre, date, None, 'source')
                else:
                    try:
                        if '$doregex' in name and getRegexParsed is not None:
                            tname, setres = getRegexParsed(regexs, name)
                            if tname is not None:
                                name = tname
                    except:
                        pass
                    try:
                        if '$doregex' in thumbnail and getRegexParsed is not None:
                            tname, setres = getRegexParsed(regexs, thumbnail)
                            if tname is not None:
                                thumbnail = tname
                    except:
                        pass
                    addLink(url[0], name.encode('utf-8') if six.PY2 else name, thumbnail, fanArt, desc, genre, date, True, None, regexs, total)
        except:
            traceback.print_exc()
            addon_log('There was a problem adding item - {0}'.format(repr(name)))
def parse_regex(reg_items):
    """Flatten <regex> element(s) into a URL-quoted repr of a dict.

    Accepts a single Element or a list of them. Each regex's children are
    collected into a per-name dict (unknown tags are logged and skipped);
    'expres' and 'cookiejar' always get at least an empty-string entry. The
    result is ``urllib_parse.quote(repr({...}))`` — the format consumed by
    getRegexParsed().
    """
    known_tags = ('name', 'expres', 'page', 'referer', 'connection', 'notplayable', 'noredirect', 'origin', 'agent',
                  'accept', 'includeheaders', 'listrepeat', 'proxy', 'x-req', 'x-addr', 'x-forward', 'post', 'rawpost',
                  'htmlunescape', 'readcookieonly', 'cookiejar', 'setcookie', 'appendcookie', 'ignorecache', 'thumbnail')
    if isinstance(reg_items, ElementTree.Element):
        reg_items = [reg_items]
    collected = {}
    for node in reg_items:
        entry = {}
        for child in node:
            if child.tag in known_tags:
                entry[child.tag] = child.text
            else:
                addon_log('Unsupported tag: {0}'.format(child.tag), LOGINFO)
        if not entry.get('expres'):
            entry['expres'] = ''
        if not entry.get('cookiejar'):
            entry['cookiejar'] = ''
        collected[node.find('name').text] = entry
    return urllib_parse.quote(repr(collected))
# copies from lamda's implementation
def get_ustream(url):
try:
for i in range(1, 51):
result = getUrl(url)
if "EXT-X-STREAM-INF" in result:
return url
if "EXTM3U" not in result:
return
xbmc.sleep(2000)
return
except:
return
def getRegexParsed(regexs, url, cookieJar=None, forCookieJarOnly=False, recursiveCall=False, cachedPages={}, rawPost=False, cookie_jar_file=None): # 0,1,2 = URL, regexOnly, CookieJarOnly
if not recursiveCall:
regexs = eval(urllib_parse.unquote(regexs))
doRegexs = re.compile(r'\$doregex\[([^\]]*)\]').findall(url)
setresolved = True
for k in doRegexs:
if k in regexs:
m = regexs[k]
cookieJarParam = False
if 'cookiejar' in m: # so either create or reuse existing jar
cookieJarParam = m['cookiejar']
if '$doregex' in cookieJarParam:
cookieJar = getRegexParsed(regexs, m['cookiejar'], cookieJar, True, True, cachedPages)
cookieJarParam = True
else:
cookieJarParam = True
if cookieJarParam:
if cookieJar is None:
cookie_jar_file = None
if 'open[' in m['cookiejar']:
cookie_jar_file = m['cookiejar'].split('open[')[1].split(']')[0]
cookieJar = getCookieJar(cookie_jar_file)
if cookie_jar_file:
saveCookieJar(cookieJar, cookie_jar_file)
elif 'save[' in m['cookiejar']:
cookie_jar_file = m['cookiejar'].split('save[')[1].split(']')[0]
complete_path = os.path.join(profile, cookie_jar_file)
# saveCookieJar(cookieJar, cookie_jar_file) # gujal
saveCookieJar(cookieJar, complete_path)
if m['page'] and '$doregex' in m['page']:
pg = getRegexParsed(regexs, m['page'], cookieJar, recursiveCall=True, cachedPages=cachedPages)
if len(pg) == 0:
pg = 'http://regexfailed'
m['page'] = pg
if 'setcookie' in m and m['setcookie'] and '$doregex' in m['setcookie']:
m['setcookie'] = getRegexParsed(regexs, m['setcookie'], cookieJar, recursiveCall=True, cachedPages=cachedPages)
if 'appendcookie' in m and m['appendcookie'] and '$doregex' in m['appendcookie']:
m['appendcookie'] = getRegexParsed(regexs, m['appendcookie'], cookieJar, recursiveCall=True, cachedPages=cachedPages)
if 'post' in m and '$doregex' in m['post']:
m['post'] = getRegexParsed(regexs, m['post'], cookieJar, recursiveCall=True, cachedPages=cachedPages)
if 'rawpost' in m and '$doregex' in m['rawpost']:
m['rawpost'] = getRegexParsed(regexs, m['rawpost'], cookieJar, recursiveCall=True, cachedPages=cachedPages, rawPost=True)
if 'rawpost' in m and '$epoctime$' in m['rawpost']:
m['rawpost'] = m['rawpost'].replace('$epoctime$', getEpocTime())
if 'rawpost' in m and '$epoctime2$' in m['rawpost']:
m['rawpost'] = m['rawpost'].replace('$epoctime2$', getEpocTime2())
link = ''
if m['page'] and m['page'] in cachedPages and 'ignorecache' not in m and forCookieJarOnly is False:
link = cachedPages[m['page']]
else:
if m['page'] and m['page'] != '' and m['page'].startswith('http'):
if '$epoctime$' in m['page']:
m['page'] = m['page'].replace('$epoctime$', getEpocTime())
if '$epoctime2$' in m['page']:
m['page'] = m['page'].replace('$epoctime2$', getEpocTime2())
page_split = m['page'].split('|')
pageUrl = page_split[0]
header_in_page = None
if len(page_split) > 1:
header_in_page = page_split[1]
current_proxies = urllib_request.ProxyHandler(urllib_request.getproxies())
req = urllib_request.Request(pageUrl)
if 'proxy' in m:
proxytouse = m['proxy']
if pageUrl[:5] == "https":
proxy = urllib_request.ProxyHandler({'https': proxytouse})
else:
proxy = urllib_request.ProxyHandler({'http': proxytouse})
opener = urllib_request.build_opener(proxy)
urllib_request.install_opener(opener)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1')
proxytouse = None
if 'referer' in m:
req.add_header('Referer', m['referer'])
if 'accept' in m:
req.add_header('Accept', m['accept'])
if 'agent' in m:
req.add_header('User-agent', m['agent'])
if 'x-req' in m:
req.add_header('X-Requested-With', m['x-req'])
if 'x-addr' in m:
req.add_header('x-addr', m['x-addr'])
if 'x-forward' in m:
req.add_header('X-Forwarded-For', m['x-forward'])
if 'setcookie' in m:
req.add_header('Cookie', m['setcookie'])
if 'appendcookie' in m:
cookiestoApend = m['appendcookie']
cookiestoApend = cookiestoApend.split(';')
for h in cookiestoApend:
n, v = h.split('=')
w, n = n.split(':')
ck = http_cookiejar.Cookie(version=0, name=n, value=v, port=None, port_specified=False, domain=w, domain_specified=False, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False)
cookieJar.set_cookie(ck)
if 'origin' in m:
req.add_header('Origin', m['origin'])
if header_in_page:
header_in_page = header_in_page.split('&')
for h in header_in_page:
if h.split('=') == 2:
n, v = h.split('=')
else:
vals = h.split('=')
n = vals[0]
v = '='.join(vals[1:])
req.add_header(n, v)
if cookieJar is not None:
cookie_handler = urllib_request.HTTPCookieProcessor(cookieJar)
opener = urllib_request.build_opener(cookie_handler, urllib_request.HTTPBasicAuthHandler(), urllib_request.HTTPHandler())
opener = urllib_request.install_opener(opener)
if 'noredirect' in m:
opener = urllib_request.build_opener(cookie_handler, NoRedirection, urllib_request.HTTPBasicAuthHandler(), urllib_request.HTTPHandler())
opener = urllib_request.install_opener(opener)
elif 'noredirect' in m:
opener = urllib_request.build_opener(NoRedirection, urllib_request.HTTPBasicAuthHandler(), urllib_request.HTTPHandler())
opener = urllib_request.install_opener(opener)
if 'connection' in m:
from keepalive import HTTPHandler
keepalive_handler = HTTPHandler()
opener = urllib_request.build_opener(keepalive_handler)
urllib_request.install_opener(opener)
post = None
if 'post' in m:
postData = m['post']
splitpost = postData.split(',')
post = {}
for p in splitpost:
n = p.split(':')[0]
v = p.split(':')[1]
post[n] = v
post = urllib_parse.urlencode(post)
if 'rawpost' in m:
post = m['rawpost']
link = ''
try:
if post is not None:
response = urllib_request.urlopen(req, post.encode('utf-8'))
else:
response = urllib_request.urlopen(req)
if response.info().get('Content-Encoding') == 'gzip':
import gzip
buf = six.StringIO(response.read())
f = gzip.GzipFile(fileobj=buf)
link = f.read()
else:
link = response.read()
encoding = None
content_type = response.headers.get('content-type', '')
if 'charset=' in content_type:
encoding = content_type.split('charset=')[-1]
if encoding is None:
epattern = r']+)'''
epattern = epattern.encode('utf8') if six.PY3 else epattern
r = re.search(epattern, link, re.IGNORECASE)
if r:
encoding = r.group(1).decode('utf8') if six.PY3 else r.group(1)
if encoding is not None:
link = link.decode(encoding.lower(), errors='ignore')
link = link.encode('utf8') if six.PY2 else link
else:
link = link.decode('latin-1', errors='ignore') if six.PY3 else link.encode('utf-8')
if 'proxy' in m and current_proxies is not None:
urllib_request.install_opener(urllib_request.build_opener(current_proxies))
link = javascriptUnEscape(link)
if 'includeheaders' in m:
link += '$$HEADERS_START$$:'
for b in response.headers:
link += b + ':' + response.headers.get(b) + '\n'
link += '$$HEADERS_END$$:'
# addon_log(link)
addon_log(cookieJar)
response.close()
except:
# traceback.print_exc()
pass
cachedPages[m['page']] = link
if forCookieJarOnly:
return cookieJar # do nothing
elif m['page'] and not m['page'].startswith('http'):
if m['page'].startswith('$pyFunction:'):
val = doEval(m['page'].split('$pyFunction:')[1], '', cookieJar, m)
if forCookieJarOnly:
return cookieJar # do nothing
link = val
link = javascriptUnEscape(link)
else:
link = m['page']
if '$pyFunction:playmedia(' in m['expres'] or 'ActivateWindow' in m['expres'] or 'RunPlugin' in m['expres'] or '$PLAYERPROXY$=' in url or any(x in url for x in g_ignoreSetResolved):
setresolved = False
if '$doregex' in m['expres']:
m['expres'] = getRegexParsed(regexs, m['expres'], cookieJar, recursiveCall=True, cachedPages=cachedPages)
if m['expres'] != '':
if '$LiveStreamCaptcha' in m['expres']:
val = askCaptcha(m, link, cookieJar)
url = url.replace("$doregex[" + k + "]", val)
elif m['expres'].startswith('$pyFunction:') or '#$pyFunction' in m['expres']:
val = ''
if m['expres'].startswith('$pyFunction:'):
val = doEval(m['expres'].split('$pyFunction:')[1], link, cookieJar, m)
else:
val = doEvalFunction(m['expres'], link, cookieJar, m)
if 'ActivateWindow' in m['expres'] or 'RunPlugin' in m['expres']:
return '', False
if forCookieJarOnly:
return cookieJar # do nothing
if 'listrepeat' in m:
listrepeat = m['listrepeat']
return listrepeat, eval(val), m, regexs, cookieJar
try:
url = url.replace(u"$doregex[" + k + "]", val)
except:
url = url.replace("$doregex[" + k + "]", val.decode("utf-8"))
else:
if 'listrepeat' in m:
listrepeat = m['listrepeat']
ret = re.findall(m['expres'], link)
return listrepeat, ret, m, regexs, cookieJar
val = ''
if link != '':
reg = re.compile(m['expres']).search(link)
if reg:
val = reg.group(1).strip()
elif m['page'] == '' or m['page'] is None:
val = m['expres']
if rawPost:
val = urllib_parse.quote_plus(val)
if 'htmlunescape' in m:
val = html_parser.HTMLParser().unescape(val)
try:
url = url.replace("$doregex[" + k + "]", val)
except:
url = url.replace("$doregex[" + k + "]", val.decode("utf-8"))
else:
url = url.replace("$doregex[" + k + "]", '')
if '$epoctime$' in url:
url = url.replace('$epoctime$', getEpocTime())
if '$epoctime2$' in url:
url = url.replace('$epoctime2$', getEpocTime2())
if '$GUID$' in url:
import uuid
url = url.replace('$GUID$', str(uuid.uuid1()).upper())
if '$get_cookies$' in url:
url = url.replace('$get_cookies$', getCookiesString(cookieJar))
if recursiveCall:
return url
if url == "":
return
else:
return url, setresolved
def getmd5(t):
    """Return the hexadecimal MD5 digest of *t*.

    Accepts text or bytes; text is UTF-8 encoded first so the call also
    works on Python 3, where hashlib.update() requires bytes.
    """
    import hashlib
    if not isinstance(t, bytes):
        t = t.encode('utf-8')
    h = hashlib.md5()
    h.update(t)
    return h.hexdigest()
def playmedia(media_url):
    """Play *media_url* through CustomPlayer and block until playback ends.

    Errors are printed but never propagated; always returns ''.
    """
    try:
        import CustomPlayer
        custom_player = CustomPlayer.MyXBMCPlayer()
        item = xbmcgui.ListItem(label=str(name), path=media_url)
        item.setArt({'thumb': xbmc.getInfoImage("ListItem.Thumb"), 'icon': "DefaultVideo.png"})
        custom_player.play(media_url, item)
        xbmc.sleep(1000)
        # Poll until the player reports it is no longer active.
        while custom_player.is_active:
            xbmc.sleep(200)
    except:
        traceback.print_exc()
    return ''
def kodiJsonRequest(params):
    """Execute a Kodi JSON-RPC call and return its 'result' payload, or None."""
    request = xbmc.executeJSONRPC(json.dumps(params))
    try:
        response = json.loads(request)
    except UnicodeDecodeError:
        # Some Kodi builds hand back mis-encoded bytes; retry leniently.
        response = json.loads(request.decode('utf-8', 'ignore'))
    try:
        return response['result'] if 'result' in response else None
    except KeyError:
        addon_log("[%s] %s" % (params['method'], response['error']['message']))
        return None
def setKodiProxy(proxysettings=None):
    """Apply (or, when None, disable) Kodi's global HTTP proxy via JSON-RPC.

    *proxysettings* format: "server:port:type[:user@password]".
    """
    if proxysettings is None:
        xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue", "params":{"setting":"network.usehttpproxy", "value":false}, "id":1}')
        return
    parts = proxysettings.split(':')
    server = parts[0]
    port = parts[1]
    ptype = parts[2]
    user = None
    password = None
    if len(parts) > 3 and '@' in parts[3]:  # jairox ###proxysettings
        user = parts[3].split('@')[0]  # jairox ###ps[3]
        password = parts[3].split('@')[1]  # jairox ###proxysettings.split('@')[-1]
    xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue", "params":{"setting":"network.usehttpproxy", "value":true}, "id":1}')
    xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue", "params":{"setting":"network.httpproxytype", "value":' + str(ptype) + '}, "id":1}')
    xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue", "params":{"setting":"network.httpproxyserver", "value":"' + str(server) + '"}, "id":1}')
    xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue", "params":{"setting":"network.httpproxyport", "value":' + str(port) + '}, "id":1}')
    if user is not None:
        xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue", "params":{"setting":"network.httpproxyusername", "value":"' + str(user) + '"}, "id":1}')
        xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue", "params":{"setting":"network.httpproxypassword", "value":"' + str(password) + '"}, "id":1}')
def getConfiguredProxy():
    """Read Kodi's active proxy config; return "server:port:type[:user@pass]" or None."""
    def _setting(setting_name):
        # One JSON-RPC Settings.GetSettingValue round-trip per setting.
        return kodiJsonRequest({'jsonrpc': '2.0', "method": "Settings.GetSettingValue", "params": {"setting": setting_name}, 'id': 1})['value']

    proxyActive = _setting("network.usehttpproxy")
    proxyType = _setting("network.httpproxytype")
    if proxyActive:  # PROXY_HTTP
        server = _setting("network.httpproxyserver")
        port = six.text_type(_setting("network.httpproxyport"))
        user = _setting("network.httpproxyusername")
        password = _setting("network.httpproxypassword")
        if user and password and server and port:
            return server + ':' + str(port) + ':' + str(proxyType) + ':' + user + '@' + password
        elif server and port:
            return server + ':' + str(port) + ':' + str(proxyType)
    else:
        return None
def playmediawithproxy(media_url, name, iconImage, proxyip, port, proxyuser=None, proxypass=None):  # jairox
    """Temporarily switch Kodi to the given HTTP proxy, play *media_url*, then
    restore the previous proxy settings.

    Shows a progress dialog; notifies and bails out when playback has not
    started within ~12 s. Always returns ''.
    """
    if media_url is None or media_url == '':
        xbmcgui.Dialog().notification(addon_name, 'Unable to play empty Url', icon, 5000, False)
        return
    progress = xbmcgui.DialogProgress()
    progress.create('Progress', 'Playing with custom proxy')
    progress.update(10, "", "setting proxy..", "")
    proxyset = False
    existing_proxy = ''
    try:
        # Remember the currently configured proxy so it can be restored.
        existing_proxy = getConfiguredProxy()
        if proxyuser is not None:
            setKodiProxy(proxyip + ':' + port + ':0:' + proxyuser + '@' + proxypass)
        else:
            setKodiProxy(proxyip + ':' + port + ':0')
        proxyset = True
        progress.update(80, "", "setting proxy complete, now playing", "")
        import CustomPlayer
        player = CustomPlayer.MyXBMCPlayer()
        # BUGFIX: was 'player.pdialogue == progress', a no-op comparison.
        # The dialog must be *assigned* so the player can update it
        # (matches tryplay's 'player.pdialogue = pdialogue').
        player.pdialogue = progress
        listitem = xbmcgui.ListItem(label=str(name), path=media_url)
        listitem.setArt({'thumb': xbmc.getInfoImage("ListItem.Thumb"),
                         'icon': iconImage})
        player.play(media_url, listitem)
        xbmc.sleep(1000)
        beforestart = time.time()
        try:
            while player.is_active:
                xbmc.sleep(1000)
                # Give playback ~12 s to start before declaring failure.
                if player.urlplayed is False and time.time() - beforestart > 12:
                    xbmcgui.Dialog().notification(addon_name, 'Unable to play, check proxy', icon, 5000, False)
                    break
        except:
            pass
        progress.close()
        progress = None
    except:
        traceback.print_exc()
        if progress:
            progress.close()
    if proxyset:
        # Restore whatever proxy (possibly none) was configured before.
        setKodiProxy(existing_proxy)
    return ''
def createM3uForDash(url, useragent=None):
    """Write a minimal single-entry M3U playlist for *url* and return its path.

    *useragent* is accepted for interface compatibility but is not used here.
    Fix: the playlist text was accumulated in a variable named ``str``,
    shadowing the builtin; renamed.
    """
    m3u = '#EXTM3U'
    m3u += '\n#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=361816'
    m3u += '\n' + url + '&bytes=0-200000'
    source_file = os.path.join(profile, 'testfile.m3u')
    m3u += '\n'
    SaveToFile(source_file, m3u)
    return source_file
def SaveToFile(file_name, page_data, append=False):
    """Write *page_data* to *file_name*; append as text when *append* is True.

    BUGFIX: the overwrite path opens the file in binary mode while callers
    (e.g. createM3uForDash) pass text, which raises TypeError on Python 3;
    text is now UTF-8 encoded before the binary write. Returns ''.
    """
    if append:
        with open(file_name, 'a') as f:
            f.write(page_data)
    else:
        if not isinstance(page_data, bytes):
            page_data = page_data.encode('utf-8')
        with open(file_name, 'wb') as f:
            f.write(page_data)
    return ''
def LoadFile(file_name):
    """Return the raw bytes of *file_name*."""
    with open(file_name, 'rb') as fh:
        return fh.read()
def re_me(data, re_patten):
    """Return group(1) of the first match of *re_patten* in *data*, else ''."""
    found = re.search(re_patten, data)
    return found.group(1) if found is not None else ''
def get_unwise(str_eval):
    """Decode a JS "unwise" obfuscated (w,i,s,e) argument tuple into text.

    BUGFIX: the original did ``w, i, s, e = None``, which raises TypeError
    (cannot unpack non-iterable None) before the try block is even entered,
    so the function could never run.

    WARNING: executes attacker-controllable text via exec; kept as-is for
    compatibility with the existing regex definitions.
    """
    global w, i, s, e
    page_value = ""
    w = i = s = e = None
    try:
        ss = "w,i,s,e=(" + str_eval + ')'
        six.exec_(ss, globals())
        page_value = unwise_func(w, i, s, e)  # noQA
    except:
        traceback.print_exc(file=sys.stdout)
    return page_value
def unwise_func(w, i, s, e):
    """Decode one round of the JS "unwise" (w, i, s, e) obfuscation.

    The first five characters of each of w/i/s become key material; the
    remaining characters form a base-36 data stream. Each 2-character
    base-36 group decodes to one output character, shifted by +/-1
    depending on the parity of the current key character. Recurses through
    get_unwise() when the decoded text contains another
    eval(function(w,i,s,e)...) layer.
    """
    lIll = 0
    ll1I = 0
    Il1l = 0
    ll1l = []
    l1lI = []
    while True:
        # Interleave the three inputs: chars 0-4 of each go to the key
        # stream (l1lI), the rest to the data stream (ll1l).
        if (lIll < 5):
            l1lI.append(w[lIll])
        elif (lIll < len(w)):
            ll1l.append(w[lIll])
        lIll += 1
        if (ll1I < 5):
            l1lI.append(i[ll1I])
        elif (ll1I < len(i)):
            ll1l.append(i[ll1I])
        ll1I += 1
        if (Il1l < 5):
            l1lI.append(s[Il1l])
        elif (Il1l < len(s)):
            ll1l.append(s[Il1l])
        Il1l += 1
        # Stop once every character of w, i and s has been distributed.
        if (len(w) + len(i) + len(s) + len(e) == len(ll1l) + len(l1lI) + len(e)):
            break
    lI1l = ''.join(ll1l)  # data stream
    I1lI = ''.join(l1lI)  # key stream
    ll1I = 0
    l1ll = []
    # Decode: each 2-char base-36 group, shifted by the key parity; the key
    # index wraps around when it reaches the end of the key stream.
    for lIll in range(0, len(ll1l), 2):
        ll11 = -1
        if (ord(I1lI[ll1I]) % 2):
            ll11 = 1
        l1ll.append(chr(int(lI1l[lIll: lIll + 2], 36) - ll11))
        ll1I += 1
        if (ll1I >= len(l1lI)):
            ll1I = 0
    ret = ''.join(l1ll)
    if 'eval(function(w,i,s,e)' in ret:
        # Nested layer: extract the inner (w,i,s,e) tuple and decode again.
        ret = re.compile(r'eval\(function\(w,i,s,e\).*}\((.*?)\)').findall(ret)[0]
        return get_unwise(ret)
    else:
        return ret
def get_unpacked(page_value, regex_for_text='', iterations=1, total_iteration=1):
    """Fetch (when given a URL) and unpack P.A.C.K.E.R.-style JavaScript.

    *regex_for_text* optionally extracts the packed snippet first; returns
    'NOTPACKED' when that extraction finds nothing and 'UNPACKEDFAILED'
    when unpacking itself raises.
    """
    try:
        if page_value.startswith("http"):
            page_value = getUrl(page_value)
        if regex_for_text:
            try:
                # Pull out the packed JS variable before unpacking.
                page_value = re.compile(regex_for_text).findall(page_value)[0]
            except:
                return 'NOTPACKED'
        page_value = unpack(page_value, iterations, total_iteration)
    except:
        traceback.print_exc(file=sys.stdout)
        page_value = 'UNPACKEDFAILED'
    return page_value
def unpack(sJavascript, iteration=1, totaliterations=2):
    """Unpack Dean-Edwards-style p,a,c,k,e,d packed JavaScript (possibly nested).

    Extracts the (p, a, c, k) arguments from the packed source by exec'ing
    into globals, then substitutes the keyword table back into the payload
    via __unpack(). Recurses up to *totaliterations* times for
    multiply-packed scripts.

    WARNING: exec's text taken from a remote page; kept as-is for
    compatibility with existing regex definitions.
    """
    global myarray, p1, a1, c1, k1
    if sJavascript.startswith('var _0xcb8a='):
        # Variant: keyword table stored in a JS array literal named _0xcb8a.
        aSplit = sJavascript.split('var _0xcb8a=')
        myarray = []
        ss = "myarray=" + aSplit[1].split("eval(")[0]
        six.exec_(ss, globals())
        a1 = 62
        c1 = int(aSplit[1].split(",62,")[1].split(',')[0])
        p1 = myarray[0]  # noQA
        k1 = myarray[3]  # noQA
        # Debug dump of the keyword table.
        # NOTE(review): opened 'wb' but writes a str — fails on Python 3 when
        # this rare branch is taken; confirm before relying on it.
        with open('temp file' + str(iteration) + '.js', "wb") as filewriter:
            filewriter.write(str(k1))
    else:
        # Standard packer tail: "...return p}('<p>',<a>,<c>,'<k>'.split('|')..."
        if "rn p}('" in sJavascript:
            aSplit = sJavascript.split("rn p}('")
        else:
            aSplit = sJavascript.split("rn A}('")
        p1, a1, c1, k1 = ('', '0', '0', '')
        ss = "p1,a1,c1,k1=('" + aSplit[1].split(".spli")[0] + ')'
        six.exec_(ss, globals())
        k1 = k1.split('|')
        aSplit = aSplit[1].split("))'")
    e = ''
    d = ''
    sUnpacked1 = str(__unpack(p1, a1, c1, k1, e, d, iteration))
    if iteration >= totaliterations:
        return sUnpacked1
    else:
        # Unpack the next nesting level.
        return unpack(sUnpacked1, iteration + 1)
def __unpack(p, a, c, k, e, d, iteration, v=1):
    """Substitute every base-*a* token index in payload *p* with keyword k[index].

    v == 1 uses word-boundary re.sub (slow on large scripts); any other
    value routes through findAndReplaceWord().
    """
    while c >= 1:
        c -= 1
        if k[c]:
            token = str(__itoaNew(c, a))
            if v == 1:
                p = re.sub(r'\b%s\b' % token, k[c], p)  # THIS IS Bloody slow!
            else:
                p = findAndReplaceWord(p, token, k[c])
    return p
# function equivalent to re.sub('\\b' + aa + '\\b', k[c], p)
def findAndReplaceWord(source_str, word_to_find, replace_with):
    """Whole-word replacement of *word_to_find* in *source_str*.

    Splits on the word and re-joins, keeping a replacement only when the
    characters adjacent to the split are non-identifier characters (i.e. a
    true word boundary) — a faster stand-in for re.sub with \\b anchors.

    NOTE(review): segments are re-joined with ' '.join, so each boundary
    gains a space compared to re.sub's output — confirm callers tolerate
    the changed spacing.
    """
    splits = None
    splits = source_str.split(word_to_find)
    if len(splits) > 1:
        new_string = []
        current_index = 0
        for current_split in splits:
            new_string.append(current_split)
            val = word_to_find  # by default assume it was wrong to split
            # if its first one and item is blank then check next item is valid or not
            if current_index == len(splits) - 1:
                val = ''  # last one nothing to append normally
            else:
                if len(current_split) == 0:  # if blank check next one with current split value
                    # Both neighbours must be non-identifier chars for a real boundary.
                    if (len(splits[current_index + 1]) == 0 and word_to_find[0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_') \
                            or (len(splits[current_index + 1]) > 0 and splits[current_index + 1][0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_'):  # first just just check next
                        val = replace_with
                else:
                    if (splits[current_index][-1].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_') \
                            and ((len(splits[current_index + 1]) == 0 and word_to_find[0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_')
                                 or (len(splits[current_index + 1]) > 0 and splits[current_index + 1][0].lower() not in 'abcdefghijklmnopqrstuvwxyz1234567890_')):  # first just just check next
                        val = replace_with
            new_string.append(val)
            current_index += 1
        source_str = ' '.join(new_string)
    return source_str
def __itoa(num, radix):
    """Render non-negative *num* in the given radix using 0-9a-z digits."""
    if num == 0:
        return '0'
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    out = []
    while num > 0:
        out.append(digits[num % radix])
        num = int(num / radix)
    return ''.join(reversed(out))
def __itoaNew(cc, a):
    """Base-*a* keyword token for index *cc*, matching the JS packer's encoding.

    Trailing digit: chr(cc % a + 29) when the remainder exceeds 35,
    otherwise a base-36 digit from __itoa.
    """
    prefix = "" if cc < a else __itoaNew(int(cc / a), a)
    cc = cc % a
    tail = chr(cc + 29) if cc > 35 else str(__itoa(cc, 36))
    return prefix + tail
def getCookiesString(cookieJar):
    """Serialize *cookieJar* into a "name=value;name=value;" header string.

    Best-effort: any iteration failure returns whatever was collected so
    far (an unusable jar yields ''). Cleanup: the index from enumerate()
    was never used, so the plain iterator is used instead.
    """
    cookieString = ""
    try:
        for cookie in cookieJar:
            cookieString += cookie.name + "=" + cookie.value + ";"
    except:
        pass
    return cookieString
def saveCookieJar(cookieJar, COOKIEFILE):
    """Persist *cookieJar* as COOKIEFILE under the profile dir; failures ignored."""
    try:
        target = os.path.join(profile, COOKIEFILE)
        cookieJar.save(target, ignore_discard=True)
    except:
        pass
def getCookieJar(COOKIEFILE):
    """Load the LWP cookie jar named *COOKIEFILE* from the profile directory.

    Falls back to a fresh, empty jar when the name is empty or the file
    cannot be loaded.
    """
    jar = None
    if COOKIEFILE:
        try:
            jar = http_cookiejar.LWPCookieJar()
            jar.load(os.path.join(profile, COOKIEFILE), ignore_discard=True)
        except:
            jar = None
    # An unloadable (or empty) jar is replaced with a brand-new one.
    return jar or http_cookiejar.LWPCookieJar()
def doEval(fun_call, page_data, Cookie_Jar, m):
    """Evaluate a "$pyFunction:" expression from a regex definition.

    Publishes page_data / Cookie_Jar / m as module globals so the evaluated
    expression can reference them, best-effort imports the module named
    before the first '.', then exec's "ret_val = <fun_call>" and returns
    the result coerced to str.

    WARNING: executes addon-supplied code via exec; only safe when the
    regex source is trusted.
    """
    global ret_val
    ret_val = ''
    globals()["page_data"] = page_data
    globals()["Cookie_Jar"] = Cookie_Jar
    globals()["m"] = m
    # Make the addon's helper-function directory importable.
    if functions_dir not in sys.path:
        sys.path.append(functions_dir)
    try:
        # e.g. "mylib.fn(...)" -> "import mylib"; failures are ignored so
        # plain builtin expressions still work.
        py_file = 'import ' + fun_call.split('.')[0]
        six.exec_(py_file, globals())
    except:
        # traceback.print_exc(file=sys.stdout)
        pass
    six.exec_('ret_val=' + fun_call, globals())
    return six.ensure_str(ret_val)
def doEvalFunction(fun_call, page_data, Cookie_Jar, m):
    """Run a multi-line "$pyFunction" body shipped inside a regex definition.

    Writes the code into a uniquely numbered LSProdynamicCodeN.py module in
    functions_dir, imports it, and calls its GetLSProData(page_data,
    Cookie_Jar, m) entry point. Returns str(result) when possible, the raw
    result otherwise, and "" on any failure (errors are swallowed).
    """
    try:
        global gLSProDynamicCodeNumber
        # Unique module name per call so Python's import cache never serves
        # a stale previous version of the dynamic code.
        gLSProDynamicCodeNumber = gLSProDynamicCodeNumber + 1
        ret_val = ''
        if functions_dir not in sys.path:
            sys.path.append(functions_dir)
        filename = 'LSProdynamicCode{0}.py'.format(gLSProDynamicCodeNumber)
        filenamewithpath = os.path.join(functions_dir, filename)
        f = open(filenamewithpath, "wb")
        f.write(six.ensure_binary("# -*- coding: utf-8 -*-\n"))
        f.write(fun_call.encode("utf-8"))
        f.close()
        LSProdynamicCode = import_by_string(filename.split('.')[0], filenamewithpath)
        ret_val = LSProdynamicCode.GetLSProData(page_data, Cookie_Jar, m)
        try:
            return str(ret_val)
        except:
            # Result not representable as str (e.g. list for listrepeat).
            return ret_val
    except:
        pass
        # traceback.print_exc()
    return ""
def import_by_string(full_name, filenamewithpath):
    """Import module *full_name*; fall back to loading it from its file path.

    The fallback uses the legacy ``imp`` loader for old Python 2 runtimes.
    """
    try:
        from importlib import import_module
        return import_module(full_name)
    except:
        import imp
        return imp.load_source(full_name, filenamewithpath)
def getGoogleRecaptchaResponse(captchakey, cj, type=1):  # 1 for get, 2 for post, 3 for rawpost
    """Solve a legacy Google reCAPTCHA challenge interactively.

    Fetches the challenge for *captchakey* (site key or full URL), downloads
    the puzzle image, shows it in InputWindow and returns the
    challenge/response pair encoded for a GET query string (type 1 or 3) or
    a "key:value,key:value" post map (type 2). Returns '' when no challenge
    was obtained.

    NOTE(review): these are the retired reCAPTCHA v1 endpoints — expect the
    google.com/recaptcha/api/* URLs to be dead; confirm before relying on this.
    """
    recapChallenge = ""
    solution = ""
    captcha_reload_response_chall = None
    solution = None
    if len(captchakey) > 0:  # new shiny captcha!
        captcha_url = captchakey
        if not captcha_url.startswith('http'):
            # A bare site key was passed; build the ajax challenge URL.
            captcha_url = 'https://www.google.com/recaptcha/api/challenge?k=' + captcha_url + '&ajax=1'
        cap_chall_reg = 'challenge.*?\'(.*?)\''
        cap_image_reg = '\'(.*?)\''
        captcha_script = getUrl(captcha_url, cookieJar=cj)
        recapChallenge = re.findall(cap_chall_reg, captcha_script)[0]
        # Reload the challenge to force an image-type puzzle token.
        captcha_reload = 'http://www.google.com/recaptcha/api/reload?c='
        captcha_k = captcha_url.split('k=')[1]
        captcha_reload += recapChallenge + '&k=' + captcha_k + '&reason=i&type=image&lang=en'
        captcha_reload_js = getUrl(captcha_reload, cookieJar=cj)
        captcha_reload_response_chall = re.findall(cap_image_reg, captcha_reload_js)[0]
        captcha_image_url = 'https://www.google.com/recaptcha/api/image?c=' + captcha_reload_response_chall
        if not captcha_image_url.startswith("http"):
            captcha_image_url = 'https://www.google.com/recaptcha/api/' + captcha_image_url
        import random
        # Random filename prefix avoids clobbering a concurrent captcha file.
        n = random.randrange(100, 1000, 5)
        local_captcha = os.path.join(profile, str(n) + "captcha.img")
        localFile = open(local_captcha, "wb")
        localFile.write(getUrl(captcha_image_url, cookieJar=cj))
        localFile.close()
        solver = InputWindow(captcha=local_captcha)
        solution = solver.get()
        os.remove(local_captcha)
    if captcha_reload_response_chall:
        if type == 1:
            return 'recaptcha_challenge_field=' + urllib_parse.quote_plus(captcha_reload_response_chall) + '&recaptcha_response_field=' + urllib_parse.quote_plus(solution)
        elif type == 2:
            return 'recaptcha_challenge_field:' + captcha_reload_response_chall + ',recaptcha_response_field:' + solution
        else:
            return 'recaptcha_challenge_field=' + urllib_parse.quote_plus(captcha_reload_response_chall) + '&recaptcha_response_field=' + urllib_parse.quote_plus(solution)
    else:
        return ''
def getUrl(url, cookieJar=None, post=None, timeout=20, headers=None, noredir=False):
    """Fetch *url* and return the decoded page text.

    POST data (a str) is UTF-8 encoded; *headers* is an iterable of
    (name, value) pairs; *noredir* installs the NoRedirection handler so
    3xx responses are returned as-is.

    Charset resolution: Content-Type header first, then a <meta ... charset=...>
    tag, then latin-1 (Py3) / utf-8 re-encode (Py2) as last resort.
    BUGFIX: the meta-tag pattern was the literal r']+)' — an invalid regex
    (unbalanced parenthesis) that raised re.error whenever the header
    carried no charset; replaced with a valid pattern.
    """
    cookie_handler = urllib_request.HTTPCookieProcessor(cookieJar)
    if post is not None:
        post = post.encode('utf-8')
    if noredir:
        opener = urllib_request.build_opener(NoRedirection, cookie_handler, urllib_request.HTTPBasicAuthHandler(), urllib_request.HTTPHandler())
    else:
        opener = urllib_request.build_opener(cookie_handler, urllib_request.HTTPBasicAuthHandler(), urllib_request.HTTPHandler())
    req = urllib_request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
    if headers:
        for h, hv in headers:
            req.add_header(h, hv)
    response = opener.open(req, post, timeout=timeout)
    link = response.read()
    encoding = None
    content_type = response.headers.get('content-type', '')
    if 'charset=' in content_type:
        encoding = content_type.split('charset=')[-1]
    if encoding is None:
        # Sniff a <meta charset=...> / http-equiv declaration from the body.
        epattern = r'<meta[^>]*charset=["\']?([\w-]+)'
        epattern = epattern.encode('utf8') if six.PY3 else epattern
        r = re.search(epattern, link, re.IGNORECASE)
        if r:
            encoding = r.group(1).decode('utf8') if six.PY3 else r.group(1)
    if encoding is not None:
        link = link.decode(encoding.lower(), errors='ignore')
        link = link.encode('utf8') if six.PY2 else link
    else:
        link = link.decode('latin-1', errors='ignore') if six.PY3 else link.encode('utf-8')
    response.close()
    return link
def get_decode(str, reg=None):
if reg:
str = re.findall(reg, str)[0]
s1 = urllib_parse.unquote(str[0: len(str) - 1])
t = ''
for i in range(len(s1)):
t += chr(ord(s1[i]) - s1[len(s1) - 1])
t = urllib_parse.unquote(t)
return t
def javascriptUnEscape(str):
    """Replace the payload of every JS unescape('...') call with its unquoted form."""
    # findall always returns a list, so the empty case simply skips the loop.
    for chunk in re.findall(r'unescape\(\'(.*?)\'', str):
        str = str.replace(chunk, urllib_parse.unquote(chunk))
    return str
def askCaptcha(m, html_page, cookieJar):
    """Download the captcha image referenced by m['expres'] and prompt the user.

    The image-locating regex is embedded as $LiveStreamCaptcha[<regex>] in
    the expression; relative image URLs are resolved against m['page']'s
    host. Returns the text the user typed (False when cancelled).

    Fixes: the image request was previously issued twice (a stray urlopen
    whose response was discarded), and the raw image bytes were run through
    text charset decoding — which both corrupted the image and relied on an
    invalid regex. The bytes are now fetched once and written untouched.
    """
    global iid
    iid += 1
    expre = m['expres']
    page_url = m['page']
    # Extract the user-supplied regex that locates the captcha image URL.
    captcha_regex = re.compile(r'\$LiveStreamCaptcha\[([^\]]*)\]').findall(expre)[0]
    captcha_url = re.compile(captcha_regex).findall(html_page)[0]
    if not captcha_url.startswith("http"):
        # Resolve a relative captcha URL against the page's host.
        page_ = 'http://' + "".join(page_url.split('/')[2:3])
        if captcha_url.startswith("/"):
            captcha_url = page_ + captcha_url
        else:
            captcha_url = page_ + '/' + captcha_url
    local_captcha = os.path.join(profile, str(iid) + "captcha.jpg")
    req = urllib_request.Request(captcha_url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1')
    if 'referer' in m:
        req.add_header('Referer', m['referer'])
    if 'agent' in m:
        req.add_header('User-agent', m['agent'])
    if 'setcookie' in m:
        req.add_header('Cookie', m['setcookie'])
    response = urllib_request.urlopen(req)  # BUGFIX: fetch exactly once
    try:
        localFile = open(local_captcha, "wb")
        # BUGFIX: write the image bytes untouched; no text decoding.
        localFile.write(response.read())
        localFile.close()
    finally:
        response.close()
    solver = InputWindow(captcha=local_captcha)
    solution = solver.get()
    return solution
def askCaptchaNew(imageregex, html_page, cookieJar, m):
    """Locate a captcha image via *imageregex* (or treat *html_page* as the
    image URL when the regex is empty), download it and prompt the user.

    Returns the text the user typed (False when cancelled).
    Fix: the raw image bytes were previously run through text charset
    decoding — corrupting the file and relying on an invalid regex; the
    bytes are now written untouched.
    """
    global iid
    iid += 1
    if imageregex != '':
        # The regex may target either a fetched page or inline HTML.
        if html_page.startswith("http"):
            page_ = getUrl(html_page, cookieJar=cookieJar)
        else:
            page_ = html_page
        captcha_url = re.compile(imageregex).findall(page_)[0]
    else:
        captcha_url = html_page
    local_captcha = os.path.join(profile, str(iid) + "captcha.jpg")
    req = urllib_request.Request(captcha_url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1')
    if 'referer' in m:
        req.add_header('Referer', m['referer'])
    if 'agent' in m:
        req.add_header('User-agent', m['agent'])
    if 'accept' in m:
        req.add_header('Accept', m['accept'])
    if 'setcookie' in m:
        req.add_header('Cookie', m['setcookie'])
    response = urllib_request.urlopen(req)
    try:
        localFile = open(local_captcha, "wb")
        # Write the image bytes untouched; no text decoding.
        localFile.write(response.read())
        localFile.close()
    finally:
        response.close()
    solver = InputWindow(captcha=local_captcha)
    solution = solver.get()
    return solution
def TakeInput(name, headname):
    """Prompt via the Kodi keyboard, pre-filled with *name*; return the entry.

    NOTE(review): doModal() is never called, so the keyboard is not actually
    shown before getText() — confirm this is intended.
    """
    keyboard = xbmc.Keyboard('default', 'heading', True)
    keyboard.setDefault(name)
    keyboard.setHeading(headname)
    keyboard.setHiddenInput(False)
    return keyboard.getText()
class InputWindow(xbmcgui.WindowDialog):
    """Modal captcha prompt: shows the captcha image plus a keyboard for the answer."""

    def __init__(self, *args, **kwargs):
        self.cptloc = kwargs.get('captcha')  # path to the captcha image file
        self.img = xbmcgui.ControlImage(335, 30, 624, 60, self.cptloc)
        self.addControl(self.img)
        self.kbd = xbmc.Keyboard()

    def get(self):
        """Show the dialog and collect input; return the text, or False if cancelled."""
        self.show()
        time.sleep(2)  # give the image a moment to render before the keyboard opens
        self.kbd.doModal()
        result = False
        if self.kbd.isConfirmed():
            result = self.kbd.getText()
        self.close()
        return result
def getEpocTime():
    """Current Unix time in milliseconds, as a decimal string."""
    millis = int(time.time() * 1000)
    return str(millis)
def getEpocTime2():
    """Current Unix time in whole seconds, as a decimal string."""
    seconds = int(time.time())
    return str(seconds)
def get_params():
    """Parse sys.argv[2] (e.g. "?a=1&b=2") into a dict; [] when no query present."""
    param = []
    paramstring = sys.argv[2]
    if len(paramstring) >= 2:
        params = sys.argv[2]
        cleanedparams = params.replace('?', '')
        # Strip a trailing slash variant (kept from the original behavior).
        if params[len(params) - 1] == '/':
            params = params[0:len(params) - 2]
        param = {}
        for pair in cleanedparams.split('&'):
            kv = pair.split('=')
            if len(kv) == 2:
                param[kv[0]] = kv[1]
    return param
def getFavorites():
    """List every saved favorite as a Kodi link (mode 0) or directory entry.

    Each favorites entry is a JSON array:
    [name, url, iconimage, fanart, mode, playlist?, regexs?] — older entries
    may lack the optional trailing fields, hence the try/except fallbacks.
    """
    items = json.loads(open(favorites).read())
    total = len(items)
    for i in items:
        name = i[0]
        url = i[1]
        iconimage = i[2]
        try:
            fanArt = i[3]
            if fanArt is None:
                raise Exception()
        except:
            # Missing fanart: fall back to the thumbnail (if configured) or
            # the addon's default fanart.
            if addon.getSetting('use_thumb') == "true":
                fanArt = iconimage
            else:
                fanArt = fanart
        try:
            playlist = i[5]
        except:
            playlist = None
        try:
            regexs = i[6]
        except:
            regexs = None
        if i[4] == 0:
            # mode 0 entries are directly playable links.
            addLink(url, name, iconimage, fanArt, '', '', '', 'fav', playlist, regexs, total)
        else:
            # NOTE(review): directories pass the global 'fanart' rather than
            # the per-item fanArt computed above — confirm intentional.
            addDir(name, url, i[4], iconimage, fanart, '', '', '', '', 'fav')
def addFavorite(name, url, iconimage, fanart, mode, playlist=None, regexs=None):
    """Append an entry to the favorites JSON file, creating the file if absent.

    BUGFIX: the append branch used to store only a 5-tuple, silently
    dropping playlist/regexs; both branches now write the same 7-field
    shape that getFavorites reads back (i[5]=playlist, i[6]=regexs).
    """
    favList = []
    try:
        # Python 2 stores the name as UTF-8 bytes for JSON round-tripping.
        name = name.encode('utf-8', 'ignore') if six.PY2 else name
    except:
        pass
    if os.path.exists(favorites) is False:
        addon_log('Making Favorites File')
        favList.append((name, url, iconimage, fanart, mode, playlist, regexs))
        a = open(favorites, "w")
        a.write(json.dumps(favList))
        a.close()
    else:
        addon_log('Appending Favorites')
        a = open(favorites).read()
        data = json.loads(a)
        data.append((name, url, iconimage, fanart, mode, playlist, regexs))
        b = open(favorites, "w")
        b.write(json.dumps(data))
        b.close()
def rmFavorite(name):
    """Delete the first favorites entry titled *name*, then refresh the container."""
    data = json.loads(open(favorites).read())
    for index, entry in enumerate(data):
        if entry[0] == name:
            del data[index]
            with open(favorites, "w") as fh:
                fh.write(json.dumps(data))
            break  # only the first match is removed
    xbmc.executebuiltin("XBMC.Container.Refresh")
def urlsolver(url):
    """Resolve a hoster page URL into a playable media URL via (url)resolveurl.

    Unsupported domains trigger a notification and return the URL unchanged.
    """
    try:
        import resolveurl
    except:
        # Older installs ship the library under its previous name.
        import urlresolver as resolveurl
    if not resolveurl.HostedMediaFile(url).valid_url():
        xbmcgui.Dialog().notification(addon_name, 'ResolveUrl does not support this domain.', icon, 5000, False)
        return url
    return resolveurl.resolve(url)
def tryplay(url, listitem, pdialogue=None):
    """Attempt playback of *url*; return True once media is actually playing.

    plugin:// URLs (YouTube excepted) are launched via RunPlugin and polled;
    everything else goes through CustomPlayer with a ~4 s success window.
    """
    lowered = url.lower()
    if lowered.startswith('plugin') and 'youtube' not in lowered:
        xbmc.executebuiltin('RunPlugin(' + url + ')')
        # Poll for up to ~4 s (8 x 500 ms) for the plugin to start playback.
        for _ in range(8):
            xbmc.sleep(500)
            try:
                if xbmc.getCondVisibility("Player.HasMedia") and xbmc.Player().isPlaying():
                    return True
            except:
                pass
        return False
    import CustomPlayer
    player = CustomPlayer.MyXBMCPlayer()
    player.pdialogue = pdialogue
    started_at = time.time()
    player.play(url, listitem)
    xbmc.sleep(1000)
    try:
        while player.is_active:
            xbmc.sleep(400)
            if player.urlplayed:
                return True
            if time.time() - started_at > 4:
                return False
    except:
        pass
    return False
def play_playlist(name, mu_playlist, queueVideo=None):
    """Play or queue a multi-link playlist entry.

    Three modes, chosen by markers and settings:
    - '$$LSPlayOnlyOne$$' prefix on the first link: try each link in order
      until one actually plays (with a cancelable progress dialog).
    - setting 'ask_playlist_items': prompt the user to pick one source.
    - otherwise: resolve every link into the Kodi video playlist and start
      it, or (queueVideo set) just append to the current playlist.

    Link markers: '$$lsname=<label>' carries a display name; '&mode=19'
    routes through (url)resolver; '$doregex' triggers regex parsing.
    NOTE(review): the '®exs' separator looks like mojibake of '&regexs'
    ('&reg' collapsed to '®') but is used consistently across this file,
    so it is preserved as-is.
    """
    playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    if '$$LSPlayOnlyOne$$' in mu_playlist[0]:
        # Sequential fallback mode: attempt each link until one plays.
        mu_playlist[0] = mu_playlist[0].replace('$$LSPlayOnlyOne$$', '')
        names = []
        iloop = 0
        progress = xbmcgui.DialogProgress()
        progress.create('Progress', 'Trying Multiple Links')
        for i in mu_playlist:
            # Derive a display name: explicit $$lsname= label or the URL host.
            if '$$lsname=' in i:
                d_name = i.split('$$lsname=')[1].split('®exs')[0]
                names.append(d_name)
                # Strip the label from the stored link, keeping any regex tail.
                mu_playlist[iloop] = i.split('$$lsname=')[0] + ('®exs' + i.split('®exs')[1] if '®exs' in i else '')
            else:
                d_name = urllib_parse.urlparse(i).netloc
                if d_name == '':
                    names.append(name)
                else:
                    names.append(d_name)
            index = iloop
            iloop += 1
            playname = names[index]
            if progress.iscanceled():
                return
            # NOTE(review): on Python 2 this is integer division, so the
            # percentage is usually 0 until the end — confirm acceptable.
            progress.update(iloop / len(mu_playlist) * 100, "", "Link#%d" % (iloop), playname)
            if "&mode=19" in mu_playlist[index]:
                # Hoster link: resolve through (url)resolver first.
                liz = xbmcgui.ListItem(playname)
                liz.setArt({'thumb': iconimage,
                            'icon': iconimage})
                liz.setInfo(type='Video', infoLabels={'Title': playname, 'mediatype': 'video'})
                liz.setProperty("IsPlayable", "true")
                urltoplay = urlsolver(mu_playlist[index].replace('&mode=19', '').replace(';', ''))
                liz.setPath(urltoplay)
                played = tryplay(urltoplay, liz)
            elif "$doregex" in mu_playlist[index]:
                # Regex-driven link: resolve the URL via getRegexParsed.
                sepate = mu_playlist[index].split('®exs=')
                url, setresolved = getRegexParsed(sepate[1], sepate[0])
                url2 = url.replace(';', '')
                liz = xbmcgui.ListItem(playname)
                liz.setArt({'thumb': iconimage,
                            'icon': iconimage})
                liz.setInfo(type='Video', infoLabels={'Title': playname, 'mediatype': 'video'})
                liz.setProperty("IsPlayable", "true")
                liz.setPath(url2)
                played = tryplay(url2, liz)
            else:
                # Plain URL: drop any regex tail and play directly.
                url = mu_playlist[index]
                url = url.split('®exs=')[0]
                liz = xbmcgui.ListItem(playname)
                liz.setArt({'thumb': iconimage,
                            'icon': iconimage})
                liz.setInfo(type='Video', infoLabels={'Title': playname, 'mediatype': 'video'})
                liz.setProperty("IsPlayable", "true")
                liz.setPath(url)
                played = tryplay(url, liz)
            if played:
                return
        return
    if addon.getSetting('ask_playlist_items') == 'true' and not queueVideo:
        # Interactive mode: let the user choose one source from a dialog.
        names = []
        iloop = 0
        for i in mu_playlist:
            if '$$lsname=' in i:
                d_name = i.split('$$lsname=')[1].split('®exs')[0]
                names.append(d_name)
                mu_playlist[iloop] = i.split('$$lsname=')[0] + ('®exs' + i.split('®exs')[1] if '®exs' in i else '')
            else:
                d_name = urllib_parse.urlparse(i).netloc
                if d_name == '':
                    names.append(name)
                else:
                    names.append(d_name)
            iloop += 1
        dialog = xbmcgui.Dialog()
        index = dialog.select('Choose a video source', names)
        if index >= 0:
            playname = names[index]
            if "&mode=19" in mu_playlist[index]:
                liz = xbmcgui.ListItem(playname)
                liz.setArt({'thumb': iconimage,
                            'icon': iconimage})
                liz.setInfo(type='Video', infoLabels={'Title': playname, 'mediatype': 'video'})
                liz.setProperty("IsPlayable", "true")
                urltoplay = urlsolver(mu_playlist[index].replace('&mode=19', '').replace(';', ''))
                liz.setPath(urltoplay)
                xbmc.Player().play(urltoplay, liz)
            elif "$doregex" in mu_playlist[index]:
                sepate = mu_playlist[index].split('®exs=')
                url, setresolved = getRegexParsed(sepate[1], sepate[0])
                url2 = url.replace(';', '')
                liz = xbmcgui.ListItem(playname)
                liz.setArt({'thumb': iconimage,
                            'icon': iconimage})
                liz.setInfo(type='Video', infoLabels={'Title': playname, 'mediatype': 'video'})
                liz.setProperty("IsPlayable", "true")
                liz.setPath(url2)
                xbmc.Player().play(url2, liz)
            else:
                url = mu_playlist[index]
                url = url.split('®exs=')[0]
                liz = xbmcgui.ListItem(playname)
                liz.setArt({'thumb': iconimage,
                            'icon': iconimage})
                liz.setInfo(type='Video', infoLabels={'Title': playname, 'mediatype': 'video'})
                liz.setProperty("IsPlayable", "true")
                liz.setPath(url)
                xbmc.Player().play(url, liz)
    elif not queueVideo:
        # Queue every resolvable link into the video playlist and start it.
        playlist.clear()
        item = 0
        for i in mu_playlist:
            item += 1
            info = xbmcgui.ListItem('%s) %s' % (str(item), name))
            try:
                # NOTE(review): when neither marker matches, 'url' keeps its
                # value from the previous iteration (or is undefined on the
                # first) — the except below masks this; confirm intended.
                if "$doregex" in i:
                    sepate = i.split('®exs=')
                    url, setresolved = getRegexParsed(sepate[1], sepate[0])
                elif "&mode=19" in i:
                    url = urlsolver(i.replace('&mode=19', '').replace(';', ''))
                if url:
                    playlist.add(url, info)
                else:
                    raise Exception()
            except Exception:
                # Fall back to queueing the raw link text.
                playlist.add(i, info)
                pass
        xbmc.executebuiltin('playlist.playoffset(video,0)')
    else:
        # queueVideo: append to the current playlist without starting playback.
        listitem = xbmcgui.ListItem(name)
        playlist.add(mu_playlist, listitem)
def download_file(name, url):
    """Placeholder: downloading is not implemented; just notify the user.

    TODO: a real implementation (downloader + optional addSource of the
    saved file) was previously sketched here in commented-out form.
    """
    xbmcgui.Dialog().notification(addon_name, 'Function not implemented yet.', icon, 15000, False)
def _search(url, name):
    """Let the user pick a search backend (YouTube/DailyMotion/Vimeo) and query it."""
    backends = [
        ('Youtube', 'plugin://plugin.video.youtube/kodion/search/list/'),
        ('DailyMotion', 'plugin://plugin.video.dailymotion_com/?mode=search&url'),
        ('Vimeo', 'plugin://plugin.video.vimeo/kodion/search/list/'),
    ]
    dialog = xbmcgui.Dialog()
    choice = dialog.select('Choose a video source', [label for label, _ in backends])
    if choice >= 0:
        url = backends[choice][1]
        pluginquerybyJSON(url)
def addDir(name, url, mode, iconimage, fanart, description, genre, date, credits, showcontext=False, regexs=None, reg_url=None, allinfo=None):
    """Add a folder entry to the Kodi directory listing.

    Builds the plugin route URL for the item, sets artwork and video
    infoLabels, optionally attaches context-menu entries selected by
    `showcontext` ('source', 'download', 'fav', '!!update', or any other
    truthy value for just the parental-block / favourites entries), and
    registers the item via xbmcplugin.addDirectoryItem.

    Returns the boolean result of xbmcplugin.addDirectoryItem.
    """
    # BUG FIX: `allinfo` previously defaulted to a shared mutable dict
    # ({}) and is mutated below with .update(), so the 'mediatype' key
    # leaked into every subsequent call that relied on the default.
    # A None sentinel keeps the interface backward-compatible.
    if allinfo is None:
        allinfo = {}
    # Needed in Kodi 19 Matrix as paths ending in .xml seem to be
    # blacklisted causing the parent path to always be root.
    url = url + "/" if url.endswith(".xml") else url
    # NOTE(review): the literal '®exs=' looks like a mojibake of
    # '&regexs=' but it is used consistently as the separator throughout
    # this file, so it must be preserved verbatim here.
    if regexs and len(regexs) > 0:
        u = sys.argv[0] + "?url=" + urllib_parse.quote_plus(url) + "&mode=" + str(mode) + "&name=" + urllib_parse.quote_plus(name) + "&fanart=" + urllib_parse.quote_plus(fanart) + "®exs=" + regexs
    else:
        u = sys.argv[0] + "?url=" + urllib_parse.quote_plus(url) + "&mode=" + str(mode) + "&name=" + urllib_parse.quote_plus(name) + "&fanart=" + urllib_parse.quote_plus(fanart)
    ok = True
    if date == '':
        date = None
    else:
        description += '\n\nDate: %s' % date
    liz = xbmcgui.ListItem(name)
    liz.setArt({'fanart': fanart, 'thumb': iconimage, 'icon': "DefaultFolder.png"})
    if len(allinfo) < 1:
        liz.setInfo(type="Video", infoLabels={"Title": name, 'mediatype': 'video', "Plot": description, "Genre": genre, "dateadded": date, "credits": credits})
    else:
        allinfo.update({'mediatype': 'video'})
        liz.setInfo(type="Video", infoLabels=allinfo)
    liz.setProperty('IsPlayable', 'false')
    if showcontext:
        contextMenu = []
        # Parental-block toggle is only offered once a PIN has been set.
        parentalblock = addon.getSetting('parentalblocked') == "true"
        parentalblockedpin = addon.getSetting('parentalblockedpin')
        if len(parentalblockedpin) > 0:
            if parentalblock:
                contextMenu.append(('Disable Parental Block', 'RunPlugin(%s?mode=55&name=%s)' % (sys.argv[0], urllib_parse.quote_plus(name))))
            else:
                contextMenu.append(('Enable Parental Block', 'RunPlugin(%s?mode=56&name=%s)' % (sys.argv[0], urllib_parse.quote_plus(name))))
        if showcontext == 'source':
            if name in str(SOURCES):
                contextMenu.append(('Remove from Sources', 'RunPlugin(%s?mode=8&name=%s)' % (sys.argv[0], urllib_parse.quote_plus(name))))
        elif showcontext == 'download':
            contextMenu.append(('Download', 'RunPlugin(%s?url=%s&mode=9&name=%s)'
                                % (sys.argv[0], urllib_parse.quote_plus(url), urllib_parse.quote_plus(name))))
        elif showcontext == 'fav':
            contextMenu.append(('Removeer dos Favoritos', 'RunPlugin(%s?mode=6&name=%s)'
                                % (sys.argv[0], urllib_parse.quote_plus(name))))
        if showcontext == '!!update':
            fav_params2 = (
                '%s?url=%s&mode=17®exs=%s'
                % (sys.argv[0], urllib_parse.quote_plus(reg_url), regexs)
            )
            contextMenu.append(('[COLOR yellow]!!update[/COLOR]', 'RunPlugin(%s)' % fav_params2))
        if name not in FAV:
            contextMenu.append(('Adicionar aos Favoritos', 'RunPlugin(%s?mode=5&name=%s&url=%s&iconimage=%s&fanart=%s&fav_mode=%s)'
                                % (sys.argv[0], urllib_parse.quote_plus(name), urllib_parse.quote_plus(url), urllib_parse.quote_plus(iconimage), urllib_parse.quote_plus(fanart), mode)))
        liz.addContextMenuItems(contextMenu)
    ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
    return ok
def ytdl_download(url, title, media_type='video'):
    """Download a stream via youtube-dl.

    With a non-empty `url`, download it directly (audio-only when
    `media_type` == 'audio'). With an empty `url`, fall back to the
    currently-playing file: if a download is already in progress open
    the download manager, otherwise start downloading the playing
    stream (stripping any '|User-Agent=' suffix from its URL). If
    nothing is playing, notify the user to start playback first.
    """
    # play in xbmc while playing go back to contextMenu(c) to "!!Download!!"
    # Trial yasceen: seperate |User-Agent=
    import youtubedl
    if url != '':
        if media_type == 'audio':
            youtubedl.single_YD(url, download=True, audio=True)
        else:
            youtubedl.single_YD(url, download=True)
    elif xbmc.Player().isPlaying():
        import YDStreamExtractor
        if YDStreamExtractor.isDownloading():
            YDStreamExtractor.manageDownloads()
        else:
            playing_url = xbmc.Player().getPlayingFile().split('|User-Agent=')[0]
            dl_info = {'url': playing_url, 'title': title, 'media_type': media_type}
            youtubedl.single_YD('', download=True, dl_info=dl_info)
    else:
        xbmcgui.Dialog().notification(addon_name, 'First Play, [COLOR yellow]WHILE playing download[/COLOR]', icon, 10000, False)
# Lunatixz PseudoTV feature
# Lunatixz PseudoTV feature
def ascii(string):
    """On Python 2, strip non-ASCII characters from a unicode string.

    Non-string values, byte strings, and all values on Python 3 are
    returned unchanged. NOTE: intentionally shadows the builtin ascii();
    the name is part of this module's public interface.
    """
    if six.PY2 and isinstance(string, six.string_types) and isinstance(string, six.text_type):
        string = string.encode('ascii', 'ignore')
    return string
def uni(string, encoding='utf-8'):
    """On Python 2, decode a byte string to unicode using `encoding`.

    Undecodable bytes are dropped ('ignore'). Values that are already
    unicode, non-strings, and all values on Python 3 pass through
    unchanged.
    """
    if six.PY2 and isinstance(string, six.string_types) and not isinstance(string, six.text_type):
        string = six.text_type(string, encoding, 'ignore')
    return string
def removeNonAscii(s):
    """Return `s` with every character whose code point is >= 128 removed."""
    return "".join(ch for ch in s if ord(ch) < 128)
def sendJSON(command):
]HH ÉÃBˆžNƒBˆ]HH›XË™^XÝ]R”ÓÓ””Ê[šJÛÛ[X[™
JCBˆ^Ù\[šXÛÙQ[˜ÛÙQ\œ›ÜŽƒBˆ]HH›XË™^XÝ]R”ÓÓ””Ê\ØÚZJÛÛ[X[™
JCBƒBˆ™]\›ˆ[šJ]JCBƒBƒB™YˆYÚ[œ]Y\žXžR”ÓÓŠ\›Ú]™WÛYWÜ™\Ý[S›Û™K^[\ÝQ˜[ÙJNƒBˆYˆ Ø]Y[ÉÈ[ˆ\›ƒBˆœÛÛ—Ü]Y\žHH[šJ ÞÈšœÛÛœœÈŽˆŒ‹Œ‹›Y]ÙŽˆ‘š[\Ë‘Ù]\™XÝÜžH‹œ\˜[\ÈŽˆÈ™\™XÝÜžHŽˆ‰\È‹›YYXHŽˆšY[È‹œ›Ü\Y\ÈŽˆÈ]H‹˜[[H‹˜\\Ý‹™\˜][Ûˆ‹[X›˜Z[‹žYX\ˆ—_KšYŽˆ_IÊH H\›Bˆ[ÙNƒBˆœÛÛ—Ü]Y\žHH[šJ ÞÈšœÛÛœœÈŽˆŒ‹Œ‹›Y]ÙŽˆ‘š[\Ë‘Ù]\™XÝÜžH‹œ\˜[\ÈŽžÈ™\™XÝÜžHŽˆ‰\È‹›YYXHŽˆšY[È‹œ›Ü\Y\ÈŽ–Ȝ݋œ^XÛÝ[‹™\™XÝ܈‹™Ù[œ™H‹›Ý\È‹™\˜][Ûˆ‹˜Z[\ˆ‹œ™[ZY\™Y‹[X›˜Z[‹]H‹žYX\ˆ‹™]XYY‹™˜[˜\‹œ˜][™È‹œÙX\ÛÛˆ‹™\\ÛÙH‹œÝY[È‹›\XH—_KšYŽŒ_IÊH H\›BˆœÛÛ—Ù›Û\—Ù]Z[HœÛÛ‹›ØYÊÙ[™”ÓÓŠœÛÛ—Ü]Y\žJJCBƒBˆYˆÚ]™WÛYWÜ™\Ý[ƒBˆ™]\›ˆœÛÛ—Ù›Û\—Ù]Z[BˆYˆ Ù\œ›Ü‰È[ˆœÛÛ—Ù›Û\—Ù]Z[ƒBˆ™]\›ƒBˆ[ÙNƒBˆ›ÜˆH[ˆœÛÛ—Ù›Û\—Ù]Z[ÉÜ™\Ý[ ×VÉÙš[\É×NƒBˆY]HHßCBˆ\›HVÉÙš[I×CBˆ˜[YHH™[[Ý™S›Û\ØÚZJVÉÛX™[ ×JCBˆ[X›˜Z[H™[[Ý™S›Û\ØÚZJVÉÝ[X›˜Z[ ×JCBˆ˜[˜\H™[[Ý™S›Û\ØÚZJVÉÙ˜[˜\ ×JCBˆY]HHXÝ
ËŠH›ÜˆËˆ[ˆÚ^š]\š][\ÊJHYˆ›ÝˆOH Ì È܈›ÝˆOHLH܈ˆOH ÉÊCBˆY]KœÜ
™š[H‹›Û™JCBˆYˆVÉÙš[]\I×HOH Ùš[I΃BˆYˆ^[\݃Bˆ^WÜ^[\Ý
˜[YK\›]Y]YUšY[ÏIÌIÊCBˆÛÛ[YCBˆ[ÙNƒBˆY[šÊ\›˜[YK[X›˜Z[˜[˜\ ÉË ÉË ÉË É˛ۙK ÉËÝ[[[ŠœÛÛ—Ù›Û\—Ù]Z[ÉÜ™\Ý[ ×VÉÙš[\É×JK[[™›Ï[Y]JCBˆYˆVÉÝ\I×H[™VÉÝ\I×HOH ÝœÚÝÉ΃Bˆ›XÜYÚ[‹œÙ]ÛÛ[
[
Þ\˘\™Ý–ÌWJK ÝœÚÝÜÉÊCBˆ[YˆVÉÙ\\ÛÙI×HˆƒBˆ›XÜYÚ[‹œÙ]ÛÛ[
[
Þ\˘\™Ý–ÌWJK Ù\\ÛÙ\ÉÊCBƒBˆ[ÙNƒBˆY\Š˜[YK\›
LË[X›˜Z[˜[˜\ ÉË ÉË ÉË ÉË[[™›Ï[Y]JCBˆ›XÜYÚ[‹™[™Ù‘\™XÝÜžJ[
Þ\˘\™Ý–ÌWJJCBƒBƒB™YˆY[šÊ\›˜[YKXÛÛš[XYÙK˜[˜\\ØÜš\[Û‹Ù[œ™K]KÚÝØÛÛ^^[\Ý™YÙ^ËÝ[Ù]ÛÛÚÚYOHˆ‹[[™›Ï^ßJNƒBˆÈYÛ—ÛÙÊ ØY[šÎˆ \Ë \ÉÈ H
XÛÛš[XYÙK˜[˜\
JCBˆÛÛ^Y[HH×CBˆ\™[[›ØÚÈHYÛ‹™Ù]Ù][™Ê Ü\™[[›ØÚÙY ÊCBˆ\™[[›ØÚÈH\™[[›ØÚÈOHYHƒBˆ\™[[›ØÚÙY[ˆHYÛ‹™Ù]Ù][™Ê Ü\™[[›ØÚÙY[‰ÊCBƒBˆYˆ[Š\™[[›ØÚÙY[ŠHˆƒBˆYˆ\™[[›ØÚ΃BˆÛÛ^Y[K˜\[™
Ñ\ØX›H\™[[›ØÚÉË Ô[”YÚ[Š \ÏÛ[ÙOMMI›˜[YOI\ÊIÈ H
Þ\˘\™Ý–ÌK\›X—Ü\œÙKœ][ÝWÜ\ʘ[YJJJJCBˆ[ÙNƒBˆÛÛ^Y[K˜\[™
Ñ[˜X›H\™[[›ØÚÉË Ô[”YÚ[Š \ÏÛ[ÙOMM‰›˜[YOI\ÊIÈ H
Þ\˘\™Ý–ÌK\›X—Ü\œÙKœ][ÝWÜ\ʘ[YJJJJCBƒBˆžNƒBˆ˜[YHH˜[YK™[˜ÛÙJ Ý]‹N ÊHYˆÚ^”Lˆ[ÙH˜[YCBˆ^Ù\ƒBˆ\ÜÃBˆÚÈHYCBˆ\Ñ›Û\ˆH˜[ÙCBˆYˆ™YÙ^΃Bˆ[ÙHH ÌMÉÃBˆYˆ Û\Ý™\X] È[ˆ™YÙ^΃Bˆ\Ñ›Û\ˆHYCBˆÛÛ^Y[K˜\[™
ÖÐÓÓÔˆÚ]WHHQÝÛ›ØYÝ\œ™[H^Z[™ÈHVËÐÓÓÔ—IË Ô[”YÚ[Š \ÏÝ\›I\É›[ÙOLŒI›˜[YOI\ÊIÃBˆ H
Þ\˘\™Ý–ÌK\›X—Ü\œÙKœ][ÝWÜ\Ê\›
K\›X—Ü\œÙKœ][ÝWÜ\ʘ[YJJJJCBˆ[Yˆ
[žJ[ˆ\››Üˆ[ˆ™\ÛÛ™WÝ\›
H[™\›œÝ\ÝÚ]
Ú ÊJH܈\›™[™ÝÚ]
É›[ÙOLNIÊNƒBˆ\›H\›œ™\XÙJ É›[ÙOLNIË ÉÊCBˆ[ÙHH ÌNIÃBˆÛÛ^Y[K˜\[™
ÖÐÓÓÔˆÚ]WHHQÝÛ›ØYÝ\œ™[H^Z[™ÈHVËÐÓÓÔ—IË Ô[”YÚ[Š \ÏÝ\›I\É›[ÙOLŒI›˜[YOI\ÊIÃBˆ H
Þ\˘\™Ý–ÌK\›X—Ü\œÙKœ][ÝWÜ\Ê\›
K\›X—Ü\œÙKœ][ÝWÜ\ʘ[YJJJJCBˆ[Yˆ\›™[™ÝÚ]
É›[ÙOLN ÊNƒBˆ\›H\›œ™\XÙJ É›[ÙOLN Ë ÉÊCBˆ[ÙHH ÌN ÃBˆÛÛ^Y[K˜\[™
ÖÐÓÓÔˆÚ]WHHQÝÛ›ØYHVËÐÓÓÔ—IË Ô[”YÚ[Š \ÏÝ\›I\É›[ÙOLŒÉ›˜[YOI\ÊIÃBˆ H
Þ\˘\™Ý–ÌK\›X—Ü\œÙKœ][ÝWÜ\Ê\›
K\›X—Ü\œÙKœ][ÝWÜ\ʘ[YJJJJCBˆYˆYÛ‹™Ù]Ù][™Ê Ù]Y[ÛÛ›IÊHOH ÝYI΃BˆÛÛ^Y[K˜\[™
ÈHQÝÛ›ØYÐÓÓÔˆÙXX›YWP]Y[ÈHVËÐÓÓÔ—IË Ô[”YÚ[Š \ÏÝ\›I\É›[ÙOL ›˜[YOI\ÊIÃBˆ H
Þ\˘\™Ý–ÌK\›X—Ü\œÙKœ][ÝWÜ\Ê\›
K\›X—Ü\œÙKœ][ÝWÜ\ʘ[YJJJJCBˆ[Yˆ\›™[™ÝÚ]
É›[ÙOLŒ ÊNƒBˆ\›H\›œ™\XÙJ É›[ÙOLŒ Ë ÉÊCBˆ[ÙHH ÌŒ ÃBˆ[Yˆ\›™[™ÝÚ]
É›[ÙOLŒ‰ÊNƒBˆ\›H\›œ™\XÙJ É›[ÙOLŒ‰Ë ÉÊCBˆ[ÙHH ÌŒ‰ÃBˆ[ÙNƒBˆ[ÙHH ÌL‰ÃBˆÛÛ^Y[K˜\[™
ÖÐÓÓÔˆÚ]WHHQÝÛ›ØYÝ\œ™[H^Z[™ÈHVËÐÓÓÔ—IË Ô[”YÚ[Š \ÏÝ\›I\É›[ÙOLŒI›˜[YOI\ÊIÃBˆ H
Þ\˘\™Ý–ÌK\›X—Ü\œÙKœ][ÝWÜ\Ê\›
K\›X—Ü\œÙKœ][ÝWÜ\ʘ[YJJJJCBˆYˆ ÜYÚ[Ž‹ËÜYÚ[‹šY[Ëž[Ý]X™KÜ^KÏÝšY[×ÚYIÈ[ˆ\›ƒBˆ]Ø]Y[×Ý\›H\›œ™\XÙJ ÜYÚ[Ž‹ËÜYÚ[‹šY[Ëž[Ý]X™KÜ^KÏÝšY[×ÚYIË Ú΋ËÝÝÝËž[Ý]X™K˜ÛÛKÝØ]ÚÝIÊCBˆÛÛ^Y[K˜\[™
ÈHQÝÛ›ØYÐÓÓÔˆ›YWP]Y[ÈHVËÐÓÓÔ—IË Ô[”YÚ[Š \ÏÝ\›I\É›[ÙOL ›˜[YOI\ÊIÃBˆ H
Þ\˘\™Ý–ÌK\›X—Ü\œÙKœ][ÝWÜ\Ê]Ø]Y[×Ý\›
K\›X—Ü\œÙKœ][ÝWÜ\ʘ[YJJJJCBˆHHÞ\˘\™Ý–ÌH
ÈȃBˆ^WÛ\ÝH˜[ÙCBƒBˆYˆ^[\݃BˆYˆYÛ‹™Ù]Ù][™Ê ØYÜ^[\Ý ÊHOH™˜[ÙHˆ[™ É Ô^SÛ›SÛ™I È›Ý[ˆ^[\ÝÌNƒBˆH
ÏH\›Hˆ
È\›X—Ü\œÙKœ][ÝWÜ\Ê\›
H
ȉ›[ÙOHˆ
È[ÙCBˆ[ÙNƒBˆH
ÏH›[ÙOLLÉ›˜[YOI\Éœ^[\ÝI\Ȉ H
\›X—Ü\œÙKœ][ÝWÜ\ʘ[YJK\›X—Ü\œÙKœ][ÝWÜ\ÊÝŠ^[\Ý
Kœ™\XÙJ Ë Ë ß ÊJJCBˆ˜[YHH˜[YH
È ÖÐÓÓÔˆXYÙ[WH
È
ÈÝŠ[Š^[\Ý
JH
È È][\È
VËÐÓÓÔ—IÃBˆ^WÛ\ÝHYCBˆ[Yˆ[ÙHOH ÌŒ‰È܈
[ÙHOH ÌMÉÈ[™\›™[™ÝÚ]
É›[ÙOLŒ‰ÊJNƒBˆH
ÏH\›Hˆ
È\›X—Ü\œÙKœ][ÝWÜ\Ê\›
H
ȉ›˜[YOHˆ
È\›X—Ü\œÙKœ][ÝJ˜[YJH
ȉ›[ÙOHˆ
È[ÙCBˆ[ÙNƒBˆH
ÏH\›Hˆ
È\›X—Ü\œÙKœ][ÝWÜ\Ê\›
H
ȉ›[ÙOHˆ
È[ÙCBˆYˆ™YÙ^΃BˆH
ÏH‰œ™YÙ^ÏHˆ
È™YÙ^ÃBˆYˆ›ÝÙ]ÛÛÚÚYHOH É΃BˆH
ÏH‰œÙ]ÛÛÚÚYOHˆ
È\›X—Ü\œÙKœ][ÝWÜ\ÊÙ]ÛÛÚÚYJCBˆYˆXÛÛš[XYÙH[™XÛÛš[XYÙHOH É΃BˆH
ÏH‰šXÛÛš[XYÙOHˆ
È\›X—Ü\œÙKœ][ÝWÜ\ÊXÛÛš[XYÙJCBƒBˆYˆ]HOH É΃Bˆ]HH›Û™CBˆ[ÙNƒBˆ\ØÜš\[Ûˆ
ÏH ×—‘]Nˆ \ÉÈ H]CBˆ^ˆH›XÙÝZK“\Ý][J˜[YJCBˆ^‹œÙ]\
ÉÝ[X‰ÎˆXÛÛš[XYÙKBˆ Ù˜[˜\ Έ˜[˜\Bˆ ÚXÛۉΈ‘Y˜][šY[Ëœ™ÈŸJCBƒBˆYˆ[[™›È\țۙH܈[Š[[™›ÊHNƒBˆ^‹œÙ][™›Ê\OH•šY[È‹[™›ÓX™[Ï^È•]HŽˆ˜[YK ÛYYX]\IΈ ÝšY[É˔ݎˆ\ØÜš\[Û‹‘Ù[œ™HŽˆÙ[œ™K™]XYYŽˆ]_JCBˆ[ÙNƒBˆ[[™›Ë\]JÉÛYYX]\IΈ ÝšY[ÉßJCBˆ^‹œÙ][™›Ê\OH•šY[È‹[™›ÓX™[ÏX[[™›ÊCBƒBˆYˆ É ‘TÓÓ‘SÓ“I È[ˆ\›ƒBˆ^‹œÙ]›Ü\J Ò\Ô^XX›IË ÝYIÊCBƒBˆYˆ
›Ý^WÛ\Ý
H[™›Ý[žJ[ˆ\››Üˆ[ˆ×ÚYۛܙTÙ]™\ÛÛ™Y
H[™ ÉVQT”“ÖIIÈ›Ý[ˆ\›[™›Ý
[ÙHOH ÌŒ‰È܈
[ÙHOH ÌMÉÈ[™\›™[™ÝÚ]
É›[ÙOLŒ‰ÊJJNƒBˆYˆ™YÙ^΃BˆYˆ ÉQ[˜Ý[ÛŽœ^[YYXJ È›Ý[ˆ\›X—Ü\œÙK[œ][ÝWÜ\Ê™YÙ^ÊH[™ Û›Ý^XX›IÈ›Ý[ˆ\›X—Ü\œÙK[œ][ÝWÜ\Ê™YÙ^ÊH[™ Û\Ý™\X] È›Ý[ˆ\›X—Ü\œÙK[œ][ÝWÜ\Ê™YÙ^ÊNƒBˆ^‹œÙ]›Ü\J Ò\Ô^XX›IË ÝYIÊCBˆ[ÙNƒBˆ^‹œÙ]›Ü\J Ò\Ô^XX›IË ÝYIÊCBƒBˆ[ÙNƒBˆYÛ—ÛÙÊ Ó“ÕÙ][™È\Ü^XX›H›Üˆ\› È
È\›
CBƒBˆYˆÚÝØÛÛ^ƒBˆYˆÚÝØÛÛ^OH Ù˜]‰ÎƒBˆÛÛ^Y[K˜\[™
Bˆ
Ô™[[Ý™Hœ›ÛH]™TÝ™X[\ԛȘ]›Üš]\ÉË Ô[”YÚ[Š \ÏÛ[ÙOM‰›˜[YOI\ÊIÃBˆ H
Þ\˘\™Ý–ÌK\›X—Ü\œÙKœ][ÝWÜ\ʘ[YJJJCBˆ
CBˆ[Yˆ˜[YH›Ý[ˆUŽƒBˆXÛÛš[XYÙHHXÛÛš[XYÙHYˆXÛÛš[XYÙH[ÙH ÉÃBˆ˜[˜\H˜[˜\Yˆ˜[˜\[ÙH ÉÃBˆžNƒBˆ˜]—Ü\˜[\ÈH
Bˆ É\ÏÛ[ÙOMI›˜[YOI\É\›I\ÉšXÛÛš[XYÙOI\É™˜[˜\I\É™˜]—Û[ÙOL ÃBˆ H
Þ\˘\™Ý–ÌK\›X—Ü\œÙKœ][ÝWÜ\ʘ[YJK\›X—Ü\œÙKœ][ÝWÜ\Ê\›
K\›X—Ü\œÙKœ][ÝWÜ\ÊXÛÛš[XYÙJK\›X—Ü\œÙKœ][ÝWÜ\ʘ[˜\
JCBˆ
CBˆ^Ù\ƒBˆ˜]—Ü\˜[\ÈH
Bˆ É\ÏÛ[ÙOMI›˜[YOI\É\›I\ÉšXÛÛš[XYÙOI\É™˜[˜\I\É™˜]—Û[ÙOL ÃBˆ H
Þ\˘\™Ý–ÌK\›X—Ü\œÙKœ][ÝWÜ\ʘ[YJK\›X—Ü\œÙKœ][ÝWÜ\Ê\›
KBˆ\›X—Ü\œÙKœ][ÝWÜ\ÊXÛÛš[XYÙK™[˜ÛÙJ]‹NŠHYˆÚ^”Lˆ[ÙHXÛÛš[XYÙJKBˆ\›X—Ü\œÙKœ][ÝWÜ\ʘ[˜\™[˜ÛÙJ]‹NŠHYˆÚ^”Lˆ[ÙH˜[˜\
JCBˆ
CBˆYˆ^[\݃Bˆ˜]—Ü\˜[\È
ÏH Ü^[\ÝIÈ
È\›X—Ü\œÙKœ][ÝWÜ\ÊÝŠ^[\Ý
Kœ™\XÙJ Ë Ë ß ÊJCBˆYˆ™YÙ^΃Bˆ˜]—Ü\˜[\È
ÏH‰œ™YÙ^ÏHˆ
È™YÙ^ÃBˆÛÛ^Y[K˜\[™
ÐYÈ]™TÝ™X[\ԛȘ]›Üš]\ÉË Ô[”YÚ[Š \ÊIÈ H˜]—Ü\˜[\ÊJCBˆ^‹˜YÛÛ^Y[R][\ÊÛÛ^Y[JCBˆžNƒBˆYˆ^[\Ý\țݛۙNƒBˆYˆYÛ‹™Ù]Ù][™Ê ØYÜ^[\Ý ÊHOH™˜[ÙHŽƒBˆ^[\ÝÛ˜[YHH˜[YKœÜ]
ÊH ÊVÌWCBˆÛÛ^Y[WÈHÃBˆ
Ô^H È
È^[\ÝÛ˜[YH
È È^S\Ý Ë Ô[”YÚ[Š \ÏÛ[ÙOLLÉ›˜[YOI\Éœ^[\ÝI\ÊIÃBˆ H
Þ\˘\™Ý–ÌK\›X—Ü\œÙKœ][ÝWÜ\Ê^[\ÝÛ˜[YJK\›X—Ü\œÙKœ][ÝWÜ\ÊÝŠ^[\Ý
Kœ™\XÙJ Ë Ë ß ÊJJJCBˆCBˆ^‹˜YÛÛ^Y[R][\ÊÛÛ^Y[WÊCBˆ^Ù\ƒBˆ\ÜÃBƒBˆÚÈH›XÜYÚ[‹˜Y\™XÝÜžR][J[™OZ[
Þ\˘\™Ý–ÌWJK\›]K\Ý][O[^‹Ý[][\Ï]Ý[\Ñ›Û\Z\Ñ›Û\ŠCBˆ™]\›ˆÚÃBƒBƒB™Yˆ^\Ù]™\ÛÛ™Y
\›˜[YKXÛÛš[XYÙKÙ]™\ÛÛ™YUYK™YÏS›Û™JNƒBˆYˆ\›\țۙNƒBˆ›XÜYÚ[‹™[™Ù‘\™XÝÜžJ[
Þ\˘\™Ý–ÌWJJCBˆ™]\›ƒBƒBˆYˆ É›[ÙOLŒ‰È[ˆ\›ƒBˆÙ]™\ÛÛ™YH˜[ÙCBˆ\›H\›œ™\XÙJ É›[ÙOLŒ‰Ë ÉÊCBˆHHÞ\˘\™Ý–ÌH
ÈȃBˆH
ÏH\›Hˆ
È\›X—Ü\œÙKœ][ÝWÜ\Ê\›
H
ȉ›˜[YOHˆ
È\›X—Ü\œÙKœ][ÝJ˜[YJH
ȉ›[ÙOLŒˆƒBˆ\›HCBƒBˆYˆÙ]™\ÛÛ™YƒBˆÙ]™\ÈHYCBˆYˆ É Ñ\™XÝ È[ˆ\›ƒBˆ\›H\›œ™\XÙJ É Ñ\™XÝ Ë ÉÊCBˆÙ]™\ÈH˜[ÙCBˆYˆ™YÈ[™ Û›Ý^XX›IÈ[ˆ™Y΃BˆÙ]™\ÈH˜[ÙCBƒBˆ^ˆH›XÙÝZK“\Ý][J˜[YJCBˆ^‹œÙ]\
ÉÝ[X‰ÎˆXÛÛš[XYÙKBˆ ÚXÛۉΈXÛÛš[XYÙ_JCBˆ^‹œÙ][™›Ê\OIÕšY[ÉË[™›ÓX™[Ï^ÉÕ]IΈ˜[YK ÛYYX]\IΈ ÝšY[ÉßJCBˆ^‹œÙ]›Ü\J’\Ô^XX›H‹YHŠCBˆYˆ É›[ÙOLNIÈ[ˆ\›ƒBˆ\›H\›ÛÛ™\Š\›œ™\XÙJ É›[ÙOLNIË ÉÊKœ™\XÙJ ÎÉË ÉÊJCBˆ[Yˆ É›[ÙOLŒ È[ˆ\›ƒBˆ\›H\›œ™\XÙJ É›[ÙOLŒ Ë ÉÊCBˆYˆ É XÉÈ[ˆ\›ƒBˆ\›XÈH\›œÜ]
É XÏIÊCBˆXÈH\›X—Ü\œÙK[œ][ÝWÜ\ÊXÊCBˆYˆ ÞÔÔÓ_IÈ›Ý[ˆX΃BˆXÈ
ÏH ßžÔÔÓ__ ÃBˆ^‹œÙ]›Ü\J Ú[œ]Ý™X[K˜Y\]™K›XÙ[œÙWÝ\IË ØÛÛKÚY]š[™K˜[IÊCBˆ^‹œÙ]›Ü\J Ú[œ]Ý™X[K˜Y\]™K›XÙ[œÙWÚÙ^IËXÊCBƒBˆYˆ ß È[ˆ\›ƒBˆ\›ÝšˆH\›œÜ]
ß ÊCBˆ^‹œÙ]›Ü\J Ú[œ]Ý™X[K˜Y\]™KœÝ™X[WÚXY\œÉËÝšŠCBƒBˆYˆ Ë›LÝN È[ˆ\›ƒBˆYˆÚ^”LŽƒBˆ^‹œÙ]›Ü\J Ú[œ]Ý™X[XYÛ‰Ë Ú[œ]Ý™X[K˜Y\]™IÊCBˆ[ÙNƒBˆ^‹œÙ]›Ü\J Ú[œ]Ý™X[IË Ú[œ]Ý™X[K˜Y\]™IÊCBˆ^‹œÙ]›Ü\J Ú[œ]Ý™X[K˜Y\]™K›X[šY™\ÝÝ\IË ÚÉÊCBˆ^‹œÙ]Z[YU\J Ø\XØ][Û‹Ý›™˜\K›\YÜÝ™X[WÝ\› ÊCBˆ^‹œÙ]ÛÛ[ÛÚÝ\
˜[ÙJCBƒBˆ[Yˆ Ë›\ È[ˆ\›Üˆ ٛܛX][\ È[ˆ\›ƒBˆYˆÚ^”LŽƒBˆ^‹œÙ]›Ü\J Ú[œ]Ý™X[XYÛ‰Ë Ú[œ]Ý™X[K˜Y\]™IÊCBˆ[ÙNƒBˆ^‹œÙ]›Ü\J Ú[œ]Ý™X[IË Ú[œ]Ý™X[K˜Y\]™IÊCBˆ^‹œÙ]›Ü\J Ú[œ]Ý™X[K˜Y\]™K›X[šY™\ÝÝ\IË Û\ ÊCBˆ^‹œÙ]Z[YU\J Ø\XØ][Û‹Ù\Ú
Þ[ ÊCBˆ^‹œÙ]ÛÛ[ÛÚÝ\
˜[ÙJCBƒBˆ[Yˆ Ëš\ÛIÈ[ˆ\›ƒBˆYˆÚ^”LŽƒBˆ^‹œÙ]›Ü\J Ú[œ]Ý™X[XYÛ‰Ë Ú[œ]Ý™X[K˜Y\]™IÊCBˆ[ÙNƒBˆ^‹œÙ]›Ü\J Ú[œ]Ý™X[IË Ú[œ]Ý™X[K˜Y\]™IÊCBˆ^‹œÙ]›Ü\J Ú[œ]Ý™X[K˜Y\]™K›X[šY™\ÝÝ\IË Ú\ÛIÊCBˆ^‹œÙ]Z[YU\J Ø\XØ][Û‹Ý›™›\Ë\ÜÝŠÞ[ ÊCBˆ^‹œÙ]ÛÛ[ÛÚÝ\
˜[ÙJCBƒBˆ^‹œÙ]]
\›
CBˆYˆ›ÝÙ]™\΃Bˆ›XË”^Y\Š
Kœ^J\›
CBˆ[ÙNƒBˆ›XÜYÚ[‹œÙ]™\ÛÛ™Y\›
[
Þ\˘\™Ý–ÌWJKYK^ŠCBƒBˆ[ÙNƒBˆ›XË™^XÝ]XZ[[Š Ô[”YÚ[Š È
È\›
È ÊIÊCBƒBƒBˆÈ[šÜÈÈ\ØÚXÚØK[ˆ\ÈØÜ˜\\ˆ›Üˆ‹ËÚK[X›ÞK˜ÚÜ›Ùܘ[[KÜÝ][Û—ÜÙ[XÝœBˆÈ‹ËÙ›Ü[Kž›X˛ܙËÜÜÝœÜNLÍŒŒŽ œÜÝÛÝ[LL
̓B™YˆÙ]\Ê[šÊNƒBˆ\›H\›X—Ü™\]Y\Ý\›Ü[Š[šÊCBˆÛÝ\˜ÙHH\›œ™XY
CBˆ\›˜ÛÜÙJ
CBˆÛÝ\˜ÙLˆHÛÝ\˜ÙKœÜ]
’™]ŠCBˆÛÝ\˜ÙLÈHÛÝ\˜ÙL–ÌWKœÜ]
Ü›Ùܘ[[KÙ]Z[œØÛÛœÝÚYIÊCBˆÛÝ\˜Ù]Zž™Z]HÛÝ\˜ÙLÖÌWKœÜ]
ÏœˆÏH™YH‹ÉÊCBˆ›ÝÝ[YHHÛÝ\˜Ù]Zž™Z]ÌVÍ›[ŠÛÝ\˜Ù]Zž™Z]ÌJWCBˆÛÝ\˜Ù]]HHÛÝ\˜ÙLÖÌ—KœÜ]
ØOÜÙ]ˆŠCBˆ›ÝÝ]HHÛÝ\˜Ù]]VÌVÌMΛ[ŠÛÝ\˜Ù]]VÌJWCBˆ›ÝÝ]HH›ÝÝ]K™[˜ÛÙJ Ý]‹N ÊHYˆÚ^”Lˆ[ÙH›ÝÝ]CBˆ™]\›ˆˆHˆ
È›ÝÝ]H
ȈHˆ
È›ÝÝ[YCBƒBƒB™YˆÙ]Ù\Ê\›™YÙ^
NƒBˆ]HHXZÙT™\]Y\Ý
\›
CBˆžNƒBˆ][HH™K™š[™[
™YÙ^]JVÌCBˆ™]\›ˆ][CBˆ^Ù\ƒBˆYÛ—ÛÙÊ Ü™YÙ^˜Z[Y ÊCBˆYÛ—ÛÙÊ™YÙ^
CBˆ™]\›ƒBƒBƒBˆÈ›ÝHÙ[™\šXÈ[\[Y[˜][Ûˆ\È]™YYÈÈÛÛ™\B™Yˆž
›ÛÝHœ›ÛÝ‹™\ÝYL
NƒBˆÜH[X™HYΈ Ï È
ÈYÈ
È Ï‰ÈÈ›ÔPCBˆÛH[X™HYΈ ÏÉÈ
ÈYÈ
È Ï—‰ÈÈ›ÔPCBˆ[H[X™H‹[ˆ[
ÈÜ
Ù^JH
ÈÝŠŠH
ÈÛ
Ù^JHÈ›ÔPCBˆ[HÜ
›ÛÝ
H
È ×‰ÈYˆ›ÛÝ[ÙHˆƒBƒBˆ›ÜˆÙ^K›[ˆÚ^š]\š][\Ê
NƒBˆ\HH\J›
CBˆYˆ™\ÝYOHƒBˆÙ^HH Ü™YÙ^ ÈÈ[™›Ü˜Ú[™È[Ü]™[YÜÈÈ™H˜[YY\È™YÙ^BˆYˆ\H\È\݃Bˆ›Üˆˆ[ˆ›ƒBˆˆH\ØØ\JŠCBˆ[H[
‹[
CBƒBˆYˆ\H\ÈX݃Bˆ[H[
׉È
Èž
››Û™K™\ÝY
ÈJK[
CBˆYˆ\H\È›Ý\Ý[™\H\È›ÝX݃BˆYˆ›\țݛۙNƒBˆ›H\ØØ\J›
CBƒBˆYˆ›\țۙNƒBˆ[H[
›[
CBˆ[ÙNƒBˆ[H[
›™[˜ÛÙJ]‹NŠHYˆÚ^”Lˆ[ÙH›[
CBƒBˆ[
ÏHÛ
›ÛÝ
HYˆ›ÛÝ[ÙHˆƒBƒBˆ™]\›ˆ[BƒBƒBž›XÜYÚ[‹œÙ]ÛÛ[
[
Þ\˘\™Ý–ÌWJK Û[ÝšY\ÉÊCBƒBžNƒBˆ›XÜYÚ[‹˜YÛÜY]Ù
[
Þ\˘\™Ý–ÌWJK›XÜYÚ[‹”ÓÔ•ÓQUÑÕS”ÓÔ•Q
CB™^Ù\ƒBˆ\ÜÃBžNƒBˆ›XÜYÚ[‹˜YÛÜY]Ù
[
Þ\˘\™Ý–ÌWJK›XÜYÚ[‹”ÓÔ•ÓQUÑÓP‘S
CB™^Ù\ƒBˆ\ÜÃBžNƒBˆ›XÜYÚ[‹˜YÛÜY]Ù
[
Þ\˘\™Ý–ÌWJK›XÜYÚ[‹”ÓÔ•ÓQUÑÑUJCB™^Ù\ƒBˆ\ÜÃBžNƒBˆ›XÜYÚ[‹˜YÛÜY]Ù
[
Þ\˘\™Ý–ÌWJK›XÜYÚ[‹”ÓÔ•ÓQUÑÑÑS”‘JCB™^Ù\ƒBˆ\ÜÃBƒBœ\˜[\ÈHÙ]Ü\˜[\Ê
CBƒB\›H›Û™CB›˜[YHH›Û™CB›[ÙHH›Û™CBœ^[\ÝH›Û™CBšXÛÛš[XYÙHH›Û™CB™˜[˜\HST•Bœ^[\ÝH›Û™CB™˜]—Û[ÙHH›Û™CBœ™YÙ^ÈH›Û™CBƒBžNƒBˆ\›H\›X—Ü\œÙK[œ][ÝWÜ\Ê\˜[\ÖÈ\›—JCBˆ\›H\›™XÛÙJ Ý]‹N ÊHYˆÚ^”Lˆ[ÙH\›BˆÈ\›H\›œœÝš\
™š^ŠHYˆ\›™[™ÝÚ]
‹ž[š^ŠH[™Ú^”LÈ[ÙH\›BˆˆˆƒBˆ™YYÈ›ÝÈÝš\HÈÙ™ˆž[È[ÝÈHš[HÈ™H›ØÙ\ÜÙYÛÜœ™XÝKƒBˆˆˆƒBˆ\›H\›œœÝš\
‹ÈŠHYˆ\›™[™ÝÚ]
‹ž[ÈŠH[ÙH\›B™^Ù\ƒBˆ\ÜÃBžNƒBˆ˜[YHH\›X—Ü\œÙK[œ][ÝWÜ\Ê\˜[\ÖÈ›˜[YH—JCB™^Ù\ƒBˆ\ÜÃBžNƒBˆXÛÛš[XYÙHH\›X—Ü\œÙK[œ][ÝWÜ\Ê\˜[\ÖÈšXÛÛš[XYÙH—JCB™^Ù\ƒBˆ\ÜÃBžNƒBˆ˜[˜\H\›X—Ü\œÙK[œ][ÝWÜ\Ê\˜[\ÖÈ™˜[˜\—JCB™^Ù\ƒBˆ\ÜÃBžNƒBˆ[ÙHH[
\˜[\ÖÈ›[ÙH—JCB™^Ù\ƒBˆ\ÜÃBžNƒBˆ^[\ÝH]˜[
\›X—Ü\œÙK[œ][ÝWÜ\Ê\˜[\ÖÈœ^[\Ý—JKœ™\XÙJ ß Ë Ë ÊJCB™^Ù\ƒBˆ\ÜÃBžNƒBˆ˜]—Û[ÙHH[
\˜[\ÖÈ™˜]—Û[ÙH—JCB™^Ù\ƒBˆ\ÜÃBžNƒBˆ™YÙ^ÈH\˜[\ÖÈœ™YÙ^È—CB™^Ù\ƒBˆ\ÜÃBœ^Z][HH ÉÃBžNƒBˆ^Z][HH\›X—Ü\œÙK[œ][ÝWÜ\Ê\˜[\ÖÈœ^Z][H—JCB™^Ù\ƒBˆ\ÜÃBƒB˜YÛ—ÛÙÊ“[ÙNˆÌH‹™›Ü›X]
[ÙJJCBƒBšYˆ\›\țݛۙNƒBˆYÛ—ÛÙÊ•T“ˆÌH‹™›Ü›X]
\›
JCB˜YÛ—ÛÙÊ“˜[YNˆÌH‹™›Ü›X]
˜[YJJCBƒBšYˆ^Z][HOH É΃BˆÈHÙ]ÛÝ\
ÉË]O\^Z][JCBˆ˜[YK\›™YÙ^ÈHÙ]][\Ê˛ۙKÛ[šÏUYJCBˆ[ÙHHLMÃBƒBšYˆ[ÙH\țۙNƒBˆYÛ—ÛÙÊ™Ù]ÛÝ\˜Ù\ÈŠCBˆÙÙ]ÛÝ\˜Ù\Ê
CBˆ[™^
CBˆ›XÜYÚ[‹™[™Ù‘\™XÝÜžJ[
Þ\˘\™Ý–ÌWJJCBƒB™[Yˆ[ÙHOHNƒBˆYÛ—ÛÙÊ™Ù]]HŠCBˆ]HH›Û™CBˆYˆ™YÙ^È[™[Š™YÙ^ÊHˆƒBˆ]KÙ]™\ÛÛ™YHÙ]™YÙ^\œÙY
™YÙ^Ë\›
CBˆYˆ]KœÝ\ÝÚ]
Ú ÊH܈]KœÝ\ÝÚ]
ÜÛX‰ÊH܈]KœÝ\ÝÚ]
Û™œÉÊH܈]KœÝ\ÝÚ]
ËÉÊNƒBˆ\›H]CBˆ]HH›Û™CBˆÙ]]J\›˜[˜\]JCBˆ›XÜYÚ[‹™[™Ù‘\™XÝÜžJ[
Þ\˘\™Ý–ÌWJJCBƒB™[Yˆ[ÙHOHŽƒBˆYÛ—ÛÙÊ™Ù]Ú[›™[][\ÈŠCBˆÙ]Ú[›™[][\ʘ[YK\›˜[˜\
CBˆ›XÜYÚ[‹™[™Ù‘\™XÝÜžJ[
Þ\˘\™Ý–ÌWJJCBƒB™[Yˆ[ÙHOH΃BˆYÛ—ÛÙÊ™Ù]ÝXÚ[›™[][\ÈŠCBˆÙ]ÝXÚ[›™[][\ʘ[YK\›˜[˜\
CBˆ›XÜYÚ[‹™[™Ù‘\™XÝÜžJ[
Þ\˘\™Ý–ÌWJJCBƒB™[Yˆ[ÙHOH
ƒBˆYÛ—ÛÙÊ™Ù]˜]›Üš]\ÈŠCBˆÙ]˜]›Üš]\Ê
CBˆ›XÜYÚ[‹™[™Ù‘\™XÝÜžJ[
Þ\˘\™Ý–ÌWJJCBƒB™[Yˆ[ÙHOH
NƒBˆYÛ—ÛÙʘY˜]›Üš]HŠCBˆžNƒBˆ˜[YHH˜[YKœÜ]
× ÊVÌWCBˆ^Ù\ƒBˆ\ÜÃBˆžNƒBˆ˜[YHH˜[YKœÜ]
ÈH ÊVÌCBˆ^Ù\ƒBˆ\ÜÃBˆY˜]›Üš]J˜[YK\›XÛÛš[XYÙK˜[˜\˜]—Û[ÙJCBƒB™[Yˆ[ÙHOH
ŽƒBˆYÛ—ÛÙÊœ›Q˜]›Üš]HŠCBˆžNƒBˆ˜[YHH˜[YKœÜ]
× ÊVÌWCBˆ^Ù\ƒBˆ\ÜÃBˆžNƒBˆ˜[YHH˜[YKœÜ]
ÈH ÊVÌCBˆ^Ù\ƒBˆ\ÜÃBˆ›Q˜]›Üš]J˜[YJCBƒB™[Yˆ[ÙHOH
΃BˆYÛ—ÛÙʘYÛÝ\˜ÙHŠCBˆYÛÝ\˜ÙJ\›
CBƒB™[Yˆ[ÙHOHƒBˆYÛ—ÛÙÊœ›TÛÝ\˜ÙHŠCBˆ›TÛÝ\˜ÙJ˜[YJCBƒB™[Yˆ[ÙHOHNƒBˆYÛ—ÛÙÊ™ÝÛ›ØYÙš[HŠCBˆÝÛ›ØYÙš[J˜[YK\›
CBƒB™[Yˆ[ÙHOHLNƒBˆYÛ—ÛÙʘYÛÝ\˜ÙHŠCBˆYÛÝ\˜ÙJ\›
CBƒB™[Yˆ[ÙHOHLŽƒBˆYÛ—ÛÙÊœÙ]™\ÛÛ™Y\›ŠCBˆYˆ›Ý\›œÝ\ÝÚ]
œYÚ[Ž‹ËÜYÚ[ˆŠH܈›Ý[žJ[ˆ\››Üˆ[ˆ×ÚYۛܙTÙ]™\ÛÛ™Y
NƒBˆÙ]™\ÈHYCBˆYˆ É Ñ\™XÝ È[ˆ\›ƒBˆ\›H\›œ™\XÙJ É Ñ\™XÝ Ë ÉÊCBˆÙ]™\ÈH˜[ÙCBˆYˆ ÉVQT”“ÖIIÈ[ˆ\›ƒBˆ\››ÞHH\›œÜ]
ÉVQT”“ÖIIÊCBˆÈ˜Z\›Þ[ٛ܈›ÞH]]Bˆ›Þ]\Ù\ˆH›Û™CBˆ›Þ\\ÜÈH›Û™CBˆYˆ[Š›ÞJHˆ[™ Ð È[ˆ›ÞNƒBˆ›ÞHH›ÞKœÜ]
ΉÊCBˆ›Þ]\Ù\ˆH›ÞVÌCBˆ›Þ\\ÜÈH›ÞVÌWKœÜ]
Ð ÊVÌCBˆ›ÞZ\H›ÞVÌWKœÜ]
Ð ÊVÌWCBˆÜH›ÞVÌ—CBˆ[ÙNƒBˆ›ÞZ\ÜH›ÞKœÜ]
ΉÊCBˆ^[YYX]Ú]›ÞJ\›˜[YKXÛÛš[XYÙK›ÞZ\Ü›Þ]\Ù\‹›Þ\\ÜÊHȘZ\›ÞBƒBˆ][HH›XÙÝZK“\Ý][J]]\›
CBˆYˆ›ÝÙ]™\΃Bˆ›XË”^Y\Š
Kœ^J\›
CBˆ[ÙNƒBˆ›XÜYÚ[‹œÙ]™\ÛÛ™Y\›
[
Þ\˘\™Ý–ÌWJKYK][JCBˆ[ÙNƒBˆ›XË™^XÝ]XZ[[Š Ô[”YÚ[Š È
È\›
È ÊIÊCBƒB™[Yˆ[ÙHOHL΃BˆYÛ—ÛÙÊœ^WÜ^[\ÝŠCBˆ^WÜ^[\Ý
˜[YK^[\Ý
CBƒB™[Yˆ[ÙHOHMÈ܈[ÙHOHLM΃BˆYÛ—ÛÙÊ™Ù]™YÙ^\œÙYŠCBˆ]HH›Û™CBˆYˆ™YÙ^È[™ Û\Ý™\X] È[ˆ\›X—Ü\œÙK[œ][ÝWÜ\Ê™YÙ^ÊNƒBˆ\Ý™\X]™]K™YÙ^ËÛÛÚÚYR˜\ˆHÙ]™YÙ^\œÙY
™YÙ^Ë\›
CBˆH ÉÃBˆ™YÙ^˜[YHHVÉÛ˜[YI×CBˆ^\Ý[™×Û\ÝH™YÙ^ËœÜ
™YÙ^˜[YJCBˆ\›H ÉÃBˆ[\ÜÛÜCBˆˆH ÉÃBˆ›[X™\ˆHBˆ›ÜˆØšˆ[ˆ™]ƒBˆžNƒBˆ›[X™\ˆ
ÏHCBˆ™]ØÛÜHHÛÜK™Y\ÛÜJ™YÙ^ÊCBˆ\Ý™\X]H\Ý™\X]BˆHHBˆ›ÜˆH[ˆ˜[™ÙJ[ŠØšŠJNƒBˆYˆ[Š™]ØÛÜJHˆƒBˆ›ÜˆWÚÙ^SËWݘ[YSÈ[ˆÚ^š]\š][\Ê™]ØÛÜJNƒBˆYˆWݘ[YSÈ\țݛۙNƒBˆ›ÜˆWÚÙ^KWݘ[YH[ˆÚ^š]\š][\ÊWݘ[YSÊNƒBˆYˆWݘ[YH\țݛۙNƒBˆYˆ\JWݘ[YJH\ÈX݃Bˆ›ÜˆWÚÙ^[Wݘ[Y[[ˆÚ^š]\š][\ÊWݘ[YJNƒBˆYˆWݘ[Y[\țݛۙNƒBˆ˜[H›Û™CBˆYˆ\Ú[œÝ[˜ÙJØš‹\JNƒBˆžNƒBˆ˜[HØš–ÚWK™XÛÙJ Ý]‹N ÊCBˆ^Ù\ƒBˆ˜[HØš–ÚWCBˆ[ÙNƒBˆžNƒBˆ˜[HØš‹™XÛÙJ Ý]‹N ÊCBˆ^Ù\ƒBˆ˜[HØšƒBƒBˆYˆ ÖÉÈ
È™YÙ^˜[YH
È Ëœ\˜[IÈ
ÈÝŠH
ÈJH
È ×VÑWIÈ[ˆWݘ[Y[ƒBˆWݘ[Y[HWݘ[Y[œ™\XÙJ ÖÉÈ
È™YÙ^˜[YH
È Ëœ\˜[IÈ
ÈÝŠH
ÈJH
È ×VÑWIË\›X—Ü\œÙK[œ][ÝJ˜[
JCBˆWݘ[YVÝWÚÙ^[HHWݘ[Y[œ™\XÙJ ÖÉÈ
È™YÙ^˜[YH
È Ëœ\˜[IÈ
ÈÝŠH
ÈJH
È ×I˘[
CBƒBˆ[ÙNƒBˆ˜[H›Û™CBˆYˆ\Ú[œÝ[˜ÙJØš‹\JNƒBˆžNƒBˆ˜[HØš–ÚWK™XÛÙJ Ý]‹N ÊCBˆ^Ù\ƒBˆ˜[HØš–ÚWCBˆ[ÙNƒBˆžNƒBˆ˜[HØš‹™XÛÙJ Ý]‹N ÊCBˆ^Ù\ƒBˆ˜[HØšƒBˆYˆ ÖÉÈ
È™YÙ^˜[YH
È Ëœ\˜[IÈ
ÈÝŠH
ÈJH
È ×VÑWIÈ[ˆWݘ[YNƒBˆWݘ[YHHWݘ[YKœ™\XÙJ ÖÉÈ
È™YÙ^˜[YH
È Ëœ\˜[IÈ
ÈÝŠH
ÈJH
È ×VÑWIË\›X—Ü\œÙK[œ][ÝJ˜[
JCBƒBˆWݘ[YSÖÝWÚÙ^WHHWݘ[YKœ™\XÙJ ÖÉÈ
È™YÙ^˜[YH
È Ëœ\˜[IÈ
ÈÝŠH
ÈJH
È ×I˘[
CBƒBˆ˜[H›Û™CBˆYˆ\Ú[œÝ[˜ÙJØš‹\JNƒBˆžNƒBˆ˜[HØš–ÚWK™XÛÙJ Ý]‹N ÊCBˆ^Ù\ƒBˆ˜[HØš–ÚWCBˆ[ÙNƒBˆžNƒBˆ˜[HØš‹™XÛÙJ Ý]‹N ÊCBˆ^Ù\ƒBˆ˜[HØšƒBˆYˆ ÖÉÈ
È™YÙ^˜[YH
È Ëœ\˜[IÈ
ÈÝŠH
ÈJH
È ×VÑWIÈ[ˆ\Ý™\X]ƒBˆ\Ý™\X]H\Ý™\X]œ™\XÙJ ÖÉÈ
È™YÙ^˜[YH
È Ëœ\˜[IÈ
ÈÝŠH
ÈJH
È ×VÑWI˘[
CBˆ\Ý™\X]H\Ý™\X]œ™\XÙJ ÖÉÈ
È™YÙ^˜[YH
È Ëœ\˜[IÈ
ÈÝŠH
ÈJH
È ×IË\ØØ\J˜[
JCBƒBˆ\Ý™\X]H\Ý™\X]œ™\XÙJ ÖÉÈ
È™YÙ^˜[YH
È Ëœ\˜[IÈ
ÈÝŠ
H
È ×IËÝŠ›[X™\ŠJCBˆžNƒBˆYˆÛÛÚÚYR˜\ˆ[™ ÖÉÈ
È™YÙ^˜[YH
È Ë˜ÛÛÚÚY\×IÈ[ˆ\Ý™\X]ƒBˆ\Ý™\X]H\Ý™\X]œ™\XÙJ ÖÉÈ
È™YÙ^˜[YH
È Ë˜ÛÛÚÚY\×IËÙ]ÛÛÚÚY\ÔÝš[™ÊÛÛÚÚYR˜\ŠJCBˆ^Ù\ƒBˆ\ÜÃBƒBˆ™YÙ^Þ[H ÉÃBˆYˆ[Š™]ØÛÜJHˆƒBˆ™YÙ^Þ[Hž
™]ØÛÜK ÛܛܛÛÝ ÊCBˆ™YÙ^Þ[H™YÙ^Þ[œÜ]
ÏܛܛÛ݉ÊVÌWKœÜ]
ÏÛܛܛÛÝ ÊVÌCBˆžNƒBˆˆ
ÏH ×][O‰\׉\ÏÚ][O‰È H
\Ý™\X]™YÙ^Þ[
CBˆ^Ù\ƒBˆˆ
ÏH ×][O‰\׉\ÏÚ][O‰È H
\Ý™\X]™[˜ÛÙJ]‹NŠK™YÙ^Þ[
CBˆ^Ù\ƒBˆ˜XÙX˜XÚËœš[Ù^Êš[O\Þ\ËœÝÝ]
CBƒBˆÈYÛ—ÛÙÊ™\ŠŠJCBˆÙ]]J ÉË ÉË Ï][\Ï—žÌWÚ][\Ï—‰Ë™›Ü›X]
ŠJCBˆ›XÜYÚ[‹™[™Ù‘\™XÝÜžJ[
Þ\˘\™Ý–ÌWJJCBˆ[ÙNƒBˆ\›Ù]™\ÛÛ™YHÙ]™YÙ^\œÙY
™YÙ^Ë\›
CBˆYˆ›Ý
™YÙ^È[™ Û›Ý^XX›IÈ[ˆ™YÙ^È[™›Ý\›
NƒBˆYˆ\›ƒBˆYˆ ÉVQT”“ÖIIÈ[ˆ\›ƒBˆ\››ÞHH\›œÜ]
ÉVQT”“ÖIIÊCBˆÈ˜Z\›Þ[ٛ܈›ÞH]]Bˆ›Þ]\Ù\ˆH›Û™CBˆ›Þ\\ÜÈH›Û™CBˆYˆ[Š›ÞJHˆ[™ Ð È[ˆ›ÞNƒBˆ›ÞHH›ÞKœÜ]
ΉÊCBˆ›Þ]\Ù\ˆH›ÞVÌCBˆ›Þ\\ÜÈH›ÞVÌWKœÜ]
Ð ÊVÌCBˆ›ÞZ\H›ÞVÌWKœÜ]
Ð ÊVÌWCBˆÜH›ÞVÌ—CBˆ[ÙNƒBˆ›ÞZ\ÜH›ÞKœÜ]
ΉÊCBƒBˆ^[YYX]Ú]›ÞJ\›˜[YKXÛÛš[XYÙK›ÞZ\Ü›Þ]\Ù\‹›Þ\\ÜÊHȘZ\›ÞBˆ[ÙNƒBˆ^\Ù]™\ÛÛ™Y
\›˜[YKXÛÛš[XYÙKÙ]™\ÛÛ™Y™YÙ^ÊCBˆ[ÙNƒBˆ›XÙÝZK‘X[ÙÊ
K››ÝYšXØ][ÛŠYÛ—Û˜[YK јZ[YÈ^˜XÝ™YÙ^‰ËXÛÛ‹
˜[ÙJCBƒB™[Yˆ[ÙHOHNƒBˆYÛ—ÛÙÊž[Ý]X™YŠCBˆžNƒBˆ[\Ü[Ý]X™YBˆ^Ù\^Ù\[ÛŽƒBˆ›XÙÝZK‘X[ÙÊ
K››ÝYšXØ][ÛŠYÛ—Û˜[YK ÔX\ÙHÐÓÓÔˆY[Ý×Z[œÝ[[Ý]X™KYËÐÓÓÔ—H[Ù[IËXÛÛ‹L˜[ÙJCBˆÝ™X[WÝ\›H[Ý]X™YœÚ[™ÛWÖQ
\›
CBˆ^\Ù]™\ÛÛ™Y
Ý™X[WÝ\›˜[YKXÛÛš[XYÙJCBƒB™[Yˆ[ÙHOHNNƒBˆYÛ—ÛÙÊ‘Ù[™\Ú\ØÛÛ[[Ûœ™\ÛÛ™\œÈŠCBˆ^\Ù]™\ÛÛ™Y
\›ÛÛ™\Š\›
K˜[YKXÛÛš[XYÙKYJCBƒB™[Yˆ[ÙHOHŒƒBˆYÛ—ÛÙÊœÙ]™\ÛÛ™Y\›ŠCBˆ][HH›XÙÝZK“\Ý][J˜[YJCBˆYˆ É XÉÈ[ˆ\›ƒBˆ\›XÈH\›œÜ]
É XÏIÊCBˆXÈH\›X—Ü\œÙK[œ][ÝWÜ\ÊXÊCBˆYˆ ÞÔÔÓ_IÈ›Ý[ˆX΃BˆXÈ
ÏH ßžÔÔÓ__ ÃBˆ][KœÙ]›Ü\J Ú[œ]Ý™X[K˜Y\]™K›XÙ[œÙWÝ\IË ØÛÛKÚY]š[™K˜[IÊCBˆ][KœÙ]›Ü\J Ú[œ]Ý™X[K˜Y\]™K›XÙ[œÙWÚÙ^IËXÊCBˆYˆ ß È[ˆ\›ƒBˆ\›ÝšˆH\›œÜ]
ß ÊCBˆ][KœÙ]›Ü\J Ú[œ]Ý™X[K˜Y\]™KœÝ™X[WÚXY\œÉËÝšŠCBˆ][KœÙ]]
\›
CBˆYˆ Ë›LÝN È[ˆ\›ƒBˆYˆÚ^”LŽƒBˆ][KœÙ]›Ü\J Ú[œ]Ý™X[XYÛ‰Ë Ú[œ]Ý™X[K˜Y\]™IÊCBˆ[ÙNƒBˆ][KœÙ]›Ü\J Ú[œ]Ý™X[IË Ú[œ]Ý™X[K˜Y\]™IÊCBˆ][KœÙ]›Ü\J Ú[œ]Ý™X[K˜Y\]™K›X[šY™\ÝÝ\IË ÚÉÊCBˆ][KœÙ]Z[YU\J Ø\XØ][Û‹Ý›™˜\K›\YÜÝ™X[WÝ\› ÊCBˆ][KœÙ]ÛÛ[ÛÚÝ\
˜[ÙJCBƒBˆ[Yˆ Ë›\ È[ˆ\›Üˆ ٛܛX][\ È[ˆ\›ƒBˆYˆÚ^”LŽƒBˆ][KœÙ]›Ü\J Ú[œ]Ý™X[XYÛ‰Ë Ú[œ]Ý™X[K˜Y\]™IÊCBˆ[ÙNƒBˆ][KœÙ]›Ü\J Ú[œ]Ý™X[IË Ú[œ]Ý™X[K˜Y\]™IÊCBˆ][KœÙ]›Ü\J Ú[œ]Ý™X[K˜Y\]™K›X[šY™\ÝÝ\IË Û\ ÊCBˆ][KœÙ]Z[YU\J Ø\XØ][Û‹Ù\Ú
Þ[ ÊCBˆ][KœÙ]ÛÛ[ÛÚÝ\
˜[ÙJCBƒBˆ[Yˆ Ëš\ÛIÈ[ˆ\›ƒBˆYˆÚ^”LŽƒBˆ][KœÙ]›Ü\J Ú[œ]Ý™X[XYÛ‰Ë Ú[œ]Ý™X[K˜Y\]™IÊCBˆ[ÙNƒBˆ][KœÙ]›Ü\J Ú[œ]Ý™X[IË Ú[œ]Ý™X[K˜Y\]™IÊCBˆ][KœÙ]›Ü\J Ú[œ]Ý™X[K˜Y\]™K›X[šY™\ÝÝ\IË Ú\ÛIÊCBˆ][KœÙ]Z[YU\J Ø\XØ][Û‹Ý›™›\Ë\ÜÝŠÞ[ ÊCBˆ][KœÙ]ÛÛ[ÛÚÝ\
˜[ÙJCBˆ][KœÙ]]
\›
CBˆ›XÜYÚ[‹œÙ]™\ÛÛ™Y\›
[
Þ\˘\™Ý–ÌWJKYK][JCBƒB™[Yˆ[ÙHOHŒNƒBˆYÛ—ÛÙÊ™ÝÛ›ØYÝ\œ™[š[H\Ú[™È[Ý]X™KYÙ\šXÙHŠCBˆ]\HH ÝšY[ÉÃBˆYˆ ÖÛ\×IÈ[ˆ˜[YNƒBˆ]\HH Ø]Y[ÉÃBˆ˜[YHH˜[YKœ™\XÙJ ÖÛ\×IË ÉÊCBˆ]ÙÝÛ›ØY
ɢ[YK]\JCBƒB™[Yˆ[ÙHOHŒŽƒBˆYÛ—ÛÙʜۛÞHŠCBˆžNƒBˆœ›ÛHÜ[\ÜÝ™X[[[š×Ü›ÞCBˆžNƒBˆHH™K™š[™[
‰× UPSUOJŠÏÊW Ë\›
VÌCBˆ^Ù\ƒBˆHH ÉÈYˆ™KœÙX\˜Ú
‰× ‘TÓÓ‘SÓ“W Ë\›
H[ÙH Ø™\Ý ÃBˆ\›H™KœÝXŠ‰× UPSUOKŠ× Ë ÉË\›
CBƒBˆžNƒBˆLÝN[ÙH™K™š[™[
‰× LÕNSÑJŠÏÊW Ë\›
VÌCBˆ^Ù\ƒBˆLÝN[ÙH›Û™CBˆ\›H™KœÝXŠ‰× LÕNSÑKŠ× Ë ÉË\›
CBƒBˆžNƒBˆžHH™K™š[™[
‰× “ÖOJŠÏÊW Ë\›
VÌCBˆ^Ù\ƒBˆžHH ÉÃBˆ\›H™KœÝXŠ‰× “ÖOKŠ× Ë ÉË\›
CBˆžHH ÉÈYˆžHOH ÉÈ[ÙH ɘ[\ÜI\ÉÈ HžCBƒBˆYˆ™KœÙX\˜Ú
‰× ‘TÓÓ‘SÓ“W Ë\›
NƒBˆ\›H™KœÝXŠ‰× ‘TÓÓ‘SÓ“W Ë ÉË\›
CBˆÛ›ÞHHÝ™X[[[š×Ü›ÞK”Ó›ÞWÒ[\Š
CBˆHH ÉÈYˆHOH ÉÈ[ÙH ɘ[\ÜOI\ÉÈ HCBˆ\›HÛ›ÞKœ™\ÛÛ™WÝ\›
\›X—Ü\œÙKœ][ÝJ\›
H
ÈH
ÈžJCBˆYÛ—ÛÙÊœÙ]™\ÛÛ™Y\›ŠCBˆ\Ý][HH›XÙÝZK“\Ý][JÝŠ˜[YJJCBˆ\Ý][KœÙ][™›Ê ÝšY[ÉËÉÕ]IΈ݊˜[YJ_JCBˆ\Ý][KœÙ]]
\›
CBˆ›XÜYÚ[‹œÙ]™\ÛÛ™Y\›
[
Þ\˘\™Ý–ÌWJKYK\Ý][JCBƒBˆ[ÙNƒBˆÛ›ÞHHÝ™X[[[š×Ü›ÞK”Ó›ÞWÒ[\Š
CBˆYˆLÝN[ÙƒBˆ\›H\›X—Ü\œÙKœ][ÝJ\›
H
È É˜[\ÛLÝN[ÙI\ÉÈ HLÝN[Ù
ÈžCBˆ[ÙNƒBˆ\›H\›X—Ü\œÙKœ][ÝJ\›
H
È É˜[\ÜOI\ÉÈ HH
ÈžCBˆ\Ý][HH›XÙÝZK“\Ý][JÝŠ˜[YJJCBˆ\Ý][KœÙ][™›Ê ÝšY[ÉËÉÕ]IΈ݊˜[YJ_JCBˆ\Ý][KœÙ]]
\›
CBˆÛ›ÞKœ^TÓ[šÊ\›\Ý][JCBˆ^Ù\ƒBˆ˜XÙX˜XÚËœš[Ù^Êš[O\Þ\ËœÝÝ]
CBˆ\ÜÃBƒB™[Yˆ[ÙHOHŒÎƒBˆYÛ—ÛÙÊ™Ù][™›È[ˆÝÛ›ØYŠCBˆ]\HH ÝšY[ÉÃBˆYˆ ÖÛ\×IÈ[ˆ˜[YNƒBˆ]\HH Ø]Y[ÉÃBˆ˜[YHH˜[YKœ™\XÙJ ÖÛ\×IË ÉÊCBˆ]ÙÝÛ›ØY
\›˜[YK]\JCBƒB™[Yˆ[ÙHOHƒBˆYÛ—ÛÙÊ]Y[ÈÛ›H[Ý]X™HÝÛ›ØYŠCBˆ]ÙÝÛ›ØY
\›˜[YK Ø]Y[ÉÊCBƒB™[Yˆ[ÙHOHNƒBˆYÛ—ÛÙÊ”ÙX\˜Ú[ˆÝ\ˆYÚ[œÈŠCBˆÜÙX\˜Ú
\›˜[YJCBˆ›XÜYÚ[‹™[™Ù‘\™XÝÜžJ[
Þ\˘\™Ý–ÌWJJCBƒB™[Yˆ[ÙHOH
MNƒBˆYÛ—ÛÙÊ™[˜X›YØÚÈŠCBˆ\™[[›ØÚÙY[ˆHYÛ‹™Ù]Ù][™Ê Ü\™[[›ØÚÙY[‰ÊCBˆÙ^X›Ø\™H›XË’Ù^X›Ø\™
ÉË Ñ[\ˆ[‰ÊCBˆÙ^X›Ø\™™Ó[Ù[
CBˆYˆÙ^X›Ø\™š\ÐÛÛ™š\›YY
NƒBˆ™]Ô݈HÙ^X›Ø\™™Ù]^
CBˆYˆ™]Ô݈OH\™[[›ØÚÙY[ŽƒBˆYÛ‹œÙ]Ù][™Ê Ü\™[[›ØÚÙY Ë™˜[ÙHŠCBˆ›XÙÝZK‘X[ÙÊ
K››ÝYšXØ][ÛŠYÛ—Û˜[YK Ô\™[[›ØÚÈ\ØX›Y ËXÛÛ‹
L˜[ÙJCBˆ[ÙNƒBˆ›XÙÝZK‘X[ÙÊ
K››ÝYšXØ][ÛŠYÛ—Û˜[YK ÕܛۙÈ[ÏÉËXÛÛ‹
L˜[ÙJCBˆ›XÜYÚ[‹™[™Ù‘\™XÝÜžJ[
Þ\˘\™Ý–ÌWJJCBƒB™[Yˆ[ÙHOH
MŽƒBˆYÛ—ÛÙÊ™\ØX›HØÚÈŠCBˆYÛ‹œÙ]Ù][™Ê Ü\™[[›ØÚÙY ËYHŠCBˆ›XÙÝZK‘X[ÙÊ
K››ÝYšXØ][ÛŠYÛ—Û˜[YK Ô\™[[›ØÚÈ[˜X›Y ËXÛÛ‹
L˜[ÙJCBˆ›XÜYÚ[‹™[™Ù‘\™XÝÜžJ[
Þ\˘\™Ý–ÌWJJCBƒB™[Yˆ[ÙHOH
L΃BˆYÛ—ÛÙÊ”™\]Y\Ý[™È”ÓÓ‹T”È][\ÈŠCBˆYÚ[œ]Y\žXžR”ÓÓŠ\›
CBƒBšYˆšY]Û[ÙH\țݛۙNƒBˆ›XË™^XÝ]XZ[[ŠÛÛZ[™\‹”Ù]šY]Ó[ÙJ \ÊHˆ HšY]Û[ÙJCB?>