Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Relaxing channel name check in Xtream API #178

Merged
merged 27 commits into from
Dec 8, 2021
Merged
Changes from 1 commit
Commits
Show all changes
27 commits
Select commit Hold shift + click to select a range
293963b
Added Initial XTream
superolmo May 28, 2021
a1dca48
Added XTream Series
superolmo Jun 2, 2021
d0be0a5
Added check for local logo_path
superolmo Jun 4, 2021
a84f7ce
Back to fixed path
superolmo Jun 4, 2021
b97598a
Added pyxtream choice
superolmo Jun 5, 2021
0530fc9
Replaced the test server
superolmo Jun 6, 2021
b56a6fb
Replaced the test server
superolmo Jun 6, 2021
ab11dfd
Fixed cache-path and added regex search
superolmo Jun 7, 2021
5d4a971
Merge branch 'master' of github.com:superolmo/hypnotix
superolmo Jun 7, 2021
2b8d127
Merge branch 'master' of https://github.com/linuxmint/hypnotix into l…
superolmo Jun 7, 2021
b38d61e
Changed osp back to os.path
superolmo Jun 7, 2021
7db1d62
Changed osp back to os.path
superolmo Jun 7, 2021
06517d6
Merge branch 'linuxmin-master'
superolmo Jun 7, 2021
2a45eb1
Fixed bug in the way it reload from cache
superolmo Jun 12, 2021
e79a848
Fixed missing provider when it doesn't load
superolmo Jun 18, 2021
dcbb6a1
Improved handling of missing keys
superolmo Jun 18, 2021
97b9e73
Fixed Categories and cleaned up the code
superolmo Jun 18, 2021
32af21f
Updated function names to follow PEP8
superolmo Jun 28, 2021
51e6d1d
Added check before authorizing
superolmo Sep 27, 2021
e6390d9
Merge remote-tracking branch 'upstream/master'
superolmo Sep 27, 2021
fca44f2
Scale down changes
superolmo Sep 27, 2021
0b41feb
Revert some more changes
superolmo Sep 27, 2021
215e191
Revert last changes
superolmo Sep 27, 2021
76bd0f2
Revert flag name
superolmo Sep 27, 2021
211db79
Discard streams w/o name, change live radio type to live stream
superolmo Nov 5, 2021
c8577a2
Rebase to upstream master
superolmo Nov 28, 2021
a7d447c
Fix subgroup name check
superolmo Dec 1, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Fixed cache-path and added regex search
  • Loading branch information
superolmo committed Jun 7, 2021
commit ab11dfd592df67b4650119ee5e6699ecc922963c
151 changes: 123 additions & 28 deletions usr/lib/hypnotix/xtream.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,12 @@
__version__ = '0.1'
__author__ = 'Claudio Olmi'

from typing import List
import requests
import time
from os import path as osp
from os import makedirs
from os import remove

# Timing xtream json downloads
from timeit import default_timer as timer, timeit
Expand Down Expand Up @@ -97,6 +99,15 @@ def __init__(self, xtream: object, group_title, stream_info):
if not xtream.validateURL(self.url):
print("{} - Bad URL? `{}`".format(self.name, self.url))

def export_json(self):
    """Return this stream's data as a JSON-serializable dict.

    Starts from the resolved playback URL, merges in every raw field
    received from the provider, then records the local logo path
    (overwriting any ``logo_path`` key the provider sent).
    """
    # Later keys win, matching dict.update() semantics: provider fields
    # may overwrite 'url', and 'logo_path' always reflects the local copy.
    jsondata = {'url': self.url, **self.raw}
    jsondata['logo_path'] = self.logo_path
    return jsondata

class Group():
def __init__(self, group_info: dict, stream_type: str):
# Raw JSON Group
Expand Down Expand Up @@ -212,13 +223,14 @@ def __init__(self, provider_name: str, provider_username: str, provider_password
self.username = provider_username
self.password = provider_password
self.name = provider_name
self.cache_path = cache_path

# if the cache_path is specified, test that it is a directory
if cache_path != "":
if osp.isdir(cache_path):
self.cache_path = cache_path
else:
if self.cache_path != "":
# If the cache_path is not a directory, clear it
if not osp.isdir(self.cache_path):
print("Cache Path is not a directory, using default '~/.xtream-cache/'")
self.cache_path == ""

# If the cache_path is still empty, use default
if self.cache_path == "":
Expand All @@ -228,6 +240,80 @@ def __init__(self, provider_name: str, provider_username: str, provider_password

self.authenticate()

def search_stream(self, keyword: str, return_type: str = "LIST") -> List:
    """Search the loaded movie streams by name.

    Args:
        keyword (str): Keyword to search for. Supports REGEX
            (matched against the start of each stream name).
        return_type (str, optional): Output format, 'LIST' or 'JSON'.
            Defaults to "LIST".

    Returns:
        List: Matching streams as dicts (possibly empty) when
        return_type is 'LIST'; a JSON string of the same list when
        return_type is 'JSON'.
    """
    # Compile once and reuse; re.match(compiled, s) just re-wraps it.
    regex = re.compile(keyword)

    # NOTE(review): only self.movies is searched; live/series streams
    # are not included — confirm this is intentional.
    search_result = [
        stream.export_json()
        for stream in self.movies
        if regex.match(stream.name) is not None
    ]

    if return_type == "JSON":
        # A list is never None, so report the count unconditionally
        # (the original `search_result != None` check was always true).
        print("Found {} results".format(len(search_result)))
        return json.dumps(search_result, ensure_ascii=False)
    return search_result

def download_video(self, url: str, fullpath_filename: str) -> bool:
    """Download a stream to a local file.

    Args:
        url (str): Complete URL of the stream.
        fullpath_filename (str): Complete file path where to save the stream.

    Returns:
        bool: True if successful, False if error.
    """
    ret_code = False
    mb_size = 1024 * 1024
    # Stream in 4 MB blocks so large videos never sit fully in memory.
    block_bytes = 4 * mb_size
    try:
        print("Downloading from URL `{}` and saving at `{}`".format(url, fullpath_filename))
        response = requests.get(url, timeout=(5), stream=True)
        print("Got response")
        # If there is an answer from the remote server
        if response.status_code == 200:
            print("Got response 200")
            downloaded_bytes = 0
            # Content-Length may be absent (chunked transfer); 0 means
            # "unknown size" and skips the completeness check below.
            total_content_size = int(response.headers.get('content-length', 0))
            total_content_size_mb = total_content_size / mb_size
            print("Ready to download {:.1f} MB file".format(total_content_size_mb))
            # Binary mode: decoding video data as text would corrupt it.
            with open(fullpath_filename, "wb") as file:
                # Grab data by block_bytes
                for data in response.iter_content(block_bytes):
                    # Count the bytes actually received: the last chunk
                    # is usually shorter than block_bytes.
                    downloaded_bytes += len(data)
                    print("{:.0f}/{:.1f} MB downloaded".format(downloaded_bytes / mb_size, total_content_size_mb))
                    file.write(data)
            if total_content_size and downloaded_bytes < total_content_size:
                print("The file size is incorrect, deleting")
                remove(fullpath_filename)
            else:
                # Set the datetime when it was last retrieved
                # self.settings.set_
                ret_code = True
        else:
            print("HTTP error %d while retrieving from %s!" % (response.status_code, url))
    except Exception as e:
        print(e)

    return ret_code

def slugify(self, string: str) -> str:
"""Normalize string

Expand Down Expand Up @@ -289,35 +375,44 @@ def authenticate(self):
}

def loadFromFile(self, filename) -> dict:
"""Try to load the distionary from file

Args:
filename ([type]): File name containing the data

Returns:
dict: Dictionary is found and no errors, None is file does not exists
"""
#Build the full path
full_filename = osp.join(self.cache_path, "{}-{}".format(
self.slugify(self.name),
filename
))


my_data = None
#threshold_time = time.mktime(time.gmtime(60*60*8)) # 8 hours
threshold_time = 60*60*8

# Get the enlapsed seconds since last file update
diff_time = time.time() - osp.getmtime(full_filename)
# If the file was updated less than the threshold time,
# it means that the file is still fresh, we can load it.
# Otherwise skip and return None to force a re-download
if threshold_time > diff_time:
# Load the JSON data
try:
with open(full_filename,mode='r',encoding='utf-8') as myfile:
#my_data = myfile.read()
my_data = json.load(myfile)
except Exception as e:
print("Could not save to file `{}`: e=`{}`".format(
full_filename, e
))

return my_data

if osp.isfile(full_filename):

my_data = None
#threshold_time = time.mktime(time.gmtime(60*60*8)) # 8 hours
threshold_time = 60*60*8

# Get the enlapsed seconds since last file update
diff_time = time.time() - osp.getmtime(full_filename)
# If the file was updated less than the threshold time,
# it means that the file is still fresh, we can load it.
# Otherwise skip and return None to force a re-download
if threshold_time > diff_time:
# Load the JSON data
try:
with open(full_filename,mode='r',encoding='utf-8') as myfile:
#my_data = myfile.read()
my_data = json.load(myfile)
except Exception as e:
print("Could not save to file `{}`: e=`{}`".format(
full_filename, e
))
return my_data
else:
return None

def saveToFile(self, data_list: dict, filename: str) -> bool:
"""Save a dictionary to file
Expand Down Expand Up @@ -674,4 +769,4 @@ def get_all_live_epg_URL_by_stream(self, stream_id):

def get_all_epg_URL(self):
    """Return the provider URL for downloading the full XMLTV EPG."""
    return '%s/xmltv.php?username=%s&password=%s' % (
        self.server, self.username, self.password)
return URL