2025-01-18 23:06:34 -06:00
|
|
|
#! /usr/bin/env python3
|
|
|
|
import json
|
|
|
|
import requests
|
2025-01-24 03:35:29 -06:00
|
|
|
import time
|
2025-01-18 23:06:34 -06:00
|
|
|
|
|
|
|
# Base URL for public channel/claim pages on Odysee.
odysee_url = 'https://odysee.com'

# JSON-RPC proxy endpoint for the LBRY backend that serves the Odysee API.
odysee_api_url = 'https://api.na-backend.odysee.com/api/v1/proxy'
|
|
|
|
|
|
|
|
def odysee_get_channel_url(handle):
    """Return the public Odysee page URL for the channel *handle*."""
    return '{}/{}'.format(odysee_url, handle)
|
|
|
|
|
|
|
|
def odysee_get_releases(handle):
    """Fetch every non-video file release published by an Odysee channel.

    Pages through the LBRY ``claim_search`` API for *handle*, resolving
    reposts to their original claim and skipping collections (playlists),
    video streams, and anything without a source-file hash.

    Args:
        handle: Channel handle, e.g. ``'@somechannel'``.

    Returns:
        dict mapping claim_id -> release dict with keys ``name``, ``title``,
        ``publishdate`` (int, unixtime), ``description``, ``thumbnail``,
        ``url``, and ``filehash`` — or ``None`` if the API request failed or
        the response was missing an expected key.
    """
    releases = {}

    try:
        # Hard safety cap of 19 pages against runaway pagination; the normal
        # exit is the `i == lastpage` check at the bottom of the loop.
        for i in range(1, 20):
            payload = {
                "method": "claim_search",
                "params": {
                    "channel": handle,
                    "page_size": 20,
                    "page": i
                }
            }

            # FIX: added a timeout so a stalled connection can't hang forever.
            # requests.Timeout is a RequestException, so it is caught below.
            response = requests.post(odysee_api_url, json=payload, timeout=30)
            response.raise_for_status()
            data = response.json()

            lastpage = data.get("result", {}).get("total_pages", 1)
            items = data.get("result", {}).get("items", [])

            for raw_item in items:
                item = raw_item

                # The value_type field can help us immediately whittle down chaff like reposts,
                # playlists, etc. By and large we only care about streams, I think, but I'm not
                # confident enough in my knowledge of the LBRY API to whitelist instead of
                # blacklist value_types.
                if item["value_type"] == "repost":
                    # Resolve the repost to the claim it points at.
                    item = raw_item["reposted_claim"]

                if item["value_type"] == "stream":
                    pass
                elif item["value_type"] == "collection":
                    # Collections are playlists, and we don't care about the ones that aren't
                    continue
                else:
                    print(f'Unknown value type, continuing: {item["value_type"]}')
                    # FIX: the message above always said "continuing", but the
                    # old code fell through and could record unknown claim
                    # types; actually skip them now.
                    continue

                # A stream is data(?) in the form of a file, but we don't know what
                # So we should check to see what it is and ignore it if it's something dumb
                if item["value"].get("stream_type") == "video":
                    continue

                # If we can't hash the file, it's not a file we want
                if not item["value"].get("source", {}).get("hash"):
                    continue

                releases[item["claim_id"]] = {
                    # Fields with .strip() at the end are user-controlled and may mess with sorting if
                    # leading/trailing whitespace is left in.
                    "name": item.get("name", "Unnamed Release").strip(),
                    "title": item["value"].get("title", "Untitled Release").strip(),
                    # This field is an int in unixtime
                    "publishdate": int(item["value"].get("release_time", 0)),
                    "description": item["value"].get("description", "No description provided for this release").strip(),
                    "thumbnail": item["value"].get("thumbnail", {}).get("url", ""),
                    "url": f"{odysee_get_channel_url(handle)}/{item['name']}",
                    "filehash": item["value"].get("source", {}).get("hash", "")
                }

            if i == lastpage:
                break
            else:
                # If we're not on the last page, sleep for a second to be easier on Odysee
                # This isn't a proper wait limiter, but it's something.
                time.sleep(1)

    except requests.RequestException as e:
        print(f'RequestException occurred while getting releases for {handle}: {e}')
        return None
    except KeyError as e:
        print(f'KeyError occurred while getting releases for {handle}: {e}')
        print('Nonzero chance Odysee updated their API out from under you')
        return None

    return releases
|