Compare commits

5 Commits

@@ -155,8 +155,11 @@ def parse_enclosures(post_dir, entry):
     if "type" in e:
         print("found enclosed media", e.type)
         if "image/" in e.type:
+            if not os.path.exists(post_dir): #this might be redundant with create_post
+                os.makedirs(post_dir)
             featured_image = grab_media(post_dir, e.href)
-            entry["featured_image"] = featured_image
+            media_item = urlparse(e.href).path.split('/')[-1]
+            entry["featured_image"] = media_item
         else:
             print("FIXME:ignoring enclosed", e.type)
     return entry
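For context, the enclosure change above stores only the media file's basename in the entry's front matter instead of the full download path. A minimal sketch of that extraction, using a hypothetical enclosure URL:

from urllib.parse import urlparse

href = "https://example.org/media/cover.jpg"  # hypothetical enclosure href
media_item = urlparse(href).path.split('/')[-1]
print(media_item)  # prints "cover.jpg"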
@@ -309,12 +312,15 @@ def create_opds_post(post_dir, entry):
         ft = item['type'].split('/')[-1]
         fn = item['rel'].split('/')[-1]
-        if fn == "acquisition":
-            fn = "publication" #calling the publications acquisition is weird
+        # entry.links has image, thumbnail and publication/acquisition.
+        # Only downloading image for now
+        #if fn == "acquisition":
+            #fn = "publication" #calling the publications acquisition is weird
+        if 'image' in fn:
             prefered_name = "{}-{}.{}".format(fn, slugify(entry['title']), ft)
             grab_media(post_dir, item['href'], prefered_name)
+            frontmatter['featured_image'] = prefered_name

     if "summary" in entry:
         summary = entry.summary
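The comment in the hunk above notes that an OPDS entry's links carry image, thumbnail and publication/acquisition relations, and that only the image is downloaded for now. A small sketch of that filtering, with hypothetical link dicts shaped like feedparser's output:

links = [
    {"rel": "http://opds-spec.org/image", "type": "image/jpeg", "href": "https://example.org/cover.jpg"},
    {"rel": "http://opds-spec.org/image/thumbnail", "type": "image/jpeg", "href": "https://example.org/thumb.jpg"},
    {"rel": "http://opds-spec.org/acquisition", "type": "application/epub+zip", "href": "https://example.org/book.epub"},
]
for item in links:
    ft = item['type'].split('/')[-1]  # e.g. "jpeg"
    fn = item['rel'].split('/')[-1]   # "image", "thumbnail" or "acquisition"
    if 'image' in fn:
        print("would download", item['href'])  # only the full-size image matches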
@@ -330,6 +336,18 @@ def create_opds_post(post_dir, entry):
         timestamp = arrow.get(entry['updated_parsed'])
         f.write(timestamp.format('X'))

+def opds_fetch_more(data):
+    """
+    Look for more OPDS feeds to pull, until we no longer hit the "next" navigation property.
+    """
+    for link in data.feed.links:
+        for i in link:
+            if link[i] == 'next':
+                print(link['href'])
+                data = grab_feed(link['href'])
+                return data
+    return None
+
 def main():
     feed_urls = open("feeds_list.txt", "r").read().splitlines()
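opds_fetch_more() scans every key of each feed-level link for the value 'next', which in practice matches the link's rel attribute. A sketch of the pagination link it follows, with hypothetical hrefs; checking rel directly is equivalent:

links = [
    {"rel": "self", "type": "application/atom+xml;profile=opds-catalog", "href": "https://example.org/opds"},
    {"rel": "next", "type": "application/atom+xml;profile=opds-catalog", "href": "https://example.org/opds?page=2"},
]
next_pages = [link['href'] for link in links if link.get('rel') == 'next']
print(next_pages)  # ['https://example.org/opds?page=2']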
@@ -373,14 +391,25 @@ def main():
         data = grab_feed(feed_url)

-        if data:
+        if data: #whenever we get a 200
+            if data.feed: #only if it is an actual feed
                 opds_feed = False
+                opds_entries = []
+                if 'links' in data.feed:
                     for i in data.feed['links']:
                         if i['rel'] == 'self':
                             if 'opds' in i['type']:
                                 opds_feed = True
                                 print("OPDS type feed!")
+                                feed_data = data
+                                while feed_data:
+                                    feed_data = opds_fetch_more(feed_data)
+                                    if feed_data:
+                                        for i in feed_data.entries:
+                                            opds_entries.append(i)
+                                for i in opds_entries:
+                                    data['entries'].append(i)

                 for entry in data.entries:
@@ -425,10 +454,14 @@ def main():
                     post_name
                 ) # create list of posts which have not been returned by the feed

                 for post in existing_posts:
                     # remove blog posts no longer returned by the RSS feed
-                    print("deleted", post)
-                    shutil.rmtree(os.path.join(feed_dir, slugify(post)))
+                    post_dir = os.path.join(output_dir, feed_name, post)
+                    shutil.rmtree(post_dir)
+                    print("deleted", post_dir)

+            else:
+                print(feed_url, "is not or no longer a feed!")

     end = time.time()
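The clean-up hunk above removes post directories that are still on disk but no longer returned by the feed. A self-contained sketch of that pruning step, with hypothetical directory and slug names:

import os
import shutil

output_dir = "output"        # hypothetical Hugo content root
feed_name = "example-feed"   # hypothetical feed slug

# Pretend these post directories exist on disk from a previous run.
existing_posts = ["hello-world", "old-post"]
for post in existing_posts:
    os.makedirs(os.path.join(output_dir, feed_name, post), exist_ok=True)

# Slugs returned by the feed this run; "old-post" is no longer in the feed.
current_posts = {"hello-world"}

for post in set(existing_posts) - current_posts:
    post_dir = os.path.join(output_dir, feed_name, post)
    shutil.rmtree(post_dir)
    print("deleted", post_dir)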