diff --git a/lumbung-calendar-prototype/README.md b/lumbung-calendar-prototype/README.md
deleted file mode 100644
index 62fe8ff..0000000
--- a/lumbung-calendar-prototype/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Calendar Feed
-Generate HUGO posts based on a publicly accessible ICS calendar.
-
-## Use
-Fill in your details in `calendar_feed_config.py`
-
-## TODO / FIXME
-
- * Multiple calendars to multiple hugo categories
diff --git a/lumbung-calendar-prototype/event_feed.py b/lumbung-calendar-prototype/event_feed.py
index d11fb1c..a3c7a82 100644
--- a/lumbung-calendar-prototype/event_feed.py
+++ b/lumbung-calendar-prototype/event_feed.py
@@ -1,49 +1,52 @@
 #!/bin/python3
 
-#lumbung.space calendar feed generator
-#© 2021 roel roscam abbing gplv3 etc
+# lumbung.space calendar feed generator
+# © 2021 roel roscam abbing gplv3 etc
 
-from ics import Calendar
-import requests
-import jinja2
 import os
-import shutil
-from slugify import slugify
-from natural import date
-from event_feed_config import calendar_url, output_dir
-from urllib.parse import urlparse
-import arrow
 import re
+import shutil
+from urllib.parse import urlparse
+
+import arrow
+import jinja2
+import requests
+from ics import Calendar
+from natural import date
+from slugify import slugify
+
+from event_feed_config import calendar_url, output_dir
 
 cal = Calendar(requests.get(calendar_url).text)
 
-env = jinja2.Environment(
-    loader=jinja2.FileSystemLoader(os.path.curdir)
-    )
+env = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.curdir))
 
 if not os.path.exists(output_dir):
     os.mkdir(output_dir)
 
-template = env.get_template('event_template.md')
+template = env.get_template("event_template.md")
 
 existing_posts = os.listdir(output_dir)
 
+
 def findURLs(string):
     """
     return all URLs in a given string
     """
     regex = r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
-    url = re.findall(regex,string)
-    return [x[0] for x in url]
+    url = re.findall(regex, string)
+    return [x[0] for x in url]
+
 
 def find_imageURLS(string):
     """
     return all image URLS in a given string
     """
-    regex = r"(?:http\:|https\:)?\/\/.*?\.(?:png|jpg|jpeg|gif|svg)"
+    regex = r"(?:http\:|https\:)?\/\/.*?\.(?:png|jpg|jpeg|gif|svg)"
     img_urls = re.findall(regex, string, flags=re.IGNORECASE)
-    return img_urls
+    return img_urls
+
 
 def create_metadata(event):
     """
@@ -55,24 +58,28 @@ def create_metadata(event):
 
     if location_urls:
         location_url = location_urls[0]
-        event.location = '[{}]({})'.format(urlparse(location_url).netloc, location_url)
-
+        event.location = "[{}]({})".format(
+            urlparse(location_url).netloc, location_url
+        )
 
     event_metadata = {
-        'name':event.name,
-        'created':event.created.format(),
-        'description': event.description,
-        'localized_begin': '           '.join(localize_time(event.begin)), #non-breaking space characters to defeat markdown
-        'begin': event.begin.format(),
-        'end': event.end.format(),
-        'duration': date.compress(event.duration),
-        'location': event.location,
-        'uid': event.uid,
-        'images' : find_imageURLS(event.description) # currently not used in template
+        "name": event.name,
+        "created": event.created.format(),
+        "description": event.description,
+        "localized_begin": "           ".join(
+            localize_time(event.begin)
+        ),  # non-breaking space characters to defeat markdown
+        "begin": event.begin.format(),
+        "end": event.end.format(),
+        "duration": date.compress(event.duration),
+        "location": event.location,
+        "uid": event.uid,
+        "images": find_imageURLS(event.description),  # currently not used in template
     }
 
     return event_metadata
 
+
 def localize_time(date):
     """
     Turn a given date into various timezones
@@ -82,28 +89,27 @@
     """
     # 3 PM Kassel, Germany, 4 PM Ramallah/Jerusalem, Palestina (QoF),
     # 8 AM Bogota, Colombia (MaMa), 8 PM Jakarta, Indonesia (Gudskul),
     # 1 PM (+1day) Wellington, New Zealand (Fafswag), 9 AM Havana, Cuba (Instar).
     tzs = [
-    ('Kassel','Europe/Berlin'),
-    ('Bamako', 'Europe/London'),
-    ('Palestine','Asia/Jerusalem'),
-    ('Bogota','America/Bogota'),
-    ('Jakarta','Asia/Jakarta'),
-    ('Makassar','Asia/Makassar'),
-    ('Wellington', 'Pacific/Auckland')
-    ]
+        ("Kassel", "Europe/Berlin"),
+        ("Bamako", "Europe/London"),
+        ("Palestine", "Asia/Jerusalem"),
+        ("Bogota", "America/Bogota"),
+        ("Jakarta", "Asia/Jakarta"),
+        ("Makassar", "Asia/Makassar"),
+        ("Wellington", "Pacific/Auckland"),
+    ]
 
-    localized_begins =[]
+    localized_begins = []
 
     for location, tz in tzs:
-        localized_begins.append( #javascript formatting because of string creation from hell
-        '__{}__ {}'.format(
-        str(location),
-        str(date.to(tz).format("YYYY-MM-DD __HH:mm__"))
-        )
+        localized_begins.append(  # javascript formatting because of string creation from hell
+            "__{}__ {}".format(
+                str(location), str(date.to(tz).format("YYYY-MM-DD __HH:mm__"))
             )
+        )
 
     return localized_begins
 
+
 def create_event_post(post_dir, event):
     """
     Create HUGO post based on calendar event metadata
@@ -112,83 +118,86 @@
     In that case it will also delete images no longer in metadata
     TODO: split this up into more functions for legibility
     """
-    
+
     if not os.path.exists(post_dir):
         os.mkdir(post_dir)
 
     event_metadata = create_metadata(event)
 
-    #list already existing images
-    #so we can later delete them if we dont find them in the event metadata anymore
+    # list already existing images
+    # so we can later delete them if we dont find them in the event metadata anymore
    existing_images = os.listdir(post_dir)
    try:
-        existing_images.remove('index.md')
-        existing_images.remove('.timestamp')
+        existing_images.remove("index.md")
+        existing_images.remove(".timestamp")
    except:
        pass
 
-    for img in event_metadata['images']:
+    for img in event_metadata["images"]:
+
+        # parse img url to safe local image name
+        img_name = img.split("/")[-1]
+        fn, ext = img_name.split(".")
+        img_name = slugify(fn) + "." + ext
 
-        #parse img url to safe local image name
-        img_name = img.split('/')[-1]
-        fn, ext = img_name.split('.')
-        img_name = slugify(fn) + '.' + ext
-
         local_image = os.path.join(post_dir, img_name)
-        
+
         if not os.path.exists(local_image):
-            #download preview image
+            # download preview image
             response = requests.get(img, stream=True)
-            with open(local_image, 'wb') as img_file:
+            with open(local_image, "wb") as img_file:
                 shutil.copyfileobj(response.raw, img_file)
                 print('Downloaded image for event "{}"'.format(event.name))
-        event_metadata['description'] = event_metadata['description'].replace(img, '![]({})'.format(img_name))
+        event_metadata["description"] = event_metadata["description"].replace(
+            img, "![]({})".format(img_name)
+        )
 
         if img_name in existing_images:
             existing_images.remove(img_name)
 
     for left_over_image in existing_images:
-        #remove images we found, but which are no longer in remote event
-        os.remove(os.path.join(post_dir,left_over_image))
-        print('deleted image', left_over_image)
+        # remove images we found, but which are no longer in remote event
+        os.remove(os.path.join(post_dir, left_over_image))
+        print("deleted image", left_over_image)
 
-    with open(os.path.join(post_dir,'index.md'),'w') as f:
-        post = template.render(event = event_metadata)
+    with open(os.path.join(post_dir, "index.md"), "w") as f:
+        post = template.render(event=event_metadata)
         f.write(post)
-        print('created post for', event.name, '({})'.format(event.uid))
+        print("created post for", event.name, "({})".format(event.uid))
 
-    with open(os.path.join(post_dir,'.timestamp'),'w') as f:
-        f.write(event_metadata['created'])
+    with open(os.path.join(post_dir, ".timestamp"), "w") as f:
+        f.write(event_metadata["created"])
 
 
 def update_event_post(post_dir, event):
     """
     Update a post based on the VCARD event 'created' field which changes when updated
-    """
+    """
     if os.path.exists(post_dir):
-        old_timestamp = open(os.path.join(post_dir,'.timestamp')).read()
+        old_timestamp = open(os.path.join(post_dir, ".timestamp")).read()
         if event.created > arrow.get(old_timestamp):
-            print('Updating', event.name, '({})'.format(event.uid))
+            print("Updating", event.name, "({})".format(event.uid))
             create_event_post(post_dir, event)
         else:
-            print('Event current: ', event.name, '({})'.format(event.uid))
+            print("Event current: ", event.name, "({})".format(event.uid))
+
 
 for event in list(cal.events):
 
     post_dir = os.path.join(output_dir, event.uid)
 
-    if event.uid not in existing_posts:
-        #if there is an event we dont already have, make it
+    if event.uid not in existing_posts:
+        # if there is an event we dont already have, make it
         create_event_post(post_dir, event)
 
-    elif event.uid in existing_posts:
-        #if we already have it, update
+    elif event.uid in existing_posts:
+        # if we already have it, update
         update_event_post(post_dir, event)
-        existing_posts.remove(event.uid) # create list of posts which have not been returned by the calendar
-
-
-for post in existing_posts:
-    #remove events not returned by the calendar (deletion)
-    print('deleted', post)
-    shutil.rmtree(os.path.join(output_dir,post))
+        existing_posts.remove(
+            event.uid
+        )  # create list of posts which have not been returned by the calendar
 
+for post in existing_posts:
+    # remove events not returned by the calendar (deletion)
+    print("deleted", post)
+    shutil.rmtree(os.path.join(output_dir, post))
diff --git a/lumbung-calendar-prototype/event_template.md b/lumbung-calendar-prototype/event_template.md
index 441f3da..849530c 100644
--- a/lumbung-calendar-prototype/event_template.md
+++ b/lumbung-calendar-prototype/event_template.md
@@ -11,9 +11,8 @@ uid: "{{ event.uid }}"
 {% if event.location %}
 location: "{{ event.location }}"
 {% endif %}
-
-
 ---
+
 {% if event.description %}
 
 {{ event.description }}
diff --git a/lumbung-calendar-prototype/requirements.txt b/lumbung-calendar-prototype/requirements.txt
deleted file mode 100644
index 356637c..0000000
--- a/lumbung-calendar-prototype/requirements.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-# Automatically generated by https://github.com/damnever/pigar.
-
-# calendar-feed/event_feed.py: 3
-Jinja2 == 2.10
-
-# calendar-feed/event_feed.py: 1
-ics == 0.7
-
-# calendar-feed/event_feed.py: 6
-natural == 0.2.0
-
-# calendar-feed/event_feed.py: 5
-python_slugify == 5.0.2
-
-# calendar-feed/event_feed.py: 2
-requests == 2.21.0
diff --git a/lumbung-feed-aggregator/README.md b/lumbung-feed-aggregator/README.md
deleted file mode 100644
index 97d32e9..0000000
--- a/lumbung-feed-aggregator/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# lumbung feed aggregator
-
-* Grab feeds listed in `feeds_list.txt`
-* Parse feed for blogpost entries
-* * Download images linked in blogposts
-* Turn blogpost entries into HUGO posts
-
-# TODO/FIXME
-
-* only include posts with a certain tag
-
diff --git a/lumbung-feed-aggregator/post_template.md b/lumbung-feed-aggregator/post_template.md
index 9dbc449..231e15c 100644
--- a/lumbung-feed-aggregator/post_template.md
+++ b/lumbung-feed-aggregator/post_template.md
@@ -7,7 +7,7 @@ author: "{{ frontmatter.author }}"
 original_link: "{{ frontmatter.original_link }}"
 feed_name: "{{ frontmatter.feed_name}}"
 categories: ["network", "{{ frontmatter.feed_name}}"]
 tags: {{ frontmatter.tags }}
 ---
 
-{{ content }}
\ No newline at end of file
+{{ content }}
diff --git a/lumbung-feed-aggregator/rss_aggregator.py b/lumbung-feed-aggregator/rss_aggregator.py
index 0f65c93..94a523e 100644
--- a/lumbung-feed-aggregator/rss_aggregator.py
+++ b/lumbung-feed-aggregator/rss_aggregator.py
@@ -1,84 +1,88 @@
 #!/bin/python3
 
-#lumbung.space rss feed aggregator
-#© 2021 roel roscam abbing gplv3 etc
+# lumbung.space rss feed aggregator
+# © 2021 roel roscam abbing gplv3 etc
 
-import requests
-import jinja2
 import os
 import shutil
-import feedparser
-from urllib.parse import urlparse
-from ast import literal_eval as make_tuple
-from slugify import slugify
-from bs4 import BeautifulSoup
 import time
+from ast import literal_eval as make_tuple
+from urllib.parse import urlparse
+
 import arrow
+import feedparser
+import jinja2
+import requests
+from bs4 import BeautifulSoup
+from slugify import slugify
 
 
 def write_etag(feed_name, feed_data):
     """
     save timestamp of when feed was last modified
     """
-    etag = ''
-    modified = ''
-
-    if 'etag' in feed_data:
+    etag = ""
+    modified = ""
+
+    if "etag" in feed_data:
         etag = feed_data.etag
-    if 'modified' in feed_data:
+    if "modified" in feed_data:
         modified = feed_data.modified
 
     if etag or modified:
-        with open(os.path.join('etags',feed_name +'.txt'),'w') as f:
+        with open(os.path.join("etags", feed_name + ".txt"), "w") as f:
             f.write(str((etag, modified)))
 
+
 def get_etag(feed_name):
     """
     return timestamp of when feed was last modified
     """
-    fn = os.path.join('etags',feed_name +'.txt')
-    etag = ''
-    modified = ''
+    fn = os.path.join("etags", feed_name + ".txt")
+    etag = ""
+    modified = ""
 
     if os.path.exists(fn):
-        etag, modified = make_tuple(open(fn,'r').read())
+        etag, modified = make_tuple(open(fn, "r").read())
 
     return etag, modified
 
+
 def create_frontmatter(entry):
     """
-    parse RSS metadata and return as frontmatter
+    parse RSS metadata and return as frontmatter
     """
-    if 'published' in entry:
+    if "published" in entry:
         published = entry.published_parsed
-    if 'updated' in entry:
+    if "updated" in entry:
         published = entry.updated_parsed
 
     published = arrow.get(published)
 
-    if 'author' in entry:
+    if "author" in entry:
         author = entry.author
     else:
-        author = ''
+        author = ""
 
     tags = []
-    if 'tags' in entry:
-        #TODO finish categories
+    if "tags" in entry:
+        # TODO finish categories
         for t in entry.tags:
-            tags.append(t['term'])
+            tags.append(t["term"])
 
     frontmatter = {
-        'title':entry.title,
-        'date': published.format(),
-        'summary': '',
-        'author': author,
-        'original_link': entry.link,
-        'feed_name': entry['feed_name'],
-        'tags': str(tags)
+        "title": entry.title,
+        "date": published.format(),
+        "summary": "",
+        "author": author,
+        "original_link": entry.link,
+        "feed_name": entry["feed_name"],
+        "tags": str(tags),
     }
 
     return frontmatter
 
+
 def create_post(post_dir, entry):
     """
     write hugo post based on RSS entry
@@ -88,40 +92,41 @@ def create_post(post_dir, entry):
     if not os.path.exists(post_dir):
         os.makedirs(post_dir)
 
-    if 'content' in entry:
+    if "content" in entry:
         post_content = entry.content[0].value
     else:
         post_content = entry.summary
 
     parsed_content = parse_posts(post_dir, post_content)
 
-    with open(os.path.join(post_dir,'index.html'),'w') as f: #n.b. .html
+    with open(os.path.join(post_dir, "index.html"), "w") as f:  # n.b. .html
         post = template.render(frontmatter=frontmatter, content=parsed_content)
         f.write(post)
-        print('created post for', entry.title, '({})'.format(entry.link))
+        print("created post for", entry.title, "({})".format(entry.link))
+
 
 def grab_media(post_directory, url):
     """
     download media linked in post to have local copy
     if download succeeds return new local path otherwise return url
     """
-    image = urlparse(url).path.split('/')[-1]
+    image = urlparse(url).path.split("/")[-1]
 
     try:
         if not os.path.exists(os.path.join(post_directory, image)):
-            #TODO: stream is true is a conditional so we could check the headers for things, mimetype etc
+            # TODO: stream is true is a conditional so we could check the headers for things, mimetype etc
             response = requests.get(url, stream=True)
             if response.ok:
-                with open(os.path.join(post_directory, image), 'wb') as img_file:
+                with open(os.path.join(post_directory, image), "wb") as img_file:
                     shutil.copyfileobj(response.raw, img_file)
-                    print('Downloaded cover image', image)
+                    print("Downloaded cover image", image)
                 return image
             return image
         elif os.path.exists(os.path.join(post_directory, image)):
             return image
 
     except Exception as e:
-        print('Failed to download image', url)
+        print("Failed to download image", url)
         print(e)
         return url
 
@@ -133,27 +138,28 @@ def parse_posts(post_dir, post_content):
     filter out iframe sources not in allowlist
     """
     soup = BeautifulSoup(post_content, "html.parser")
-    allowed_iframe_sources = ['youtube.com', 'vimeo.com', 'tv.lumbung.space']
+    allowed_iframe_sources = ["youtube.com", "vimeo.com", "tv.lumbung.space"]
     media = []
 
-    for img in soup(['img','object']):
-        local_image = grab_media(post_dir, img['src'])
-        if img['src'] != local_image:
-            img['src'] = local_image
+    for img in soup(["img", "object"]):
+        local_image = grab_media(post_dir, img["src"])
+        if img["src"] != local_image:
+            img["src"] = local_image
 
-    for iframe in soup(['iframe']):
-        if not any(source in iframe['src'] for source in allowed_iframe_sources):
-            print('filtered iframe: {}...'.format(iframe['src'][:25]))
+    for iframe in soup(["iframe"]):
+        if not any(source in iframe["src"] for source in allowed_iframe_sources):
+            print("filtered iframe: {}...".format(iframe["src"][:25]))
             iframe.decompose()
 
     return soup.decode()
 
+
 def grab_feed(feed_url):
     """
     check whether feed has been updated
-    download & return it if it has
+    download & return it if it has
     """
     feed_name = urlparse(feed_url).netloc
-    
+
     etag, modified = get_etag(feed_name)
 
     try:
@@ -164,42 +170,42 @@ def grab_feed(feed_url):
         else:
             data = feedparser.parse(feed_url)
     except Exception as e:
-        print('Error grabbing feed')
+        print("Error grabbing feed")
         print(feed_name)
         print(e)
         return False
 
     print(data.status, feed_url)
     if data.status == 200:
-        #304 means the feed has not been modified since we last checked
+        # 304 means the feed has not been modified since we last checked
         write_etag(feed_name, data)
         return data
     return False
 
-
-feed_urls = open('feeds_list.txt','r').read().splitlines()
+
+feed_urls = open("feeds_list.txt", "r").read().splitlines()
 
 start = time.time()
 
-if not os.path.exists('etags'):
-    os.mkdir('etags')
+if not os.path.exists("etags"):
+    os.mkdir("etags")
 
-env = jinja2.Environment(
-    loader=jinja2.FileSystemLoader(os.path.curdir)
-    )
+env = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.curdir))
 
-output_dir = os.environ.get('OUTPUT_DIR', '/home/r/Programming/lumbung.space/lumbung.space-web/content/posts/')
-#output_dir = os.environ.get('OUTPUT_DIR', 'network/')
+output_dir = os.environ.get(
+    "OUTPUT_DIR", "/home/r/Programming/lumbung.space/lumbung.space-web/content/posts/"
+)
+# output_dir = os.environ.get('OUTPUT_DIR', 'network/')
 
 if not os.path.exists(output_dir):
     os.makedirs(output_dir)
 
-template = env.get_template('post_template.md')
+template = env.get_template("post_template.md")
 
-#add iframe to the allowlist of feedparser's sanitizer,
-#this is now handled in parse_post()
-feedparser.sanitizer._HTMLSanitizer.acceptable_elements |= {'iframe'}
+# add iframe to the allowlist of feedparser's sanitizer,
+# this is now handled in parse_post()
+feedparser.sanitizer._HTMLSanitizer.acceptable_elements |= {"iframe"}
 
 for feed_url in feed_urls:
 
@@ -216,33 +222,33 @@ for feed_url in feed_urls:
 
     if data:
         for entry in data.entries:
-            # if 'tags' in entry:
-            #     for tag in entry.tags:
-            #         for x in ['lumbung.space', 'D15', 'lumbung']:
-            #             if x in tag['term']:
-            #                 print(entry.title)
-            entry['feed_name'] = feed_name
+            # if 'tags' in entry:
+            #     for tag in entry.tags:
+            #         for x in ['lumbung.space', 'D15', 'lumbung']:
+            #             if x in tag['term']:
+            #                 print(entry.title)
+            entry["feed_name"] = feed_name
 
             post_name = slugify(entry.title)
             post_dir = os.path.join(output_dir, feed_name, post_name)
-            
-            if post_name not in existing_posts:
-                #if there is a blog entry we dont already have, make it
+
+            if post_name not in existing_posts:
+                # if there is a blog entry we dont already have, make it
                 create_post(post_dir, entry)
 
-            elif post_name in existing_posts:
-                #if we already have it, update it
-                create_post(post_dir, entry)
-                existing_posts.remove(post_name) # create list of posts which have not been returned by the feed
+            elif post_name in existing_posts:
+                # if we already have it, update it
+                create_post(post_dir, entry)
+                existing_posts.remove(
+                    post_name
+                )  # create list of posts which have not been returned by the feed
 
-        for post in existing_posts:
-            #remove blog posts no longer returned by the RSS feed
-            print("deleted", post)
+        for post in existing_posts:
+            # remove blog posts no longer returned by the RSS feed
+            print("deleted", post)
             shutil.rmtree(os.path.join(feed_dir, slugify(post)))
 
-
 end = time.time()
 print(end - start)
-
diff --git a/lumbung-hashtag-bot/README.md b/lumbung-hashtag-bot/README.md
deleted file mode 100644
index 618a3ac..0000000
--- a/lumbung-hashtag-bot/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# lumbung.space hashtag publishing bot
-
-This script makes [Hugo page bundles](https://gohugo.io/content-management/page-bundles/) out of Hashtag feeds on a Mastodon Hometown or Glitchsoc instance.
-
-## Install requirements
-
-`pip3 install Mastodon.py jinja2`
-
-## Setup
-
-This script requires access to an account on said Mastodon instance. This instance and the credentials can be set in `config_hashtag_bot.py`.
-
-If it is the first time you are running the script, you need to register the application on the Mastodon instance. Have a look at the [Mastodon.py documentation](https://mastodonpy.readthedocs.io/en/stable/#module-mastodon) for how to do that.
-
-This bot only uses read permissions.
-
-Set which hashtags you want to publish by adding them to the list `hashtags` in `config_hashtag_bot.py`. Omit the '#'.
-
-## What it does
-
-* The Bot only looks at the **local timeline** for posts under each hashtag configured in `config_hashtag_bot.py`.
-* This means posts need to be **public** or directly addressed to the bot
-* This script respects the mental model of 'local only' posts in the sense that people do not expect them to appear elsewhere. So **local only posts are ignored**
-* It takes only posts with Media attached and then only those with images
-
-## What it doesn't do
-
-* Different types of media or embeds
-* No thread recreation, each post is treated as a top level post
-
diff --git a/lumbung-hashtag-bot/post_template.md b/lumbung-hashtag-bot/post_template.md
index 6aeff3e..5fb056e 100644
--- a/lumbung-hashtag-bot/post_template.md
+++ b/lumbung-hashtag-bot/post_template.md
@@ -11,4 +11,4 @@ tags: [{% for i in post_metadata.tags %} "{{ i.name }}", {% endfor %}]
 {{item.description}}
 {% endfor %}
 
-{{ post_metadata.content | filter_mastodon_urls }}
\ No newline at end of file
+{{ post_metadata.content | filter_mastodon_urls }}
diff --git a/lumbung-hashtag-bot/publish_hashtags.py b/lumbung-hashtag-bot/publish_hashtags.py
index 09e09d7..77bf58c 100644
--- a/lumbung-hashtag-bot/publish_hashtags.py
+++ b/lumbung-hashtag-bot/publish_hashtags.py
@@ -5,28 +5,31 @@
 # Currently does not do any thread recreation and only handles images
 
 import os
-import requests
 import shutil
 
 import jinja2
-
+import requests
 from mastodon import Mastodon
+
 import config_hashtag_bot
 
+
 def login_mastodon_bot():
     mastodon = Mastodon(
-        client_id = 'publishbot_clientcred.secret',
-        api_base_url = config_hashtag_bot.instance
+        client_id="publishbot_clientcred.secret",
+        api_base_url=config_hashtag_bot.instance,
     )
 
     mastodon.log_in(
         config_hashtag_bot.email,
         config_hashtag_bot.password,
-        to_file = 'publishbot_usercred.secret', scopes=['read']
+        to_file="publishbot_usercred.secret",
+        scopes=["read"],
     )
 
     return mastodon
 
+
 def create_frontmatter(post_metadata):
     """
     Parse post metadata and return it as HUGO frontmatter
@@ -35,6 +38,7 @@ def create_frontmatter(post_metadata):
     frontmatter = ""
     return frontmatter
 
+
 def download_media(post_directory, media_attachments):
     """
     Download media attached to posts. N.b. currently only images
@@ -42,15 +46,16 @@ def download_media(post_directory, media_attachments):
     """
 
     for item in media_attachments:
-        if item['type'] == 'image':
-            image = localize_media_url(item['url'])
-            #TODO check whether this needs to handle delete & redraft with different images
+        if item["type"] == "image":
+            image = localize_media_url(item["url"])
+            # TODO check whether this needs to handle delete & redraft with different images
             if not os.path.exists(os.path.join(post_directory, image)):
-                #download image
-                response = requests.get(item['url'], stream=True)
-                with open(os.path.join(post_directory, image), 'wb') as img_file:
+                # download image
+                response = requests.get(item["url"], stream=True)
+                with open(os.path.join(post_directory, image), "wb") as img_file:
                     shutil.copyfileobj(response.raw, img_file)
-                    print('Downloaded cover image', image)
+                    print("Downloaded cover image", image)
+
 
 def create_post(post_directory, post_metadata):
     """
@@ -61,17 +66,18 @@ def create_post(post_directory, post_metadata):
     if not os.path.exists(post_directory):
         os.mkdir(post_directory)
 
-    with open(os.path.join(post_directory,'index.html'),'w') as f:
+    with open(os.path.join(post_directory, "index.html"), "w") as f:
         post = template.render(post_metadata=post_metadata)
         f.write(post)
 
-    download_media(post_directory, post_metadata['media_attachments'])
+    download_media(post_directory, post_metadata["media_attachments"])
+
 
 def localize_media_url(url):
     """
     Returns the filename, used also as custom jinja filter
     """
-    return url.split('/')[-1]
+    return url.split("/")[-1]
 
 
 def filter_mastodon_urls(content):
@@ -80,7 +86,7 @@ def filter_mastodon_urls(content):
     e.g.