forked from ruangrupa/konfluks

cli commands

commit b385833cbe
parent 30dbc6212f

.gitignore (vendored):
@@ -1 +1,2 @@
 test
+__pycache__

calendar feed generator (lumbunglib.cloudcal):
@@ -1,8 +1,3 @@
-#!/bin/python3
-
-# lumbung.space calendar feed generator
-# © 2021 roel roscam abbing gplv3 etc
-
 import os
 import re
 import shutil
@@ -187,22 +182,23 @@ def update_event_post(post_dir, event):
         print("Event current: ", event.name, "({})".format(event.uid))


-for event in list(cal.events):
-
-    post_dir = os.path.join(output_dir, event.uid)
-
-    if event.uid not in existing_posts:
-        # if there is an event we dont already have, make it
-        create_event_post(post_dir, event)
-
-    elif event.uid in existing_posts:
-        # if we already have it, update
-        update_event_post(post_dir, event)
-        existing_posts.remove(
-            event.uid
-        )  # create list of posts which have not been returned by the calendar
-
-for post in existing_posts:
-    # remove events not returned by the calendar (deletion)
-    print("deleted", post)
-    shutil.rmtree(os.path.join(output_dir, post))
+def main():
+    for event in list(cal.events):
+
+        post_dir = os.path.join(output_dir, event.uid)
+
+        if event.uid not in existing_posts:
+            # if there is an event we dont already have, make it
+            create_event_post(post_dir, event)
+
+        elif event.uid in existing_posts:
+            # if we already have it, update
+            update_event_post(post_dir, event)
+            existing_posts.remove(
+                event.uid
+            )  # create list of posts which have not been returned by the calendar
+
+    for post in existing_posts:
+        # remove events not returned by the calendar (deletion)
+        print("deleted", post)
+        shutil.rmtree(os.path.join(output_dir, post))
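With this change the calendar sync only runs when main() is called, for instance by the "cal" command registered in pyproject.toml further down, instead of running as a side effect of importing the module. A minimal sketch of how the module could additionally keep working when executed directly; this guard is hypothetical and not part of the commit:

    # hypothetical addition, not in this diff: lets direct execution of the
    # module behave like the installed "cal" console script by calling main()
    if __name__ == "__main__":
        main()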

rss feed aggregator:
@@ -1,8 +1,3 @@
-#!/bin/python3
-
-# lumbung.space rss feed aggregator
-# © 2021 roel roscam abbing gplv3 etc
-
 import os
 import shutil
 import time
@@ -100,6 +95,9 @@ def create_post(post_dir, entry):

     parsed_content = parse_posts(post_dir, post_content)

+    template_dir = os.path.join(Path(__file__).parent.resolve(), "templates")
+    env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
+    template = env.get_template("feed.md")
     with open(os.path.join(post_dir, "index.html"), "w") as f:  # n.b. .html
         post = template.render(frontmatter=frontmatter, content=parsed_content)
         f.write(post)
@@ -140,7 +138,6 @@ def parse_posts(post_dir, post_content):
     """
     soup = BeautifulSoup(post_content, "html.parser")
     allowed_iframe_sources = ["youtube.com", "vimeo.com", "tv.lumbung.space"]
-    media = []

     for img in soup(["img", "object"]):
         local_image = grab_media(post_dir, img["src"])
@@ -184,70 +181,64 @@ def grab_feed(feed_url):
         return False


-feed_urls = open("feeds_list.txt", "r").read().splitlines()
-
-start = time.time()
-
-if not os.path.exists("etags"):
-    os.mkdir("etags")
-
-output_dir = os.environ.get("OUTPUT_DIR")
-
-template_dir = os.path.join(Path(__file__).parent.resolve(), "templates")
-env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
-
-if not os.path.exists(output_dir):
-    os.makedirs(output_dir)
-
-# add iframe to the allowlist of feedparser's sanitizer,
-# this is now handled in parse_post()
-feedparser.sanitizer._HTMLSanitizer.acceptable_elements |= {"iframe"}
-
-for feed_url in feed_urls:
-
-    template = env.get_template("feed.md")
-    feed_name = urlparse(feed_url).netloc
-
-    feed_dir = os.path.join(output_dir, feed_name)
-
-    if not os.path.exists(feed_dir):
-        os.makedirs(feed_dir)
-
-    existing_posts = os.listdir(feed_dir)
-
-    data = grab_feed(feed_url)
-
-    if data:
-        for entry in data.entries:
-            # if 'tags' in entry:
-            #     for tag in entry.tags:
-            #         for x in ['lumbung.space', 'D15', 'lumbung']:
-            #             if x in tag['term']:
-            #                 print(entry.title)
-            entry["feed_name"] = feed_name
-
-            post_name = slugify(entry.title)
-            post_dir = os.path.join(output_dir, feed_name, post_name)
-
-            if post_name not in existing_posts:
-                # if there is a blog entry we dont already have, make it
-                create_post(post_dir, entry)
-
-            elif post_name in existing_posts:
-                # if we already have it, update it
-                create_post(post_dir, entry)
-                existing_posts.remove(
-                    post_name
-                )  # create list of posts which have not been returned by the feed
-
-        for post in existing_posts:
-            # remove blog posts no longer returned by the RSS feed
-            print("deleted", post)
-            shutil.rmtree(os.path.join(feed_dir, slugify(post)))
-
-
-end = time.time()
-
-print(end - start)
+def main():
+    feed_urls = open("feeds_list.txt", "r").read().splitlines()
+
+    start = time.time()
+
+    if not os.path.exists("etags"):
+        os.mkdir("etags")
+
+    output_dir = os.environ.get("OUTPUT_DIR")
+
+    if not os.path.exists(output_dir):
+        os.makedirs(output_dir)
+
+    # add iframe to the allowlist of feedparser's sanitizer,
+    # this is now handled in parse_post()
+    feedparser.sanitizer._HTMLSanitizer.acceptable_elements |= {"iframe"}
+
+    for feed_url in feed_urls:
+
+        feed_name = urlparse(feed_url).netloc
+
+        feed_dir = os.path.join(output_dir, feed_name)
+
+        if not os.path.exists(feed_dir):
+            os.makedirs(feed_dir)
+
+        existing_posts = os.listdir(feed_dir)
+
+        data = grab_feed(feed_url)
+
+        if data:
+            for entry in data.entries:
+                # if 'tags' in entry:
+                #     for tag in entry.tags:
+                #         for x in ['lumbung.space', 'D15', 'lumbung']:
+                #             if x in tag['term']:
+                #                 print(entry.title)
+                entry["feed_name"] = feed_name
+
+                post_name = slugify(entry.title)
+                post_dir = os.path.join(output_dir, feed_name, post_name)
+
+                if post_name not in existing_posts:
+                    # if there is a blog entry we dont already have, make it
+                    create_post(post_dir, entry)
+
+                elif post_name in existing_posts:
+                    # if we already have it, update it
+                    create_post(post_dir, entry)
+                    existing_posts.remove(
+                        post_name
+                    )  # create list of posts which have not been returned by the feed
+
+        for post in existing_posts:
+            # remove blog posts no longer returned by the RSS feed
+            print("deleted", post)
+            shutil.rmtree(os.path.join(feed_dir, slugify(post)))
+
+    end = time.time()
+
+    print(end - start)
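The feed script follows the same pattern: the jinja2 environment and template lookup move out of module scope and into create_post(), and main() now expects OUTPUT_DIR to be set in the environment. A standalone sketch of that per-call template setup, assuming a templates/feed.md file exists next to the script (the render values here are hypothetical):

    # sketch of the jinja2 setup create_post() now performs on each call;
    # assumes ./templates/feed.md exists relative to this file
    import os
    from pathlib import Path

    import jinja2

    template_dir = os.path.join(Path(__file__).parent.resolve(), "templates")
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
    template = env.get_template("feed.md")

    # hypothetical values, just to show the render call
    print(template.render(frontmatter={"title": "example"}, content="<p>hello</p>"))

Rebuilding the environment per post costs a little on every call, but it removes the import-time side effects that would otherwise run as soon as the console script imports the module.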

hashtag publishing bot:
@@ -1,9 +1,3 @@
-# lumbung.space hashtag publishing bot
-# © 2021 roel roscam abbing agplv3
-# Makes Hugo posts out of hashtag feeds on Mastodon.
-# Requires an account on the Mastodon instance configured.
-# Currently does not do any thread recreation and only handles images
-
 import os
 import shutil
 from pathlib import Path
@@ -12,23 +6,11 @@ import jinja2
 import requests
 from mastodon import Mastodon

-# Which instance to login to
 instance = "https://social.lumbung.space"
-
-# n.b. if it is the first time you use this script
-# You need to register the app:
-# https://mastodonpy.readthedocs.io/en/stable/#module-mastodon
-
-# Login credentials for bot account
 email = ""
 password = ""
-
-# Which hashtags to publish
 hashtags = ["jalansesama"]
-
-# your Hugo content directory
-output_dir = os.environ.get("OUTPUT_DIR", "path/to/hugo/content")


 def login_mastodon_bot():
     mastodon = Mastodon(
@@ -82,6 +64,14 @@ def create_post(post_directory, post_metadata):
     if not os.path.exists(post_directory):
         os.mkdir(post_directory)

+    template_dir = os.path.join(Path(__file__).parent.resolve(), "templates")
+    env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
+
+    env.filters["localize_media_url"] = localize_media_url
+    env.filters["filter_mastodon_urls"] = filter_mastodon_urls
+
+    template = env.get_template("hashtag.md")
+
     with open(os.path.join(post_directory, "index.html"), "w") as f:
         post = template.render(post_metadata=post_metadata)
         f.write(post)
@@ -106,57 +96,48 @@ def filter_mastodon_urls(content):
     return content


-mastodon = login_mastodon_bot()
-
-output_dir = output_dir
-if not os.path.exists(output_dir):
-    os.mkdir(output_dir)
-
-for hashtag in hashtags:
-
-    template_dir = os.path.join(Path(__file__).parent.resolve(), "templates")
-    env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
-    hashtag_dir = os.path.join(output_dir, hashtag)
-    if not os.path.exists(hashtag_dir):
-        os.mkdir(hashtag_dir)
-
-    env.filters["localize_media_url"] = localize_media_url
-    env.filters["filter_mastodon_urls"] = filter_mastodon_urls
-    existing_posts = os.listdir(hashtag_dir)  # list all existing posts
-
-    template = env.get_template("hashtag.md")
-    timeline = mastodon.timeline_hashtag(
-        hashtag, local=True, only_media=True
-    )  # returns max 20 queries and only with media
-    timeline = mastodon.fetch_remaining(
-        timeline
-    )  # returns all the rest n.b. can take a while because of rate limit
-
-    for post_metadata in timeline:
-        post_dir = os.path.join(hashtag_dir, str(post_metadata["id"]))
-
-        # if there is a post in the feed we dont already have locally, make it
-        if str(post_metadata["id"]) not in existing_posts:
-
-            if not post_metadata[
-                "local_only"
-            ]:  # if you get an error here then you are using vanilla Mastodon, this is a Hometown or Glitch only feature
-                create_post(post_dir, post_metadata)
-
-        # if we already have the post do nothing, possibly update
-        elif str(post_metadata["id"]) in existing_posts:
-            # update_post(post_dir, post_metadata)
-            existing_posts.remove(
-                str(post_metadata["id"])
-            )  # create list of posts which have not been returned in the feed
-
-    for post in existing_posts:
-        print("deleted", post)  # rm posts that exist but are no longer returned in feed
-        shutil.rmtree(os.path.join(hashtag_dir, post))
+def main():
+    mastodon = login_mastodon_bot()
+
+    output_dir = os.environ.get("OUTPUT_DIR")
+    if not os.path.exists(output_dir):
+        os.mkdir(output_dir)
+
+    for hashtag in hashtags:
+
+        hashtag_dir = os.path.join(output_dir, hashtag)
+        if not os.path.exists(hashtag_dir):
+            os.mkdir(hashtag_dir)
+
+        existing_posts = os.listdir(hashtag_dir)  # list all existing posts
+
+        timeline = mastodon.timeline_hashtag(
+            hashtag, local=True, only_media=True
+        )  # returns max 20 queries and only with media
+        timeline = mastodon.fetch_remaining(
+            timeline
+        )  # returns all the rest n.b. can take a while because of rate limit
+
+        for post_metadata in timeline:
+            post_dir = os.path.join(hashtag_dir, str(post_metadata["id"]))
+
+            # if there is a post in the feed we dont already have locally, make it
+            if str(post_metadata["id"]) not in existing_posts:
+
+                if not post_metadata[
+                    "local_only"
+                ]:  # if you get an error here then you are using vanilla Mastodon, this is a Hometown or Glitch only feature
+                    create_post(post_dir, post_metadata)
+
+            # if we already have the post do nothing, possibly update
+            elif str(post_metadata["id"]) in existing_posts:
+                # update_post(post_dir, post_metadata)
+                existing_posts.remove(
+                    str(post_metadata["id"])
+                )  # create list of posts which have not been returned in the feed
+
+        for post in existing_posts:
+            print(
+                "deleted", post
+            )  # rm posts that exist but are no longer returned in feed
+            shutil.rmtree(os.path.join(hashtag_dir, post))
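Here too the jinja2 setup, including the two custom URL filters, now lives inside create_post(). The filter-registration pattern itself, shown standalone with a made-up filter and an inline template so it runs anywhere:

    # illustration of registering a jinja2 filter the way create_post() registers
    # localize_media_url / filter_mastodon_urls; the "shout" filter is hypothetical
    import jinja2

    def shout(text):
        return text.upper()

    env = jinja2.Environment()
    env.filters["shout"] = shout

    template = env.from_string("{{ post | shout }}")
    print(template.render(post="hello fediverse"))  # prints HELLO FEDIVERSE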

video feed generator (lumbunglib.video):
@@ -1,8 +1,3 @@
-#!/bin/python3
-
-# lumbung.space video feed generator
-# c 2021 roel roscam abbing gpvl3 etc
-
 import ast
 import datetime
 import json
@@ -15,6 +10,9 @@ import jinja2
 import peertube
 import requests

+host = "https://tv.lumbung.space"
+configuration = peertube.Configuration(host=host + "/api/v1")
+client = peertube.ApiClient(configuration)

 # jinja filters & config
 def duration(n):
@@ -35,29 +33,10 @@ def linebreaks(text):
     return br.sub(r"<br />\n", text)


-template_dir = os.path.join(Path(__file__).parent.resolve(), "templates")
-env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
-env.filters["duration"] = duration
-env.filters["linebreaks"] = linebreaks
-
-host = "https://tv.lumbung.space"
-
-configuration = peertube.Configuration(host=host + "/api/v1")
-
-client = peertube.ApiClient(configuration)
-
-v = peertube.VideoApi(client)
-
-response = v.videos_get(count=100, filter="local", tags_one_of="publish")
-
-videos = response.to_dict()
-videos = videos["data"]
-
-
-def create_post(post_directory, video_metadata):
+def create_post(post_directory, video_metadata, host):
     global client  # lazy

-    if not os.path.exists(post_dir):
+    if not os.path.exists(post_directory):
         os.mkdir(post_directory)

     preview_image = video_metadata["preview_path"].split("/")[-1]
@@ -77,6 +56,12 @@ def create_post(post_directory, video_metadata):
         long_description = ast.literal_eval(api_response)
         video_metadata["description"] = long_description["description"]

+    template_dir = os.path.join(Path(__file__).parent.resolve(), "templates")
+    env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
+    env.filters["duration"] = duration
+    env.filters["linebreaks"] = linebreaks
+    template = env.get_template("video.md")
+
     with open(os.path.join(post_directory, "index.md"), "w") as f:
         post = template.render(v=video_metadata, host=host, preview_image=preview_image)
         f.write(post)
@@ -86,7 +71,7 @@ def create_post(post_directory, video_metadata):
         f.write(timestamp.format("X"))


-def update_post(post_directory, video_metadata):
+def update_post(post_directory, video_metadata, host):
     if os.path.exists(post_directory):
         if os.path.exists(os.path.join(post_directory, ".timestamp")):
             old_timestamp = open(os.path.join(post_directory, ".timestamp")).read()
@@ -101,7 +86,7 @@ def update_post(post_directory, video_metadata):
                     video_metadata["name"],
                     "({})".format(video_metadata["uuid"]),
                 )
-                create_post(post_dir, video_metadata)
+                create_post(post_directory, video_metadata, host)
             else:
                 print(
                     "Video current: ",
@@ -110,37 +95,43 @@ def update_post(post_directory, video_metadata):
                 )
     else:
         # compat for when there is no timestamp yet..
-        create_post(post_dir, video_metadata)
+        create_post(post_directory, video_metadata, host)


-output_dir = os.environ.get(
-    "OUTPUT_DIR", "/home/r/Programming/lumbung.space/lumbung.space-web/content/video"
-)
-
-if not os.path.exists(output_dir):
-    os.mkdir(output_dir)
-
-template = env.get_template("video.md")
-
-existing_posts = os.listdir(output_dir)
-
-for video_metadata in videos:
-    post_dir = os.path.join(output_dir, video_metadata["uuid"])
-
-    if (
-        video_metadata["uuid"] not in existing_posts
-    ):  # if there is a video we dont already have, make it
-        print("New: ", video_metadata["name"], "({})".format(video_metadata["uuid"]))
-        create_post(post_dir, video_metadata)
-
-    elif (
-        video_metadata["uuid"] in existing_posts
-    ):  # if we already have the video do nothing, possibly update
-        update_post(post_dir, video_metadata)
-        existing_posts.remove(
-            video_metadata["uuid"]
-        )  # create list of posts which have not been returned by peertube
-
-for post in existing_posts:
-    print("deleted", post)  # rm posts not returned
-    shutil.rmtree(os.path.join(output_dir, post))
+def main():
+    v = peertube.VideoApi(client)
+
+    response = v.videos_get(count=100, filter="local", tags_one_of="publish")
+
+    videos = response.to_dict()
+    videos = videos["data"]
+
+    output_dir = os.environ.get("OUTPUT_DIR")
+
+    if not os.path.exists(output_dir):
+        os.mkdir(output_dir)
+
+    existing_posts = os.listdir(output_dir)
+
+    for video_metadata in videos:
+        post_dir = os.path.join(output_dir, video_metadata["uuid"])
+
+        if (
+            video_metadata["uuid"] not in existing_posts
+        ):  # if there is a video we dont already have, make it
+            print(
+                "New: ", video_metadata["name"], "({})".format(video_metadata["uuid"])
+            )
+            create_post(post_dir, video_metadata, host)
+
+        elif (
+            video_metadata["uuid"] in existing_posts
+        ):  # if we already have the video do nothing, possibly update
+            update_post(post_dir, video_metadata, host)
+            existing_posts.remove(
+                video_metadata["uuid"]
+            )  # create list of posts which have not been returned by peertube
+
+    for post in existing_posts:
+        print("deleted", post)  # rm posts not returned
+        shutil.rmtree(os.path.join(output_dir, post))
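For the video generator, host is now passed to create_post()/update_post() as an argument instead of being read as a module global, and the hard-coded OUTPUT_DIR default path is gone, so a caller has to provide it. A hedged sketch of driving the refactored module from Python (the output path is hypothetical):

    # hypothetical invocation: set OUTPUT_DIR before calling main(), since the
    # old default path was removed in this commit
    import os

    os.environ["OUTPUT_DIR"] = "/tmp/hugo/content/video"  # made-up path

    from lumbunglib import video  # module name taken from the "vid" entry point below

    video.main()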

pyproject.toml:
@@ -19,3 +19,7 @@ peertube = {git = "https://framagit.org/framasoft/peertube/clients/python.git"}
 [build-system]
 requires = ["poetry-core>=1.0.0"]
 build-backend = "poetry.core.masonry.api"
+
+[tool.poetry.scripts]
+cal= "lumbunglib.cloudcal:main"
+vid = "lumbunglib.video:main"
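These [tool.poetry.scripts] entries are what turn the new main() functions into the commit's "cli commands": after installing the package (for example with poetry install), a "cal" and a "vid" executable land on the PATH. Each generated wrapper is roughly equivalent to the sketch below (the exact stub poetry writes may differ); note that only the calendar and video scripts are registered in the part of the hunk visible here.

    # rough equivalent of the console script generated for the "cal" entry point
    import sys

    from lumbunglib.cloudcal import main

    if __name__ == "__main__":
        sys.exit(main())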