# twoot/twoot.py
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (C) 2019-2022 Jean-Christophe Francois

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
2019-07-31 20:42:38 +00:00
import sys
2020-10-14 19:51:00 +00:00
import logging
2019-08-01 12:58:41 +00:00
import argparse
2019-07-31 20:42:38 +00:00
import os
2019-08-01 10:31:26 +00:00
import random
2019-07-31 20:42:38 +00:00
import requests
from bs4 import BeautifulSoup, element
import sqlite3
2020-12-19 08:21:39 +00:00
import datetime
import time
2019-07-31 20:42:38 +00:00
import re
from pathlib import Path
from mastodon import Mastodon, MastodonError, MastodonAPIError, MastodonIllegalArgumentError
2020-03-29 11:41:49 +00:00
import subprocess
2020-03-26 19:50:59 +00:00
import shutil
2020-03-25 16:40:07 +00:00
# Pool of public nitter mirrors; one is picked at random for each run
# to spread the scraping load and survive individual mirror outages.
NITTER_URLS = [
    'https://nitter.42l.fr',
    'https://nitter.pussthecat.org',
    'https://nitter.fdn.fr',
    'https://nitter.eu',
    'https://nitter.namazso.eu',
    'https://n.actionsack.com',
    'https://nitter.moomoo.me',
    'https://n.ramle.be',
]

# Update from https://www.whatismybrowser.com/guides/the-latest-user-agent/
# A random user agent is sent with each request so requests look like they
# come from a regular browser.
USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:103.0) Gecko/20100101 Firefox/103.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 12_5_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6 Safari/605.1.15',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36 Vivaldi/5.4.2753.37',
]
2019-07-31 20:42:38 +00:00
2020-12-17 21:08:43 +00:00
def process_media_body(tt_iter):
    """
    Receives an iterator over all the elements contained in the tweet-text container.
    Processes them to make them suitable for posting on Mastodon
    :param tt_iter: iterator over the HTML elements in the text of the tweet
    :return: cleaned up text of the tweet
    """
    parts = []
    for node in tt_iter:
        if isinstance(node, element.NavigableString):
            # Plain text is copied verbatim
            parts.append(node.string)
        elif node.name == 'a':
            anchor_text = node.get_text()
            if anchor_text.startswith(('@', '#')):
                # Mentions and hashtags: keep only the visible text
                parts.append(anchor_text)
            else:
                # This is a real link: keep the url itself
                parts.append(node.get('href'))
        else:
            logging.warning("No handler for tag in twitter text: " + node.prettify())
    return ''.join(parts)
def process_card(nitter_url, card_container):
    """
    Extract image from card in case mastodon does not do it
    :param nitter_url: url of the nitter mirror the page was fetched from
    :param card_container: soup of 'a' tag containing card markup
    :return: list with url of image (empty list if the card has no image)
    """
    # Renamed from 'list' which shadowed the builtin of the same name
    pic_urls = []

    img = card_container.div.div.img
    if img is not None:
        # The img src is relative to the nitter instance
        image_url = nitter_url + img.get('src')
        pic_urls.append(image_url)
        logging.debug('Extracted image from card')

    return pic_urls
2020-12-18 10:45:43 +00:00
def process_attachments(nitter_url, attachments_container, get_vids, twit_account, status_id, author_account):
    """
    Extract images or video from attachments. Videos are downloaded on the file system.
    :param nitter_url: url of nitter mirror
    :param attachments_container: soup of 'div' tag containing attachments markup
    :param get_vids: whether to download videos or not
    :param twit_account: name of twitter account
    :param status_id: id of tweet being processed
    :param author_account: author of tweet with video attachment
    :return: tuple (list with url of images, True if a video is present but was not downloaded)
    """
    # Collect url of images
    pics = []
    images = attachments_container.find_all('a', class_='still-image')
    for image in images:
        pics.append(nitter_url + image.get('href'))

    logging.debug('collected ' + str(len(pics)) + ' images from attachments')

    # Download nitter video (converted animated GIF)
    gif_class = attachments_container.find('video', class_='gif')
    if gif_class is not None:
        gif_video_file = nitter_url + gif_class.source.get('src')

        video_path = os.path.join('output', twit_account, status_id, author_account, status_id)
        os.makedirs(video_path, exist_ok=True)

        # Write the file directly into the target directory instead of using
        # os.chdir(): mutating the process-wide cwd was not restored when the
        # download raised, which would break every later relative path.
        with requests.get(gif_video_file, stream=True) as r:
            r.raise_for_status()
            # Download chunks and write them to file
            with open(os.path.join(video_path, 'gif_video.mp4'), 'wb') as f:
                for chunk in r.iter_content(chunk_size=16 * 1024):
                    f.write(chunk)

        logging.debug('downloaded video of GIF animation from attachments')

    # Download twitter video
    vid_in_tweet = False
    vid_class = attachments_container.find('div', class_='video-container')
    if vid_class is not None:
        video_file = os.path.join('https://twitter.com', author_account, 'status', status_id)
        if get_vids:
            # Download video from twitter and store in filesystem. Running as subprocess to avoid
            # requirement to install ffmpeg and ffmpeg-python for those that do not want to post videos
            # NOTE(review): subprocess.TimeoutExpired is not caught and will propagate — confirm intended
            try:
                # Set output location to ./output/twit_account/status_id
                dl_feedback = subprocess.run(
                    ["./twitterdl.py", video_file, "-ooutput/" + twit_account + "/" + status_id, "-w 500"],
                    capture_output=True,
                    timeout=300
                )
                if dl_feedback.returncode != 0:
                    logging.warning('Video in tweet ' + status_id + ' from ' + twit_account + ' failed to download')
                    vid_in_tweet = True
                else:
                    logging.debug('downloaded twitter video from attachments')
            except OSError:
                logging.fatal("Could not execute twitterdl.py (is it there? Is it set as executable?)")
                sys.exit(-1)
        else:
            vid_in_tweet = True

    return pics, vid_in_tweet
2020-12-18 10:45:43 +00:00
2020-02-15 14:39:01 +00:00
def contains_class(body_classes, some_class):
    """
    Check whether a CSS class is present in a list of classes.
    :param body_classes: list of classes to search
    :param some_class: class that we are interested in
    :return: True if found, False otherwise
    """
    # Membership test replaces the original manual flag-and-loop scan,
    # which always walked the whole list even after a match
    return some_class in body_classes
def is_time_valid(timestamp, max_age, min_delay):
    """
    Check that the tweet is not too young (might still be deleted)
    nor too old for posting.
    :param timestamp: tweet timestamp in seconds since the epoch
    :param min_delay: minimum age in minutes
    :param max_age: maximum age in days
    :return: True when the tweet age lies within [min_delay, max_age]
    """
    age_in_hours = (time.time() - float(timestamp)) / 3600.0
    # Valid iff min_delay (converted to hours) <= age <= max_age (in hours)
    return min_delay / 60.0 <= age_in_hours <= max_age * 24.0
2022-09-14 14:28:48 +00:00
def login(instance, account, password):
    """
    Log in to the Mastodon instance, creating the 'twoot' app on it first
    if no <instance>.secret client file exists yet.
    :param instance: hostname of the Mastodon instance (no scheme prefix)
    :param account: Mastodon account name to log in with
    :param password: password of the Mastodon account
    :return: logged-in Mastodon API client object
    Exits the process with -1 if app creation or login fails.
    """
    # Create Mastodon application if it does not exist yet
    if not os.path.isfile(instance + '.secret'):
        try:
            Mastodon.create_app(
                'twoot',
                api_base_url='https://' + instance,
                to_file=instance + '.secret'
            )
        except MastodonError as me:
            logging.fatal('failed to create app on ' + instance)
            logging.fatal(me)
            sys.exit(-1)

    # Log in to Mastodon instance
    try:
        mastodon = Mastodon(
            client_id=instance + '.secret',
            api_base_url='https://' + instance
        )
        mastodon.log_in(
            username=account,
            password=password,
            # Access token is cached per account for later runs
            to_file=account + ".secret"
        )
        logging.info('Logging in to ' + instance)
    except MastodonError as me:
        logging.fatal('ERROR: Login to ' + instance + ' Failed')
        logging.fatal(me)
        sys.exit(-1)

    # Check ratelimit status
    logging.info('Ratelimit allowed requests: ' + str(mastodon.ratelimit_limit))
    logging.info('Ratelimit remaining requests: ' + str(mastodon.ratelimit_remaining))
    logging.info('Ratelimit reset time: ' + time.asctime(time.localtime(mastodon.ratelimit_reset)))
    logging.info('Ratelimit last call: ' + time.asctime(time.localtime(mastodon.ratelimit_lastcall)))

    return mastodon
2020-12-16 18:43:17 +00:00
2019-08-01 12:58:41 +00:00
def main(argv):
    """
    Entry point: download a twitter account's timeline from a random nitter
    mirror, convert tweets not yet seen into toots, and post them to the
    configured Mastodon account, recording each posted tweet in twoot.db.
    :param argv: command-line arguments (argparse reads sys.argv directly)
    """
    # Start stopwatch
    start_time = time.time()

    # Build parser for command line arguments
    parser = argparse.ArgumentParser(description='toot tweets.')
    parser.add_argument('-t', metavar='<twitter account>', action='store', required=True)
    parser.add_argument('-i', metavar='<mastodon instance>', action='store', required=True)
    parser.add_argument('-m', metavar='<mastodon account>', action='store', required=True)
    parser.add_argument('-p', metavar='<mastodon password>', action='store', required=True)
    parser.add_argument('-r', action='store_true', help='Also post replies to other tweets')
    parser.add_argument('-v', action='store_true', help='Ingest twitter videos and upload to Mastodon instance')
    parser.add_argument('-a', metavar='<max age (in days)>', action='store', type=float, default=1)
    parser.add_argument('-d', metavar='<min delay (in mins)>', action='store', type=float, default=0)
    parser.add_argument('-c', metavar='<max # of toots to post>', action='store', type=int, default=0)

    # Parse command line
    args = vars(parser.parse_args())

    twit_account = args['t']
    mast_instance = args['i']
    mast_account = args['m']
    mast_password = args['p']
    tweets_and_replies = args['r']
    get_vids = args['v']
    max_age = float(args['a'])
    min_delay = float(args['d'])
    cap = int(args['c'])

    # Remove previous log file
    #try:
    #    os.remove(twit_account + '.log')
    #except FileNotFoundError:
    #    pass

    # Setup logging to file (one log file per twitter account)
    logging.basicConfig(
        filename=twit_account + '.log',
        level=logging.INFO,
        format='%(asctime)s %(levelname)-8s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
    )

    logging.info('Running with the following parameters:')
    logging.info(' -t ' + twit_account)
    logging.info(' -i ' + mast_instance)
    logging.info(' -m ' + mast_account)
    logging.info(' -r ' + str(tweets_and_replies))
    logging.info(' -v ' + str(get_vids))
    logging.info(' -a ' + str(max_age))
    logging.info(' -d ' + str(min_delay))
    logging.info(' -c ' + str(cap))

    # Try to open database. If it does not exist, create it
    sql = sqlite3.connect('twoot.db')
    db = sql.cursor()
    db.execute('''CREATE TABLE IF NOT EXISTS toots (twitter_account TEXT, mastodon_instance TEXT,
               mastodon_account TEXT, tweet_id TEXT, toot_id TEXT)''')
    db.execute('''CREATE INDEX IF NOT EXISTS main_index ON toots (twitter_account,
               mastodon_instance, mastodon_account, tweet_id)''')

    # Select random nitter instance to fetch updates from
    nitter_url = NITTER_URLS[random.randint(0, len(NITTER_URLS) - 1)]

    # **********************************************************
    # Load twitter page of user. Process all tweets and generate
    # list of dictionaries ready to be posted on Mastodon
    # **********************************************************
    # To store content of all tweets from this user
    tweets = []

    # Initiate session
    session = requests.Session()

    # Get a copy of the default headers that requests would use
    headers = requests.utils.default_headers()

    # Update default headers with randomly selected user agent
    headers.update(
        {
            'User-Agent': USER_AGENTS[random.randint(0, len(USER_AGENTS)-1)],
            # Cookie tells nitter to keep twitter/youtube urls and enable hls playback
            'Cookie': 'replaceTwitter=; replaceYouTube=; hlsPlayback=on; proxyVideos=',
        }
    )

    url = nitter_url + '/' + twit_account

    # Use different page if we need to handle replies
    if tweets_and_replies:
        url += '/with_replies'

    # Download twitter page of user.
    try:
        twit_account_page = session.get(url, headers=headers)
    except requests.exceptions.ConnectionError:
        logging.fatal('Host did not respond when trying to download ' + url)
        exit(-1)

    # Verify that download worked
    if twit_account_page.status_code != 200:
        logging.fatal('The Nitter page did not download correctly from ' + url + ' (' + str(twit_account_page.status_code) + '). Aborting')
        exit(-1)

    logging.info('Nitter page downloaded successfully from ' + url)

    # DEBUG: Save page to file
    # of = open(twit_account + '.html', 'w')
    # of.write(twit_account_page.text)
    # of.close()

    # Make soup
    soup = BeautifulSoup(twit_account_page.text, 'html.parser')

    # Replace twit_account with version with correct capitalization
    ta = soup.find('meta', property='og:title').get('content')
    ta_match = re.search(r'\(@(.+)\)', ta)
    if ta_match is not None:
        twit_account = ta_match.group(1)

    # Extract twitter timeline
    timeline = soup.find_all('div', class_='timeline-item')

    logging.info('Processing ' + str(len(timeline)) + ' tweets found in timeline')

    # **********************************************************
    # Process each tweets and generate dictionary
    # with data ready to be posted on Mastodon
    # **********************************************************
    out_date_cnt = 0
    in_db_cnt = 0
    for status in timeline:
        # Extract tweet ID and status ID
        tweet_id = status.find('a', class_='tweet-link').get('href').strip('#m')
        status_id = tweet_id.split('/')[3]

        logging.debug('processing tweet %s', tweet_id)

        # Extract time stamp
        time_string = status.find('span', class_='tweet-date').a.get('title')
        # NOTE(review): bare except silently falls through to the second date
        # format; any other strptime failure surfaces as an error on the
        # second attempt instead of the first — consider except ValueError
        try:
            timestamp = datetime.datetime.strptime(time_string, '%d/%m/%Y, %H:%M:%S').timestamp()
        except:
            # Dec 21, 2021 · 12:00 PM UTC
            timestamp = datetime.datetime.strptime(time_string, '%b %d, %Y · %I:%M %p %Z').timestamp()

        # Check if time is within acceptable range
        if not is_time_valid(timestamp, max_age, min_delay):
            out_date_cnt += 1
            logging.debug("Tweet outside valid time range, skipping")
            continue

        # Check in database if tweet has already been posted
        db.execute("SELECT * FROM toots WHERE twitter_account=? AND mastodon_instance=? AND mastodon_account=? AND tweet_id=?",
                   (twit_account, mast_instance, mast_account, tweet_id))
        tweet_in_db = db.fetchone()

        if tweet_in_db is not None:
            in_db_cnt += 1
            logging.debug("Tweet %s already in database", tweet_id)
            # Skip to next tweet
            continue
        else:
            logging.debug('Tweet %s not found in database', tweet_id)

        # extract author
        author = status.find('a', class_='fullname').get('title')

        # Extract user name
        author_account = status.find('a', class_='username').get('title').lstrip('@')

        # Extract URL of full status page (for video download)
        full_status_url = 'https://twitter.com' + tweet_id

        # Initialize containers
        tweet_text = ''
        photos = []

        # Add prefix if the tweet is a reply-to
        # Only consider item of class 'replying-to' that is a direct child
        # of class 'tweet-body' in status. Others can be in a quoted tweet.
        replying_to_class = status.select("div.tweet-body > div.replying-to")
        if len(replying_to_class) != 0:
            tweet_text += 'Replying to ' + replying_to_class[0].a.get_text() + '\n\n'

        # Check it the tweet is a retweet from somebody else
        # NOTE(review): this assignment overwrites any 'Replying to' prefix
        # added just above — confirm that is intended for retweets
        if author_account.lower() != twit_account.lower():
            tweet_text = 'RT from ' + author + ' (@' + author_account + ')\n\n'

        # extract iterator over tweet text contents
        tt_iter = status.find('div', class_='tweet-content media-body').children

        # Process text of tweet
        tweet_text += process_media_body(tt_iter)

        # Process quote: append link to tweet_text
        quote_div = status.find('a', class_='quote-link')
        if quote_div is not None:
            tweet_text += '\n\nhttps://twitter.com' + quote_div.get('href').strip('#m')

        # Process card : extract image if necessary
        card_class = status.find('a', class_='card-container')
        if card_class is not None:
            photos.extend(process_card(nitter_url, card_class))

        # Process attachment: capture image or .mp4 url or download twitter video
        attachments_class = status.find('div', class_='attachments')
        if attachments_class is not None:
            pics, vid_in_tweet = process_attachments(nitter_url, attachments_class, get_vids, twit_account, status_id, author_account)
            photos.extend(pics)
            if vid_in_tweet:
                tweet_text += '\n\n[Video embedded in original tweet]'

        # Add footer with link to original tweet
        tweet_text += '\n\nOriginal tweet : ' + full_status_url

        # If no media was specifically added in the tweet, try to get the first picture
        # with "twitter:image" meta tag in first linked page in tweet text
        if not photos:
            m = re.search(r"http[^ \n\xa0]*", tweet_text)
            if m is not None:
                link_url = m.group(0)
                if link_url.endswith(".html"):  # Only process a web page
                    try:
                        r = requests.get(link_url, timeout=10)
                        if r.status_code == 200:
                            # Matches the first instance of either twitter:image or twitter:image:src meta tag
                            match = re.search(r'<meta name="twitter:image(?:|:src)" content="(.+?)".*?>', r.text)
                            if match is not None:
                                url = match.group(1).replace('&amp;', '&')  # Remove HTML-safe encoding from URL if any
                                photos.append(url)
                    # Give up if anything goes wrong
                    except (requests.exceptions.ConnectionError,
                            requests.exceptions.Timeout,
                            requests.exceptions.ContentDecodingError,
                            requests.exceptions.TooManyRedirects,
                            requests.exceptions.MissingSchema):
                        pass
                    else:
                        logging.debug("downloaded twitter:image from linked page")

        # Check if video was downloaded
        video_file = None

        video_path = Path('./output') / twit_account / status_id
        if video_path.exists():
            # Take the first subdirectory of video path (named after original poster of video)
            video_path = [p for p in video_path.iterdir() if p.is_dir()][0]
            # Take again the first subdirectory of video path (named after status id of original post where video is attached)
            video_path = [p for p in video_path.iterdir() if p.is_dir()][0]
            # list video files
            video_file_list = list(video_path.glob('*.mp4'))
            if len(video_file_list) != 0:
                # Extract posix path of first video file in list
                video_file = video_file_list[0].absolute().as_posix()

        # Add dictionary with content of tweet to list
        tweet = {
            "author": author,
            "author_account": author_account,
            "timestamp": timestamp,
            "tweet_id": tweet_id,
            "tweet_text": tweet_text,
            "video": video_file,
            "photos": photos,
        }
        tweets.append(tweet)

        logging.debug('Tweet %s added to list of toots to upload', tweet_id)

    # Log summary stats
    logging.info(str(out_date_cnt) + ' tweets outside of valid time range')
    logging.info(str(in_db_cnt) + ' tweets already in database')

    # DEBUG: Print extracted tweets
    # for t in tweets:
    #     print(t)

    # Login to account on maston instance (only if there is something to post)
    mastodon = None
    if len(tweets) != 0:
        mastodon = login(mast_instance, mast_account, mast_password)

    # **********************************************************
    # Iterate tweets in list.
    # post each on Mastodon and record it in database
    # **********************************************************
    posted_cnt = 0
    for tweet in reversed(tweets):
        # Check if we have reached the cap on the number of toots to post
        if cap != 0 and posted_cnt >= cap:
            logging.info('%d toots not posted due to configured cap', len(tweets) - cap)
            break

        logging.debug('Uploading Tweet %s', tweet["tweet_id"])

        media_ids = []

        # Upload video if there is one
        if tweet['video'] is not None:
            try:
                logging.debug("Uploading video to Mastodon")
                media_posted = mastodon.media_post(tweet['video'])
                media_ids.append(media_posted['id'])
            except (MastodonAPIError, MastodonIllegalArgumentError, TypeError):  # Media cannot be uploaded (invalid format, dead link, etc.)
                logging.debug("Uploading video failed")
                pass

        else:  # Only upload pic if no video was uploaded
            # Upload photos
            for photo in tweet['photos']:
                media = False
                # Download picture
                try:
                    logging.debug('downloading picture')
                    media = requests.get(photo)
                except:  # Picture cannot be downloaded for any reason
                    pass

                # Upload picture to Mastodon instance
                if media:
                    try:
                        logging.debug('uploading picture to Mastodon')
                        media_posted = mastodon.media_post(media.content, mime_type=media.headers['content-type'])
                        media_ids.append(media_posted['id'])
                    except (MastodonAPIError, MastodonIllegalArgumentError, TypeError):  # Media cannot be uploaded (invalid format, dead link, etc.)
                        pass

        # Post toot
        # NOTE(review): this re-creates the Mastodon client from the access
        # token file on every iteration, replacing the client returned by
        # login() — confirm this is intentional
        try:
            mastodon = Mastodon(
                access_token=mast_account + '.secret',
                api_base_url='https://' + mast_instance
            )

            if len(media_ids) == 0:
                toot = mastodon.status_post(tweet['tweet_text'], visibility='public')
            else:
                toot = mastodon.status_post(tweet['tweet_text'], media_ids=media_ids, visibility='public')

        except MastodonError as me:
            logging.error('posting ' + tweet['tweet_text'] + ' to ' + mast_instance + ' Failed')
            logging.error(me)

        else:
            posted_cnt += 1
            logging.debug('Tweet %s posted on %s', tweet['tweet_id'], mast_account)

        # Insert toot id into database
        # NOTE(review): if the very first status_post raised, 'toot' is
        # unbound here and this raises NameError — verify
        if 'id' in toot:
            db.execute("INSERT INTO toots VALUES ( ? , ? , ? , ? , ? )",
                       (twit_account, mast_instance, mast_account, tweet['tweet_id'], toot['id']))
            sql.commit()

    logging.info(str(posted_cnt) + ' Tweets posted to Mastodon')

    # Cleanup downloaded video files
    try:
        shutil.rmtree('./output/' + twit_account)
    except FileNotFoundError:  # The directory does not exist
        pass

    logging.info('Run time : %2.1f seconds' % (time.time() - start_time))
    logging.info('_____________________________________________________________________________________')
2019-08-01 12:58:41 +00:00
if __name__ == "__main__":
main(sys.argv)