Merge branch 'master' of github.com:cquest/tootbot

commit d54ca76d7e
Author: cquest
Date: 2022-12-11 18:10:58 +01:00
5 changed files with 60 additions and 10 deletions

.github/dependabot.yml (new file)

@@ -0,0 +1,11 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
  - package-ecosystem: "pip" # See documentation for possible values
    directory: "/" # Location of package manifests
    schedule:
      interval: "weekly"

.github/workflows/pylint.yml (new file)

@@ -0,0 +1,23 @@
name: Pylint

on: [push]

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.8", "3.9", "3.10"]
    steps:
    - uses: actions/checkout@v3
    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v3
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install pylint
    - name: Analysing the code with pylint
      run: |
        pylint $(git ls-files '*.py')
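The check this workflow performs can also be reproduced locally before pushing. A minimal sketch (an illustration, not part of the commit), assuming pylint is installed in the current environment:

```python
import subprocess

# lint every tracked *.py file, like `pylint $(git ls-files '*.py')` in the workflow
py_files = subprocess.run(
    ["git", "ls-files", "*.py"],
    capture_output=True, text=True, check=True,
).stdout.split()
subprocess.run(["pylint", *py_files], check=False)
```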

README.md
@@ -24,19 +24,17 @@ The script is simply called by a cron job and can run on any server (does not ha
 ## Setup
-```python
+```shell
 # clone this repo
 git clone https://github.com/cquest/tootbot.git
 cd tootbot
 # install required python modules
 pip3 install -r requirements.txt
-```
 # install additional required software
-apt install jq
+sudo apt install jq
+```
 ## Useage

(file name not shown)
@@ -7,7 +7,7 @@
 # 3- mastodon password
 # 4- instance domain (https:// is automatically added)
 # 5- max age (in days)
-# 6- footer tags to add (optionnal)
+# 6- footer tags to add (optional)
 python3 tootbot.py geonym_fr geonym@amicale.net **password** test.amicale.net
 python3 tootbot.py cq94 cquest@amicale.net **password** test.amicale.net

tootbot.py
@@ -101,6 +101,7 @@ if source[:4] == 'http':
 c = ("RT https://twitter.com/%s\n" % t.author[2:-1]) + c
 toot_media = []
 # get the pictures...
 if 'summary' in t:
     for p in re.finditer(r"https://pbs.twimg.com/[^ \xa0\"]*", t.summary):
         media = requests.get(p.group(0))
@@ -114,9 +115,19 @@ if source[:4] == 'http':
 media.content, mime_type=media.headers.get('content-type'))
 toot_media.append(media_posted['id'])
for p in re.finditer(r"https://i.redd.it/[a-zA-Z0-9]*.(gif/jpg/mp4/png|webp)", t.summary):
mediaUrl = p.group(0)
try:
media = requests.get(mediaUrl)
media_posted = mastodon_api.media_post(
media.content, mime_type=media.headers.get('content-type'))
toot_media.append(media_posted['id'])
except:
print('Could not upload media to Mastodon! ' + mediaUrl)
 if 'links' in t:
     for l in t.links:
-        if l.type in ('image/jpg', 'image/png'):
+        if l.type in ('image/gif', 'image/jpg', 'image/png', 'image/webp'):
             media = requests.get(l.url)
             media_posted = mastodon_api.media_post(
                 media.content, mime_type=media.headers.get('content-type'))
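The i.redd.it pattern added above can be exercised on its own, outside the bot. A minimal standalone sketch with made-up URLs (not taken from the repository), showing what the finditer loop iterates over:

```python
import re

# fake summary HTML, standing in for a feedparser entry's t.summary
summary = ('<a href="https://i.redd.it/abc123def456.jpg">pic</a> '
           '<a href="https://i.redd.it/xyz789.mp4">clip</a>')

for p in re.finditer(r"https://i.redd.it/[a-zA-Z0-9]*\.(gif|jpg|mp4|png|webp)", summary):
    print(p.group(0))  # full matched URL, e.g. https://i.redd.it/abc123def456.jpg
```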
@@ -126,9 +137,12 @@ if source[:4] == 'http':
 m = re.search(r"http[^ \xa0]*", c)
 if m is not None:
     l = m.group(0)
-    r = requests.get(l, allow_redirects=False)
-    if r.status_code in {301, 302}:
-        c = c.replace(l, r.headers.get('Location'))
+    try:
+        r = requests.get(l, allow_redirects=False)
+        if r.status_code in {301, 302}:
+            c = c.replace(l, r.headers.get('Location'))
+    except:
+        print('Cannot resolve link redirect: ' + l)
 # remove ellipsis
 c = c.replace('\xa0', ' ')
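The redirect resolution wrapped in try/except above can also be tried in isolation. A minimal sketch of the same idea, using a placeholder shortened URL (not a real link from the repo); any server answering 301/302 behaves the same way:

```python
import requests

link = "https://t.co/XXXXXXXXXX"  # placeholder shortened URL

try:
    # fetch without following redirects, so the Location header stays visible
    r = requests.get(link, allow_redirects=False)
    if r.status_code in {301, 302}:
        print("expands to:", r.headers.get('Location'))
    else:
        print("no redirect, HTTP", r.status_code)
except requests.RequestException:
    print('Cannot resolve link redirect: ' + link)
```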
@@ -137,6 +151,10 @@ if source[:4] == 'http':
 c = c + '\nSource: ' + t.authors[0].name
 c = c + '\n\n' + t.link
+# replace links to reddit by libreddit ones
+c = c.replace('old.reddit.com', 'libreddit.net')
+c = c.replace('reddit.com', 'libreddit.net')
 if tags:
     c = c + '\n' + tags
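One detail worth noting about the two replace calls added above: the 'old.reddit.com' substitution runs before the plain 'reddit.com' one, otherwise old-reddit links would end up as 'old.libreddit.net'. A quick check with a made-up toot text:

```python
c = "https://old.reddit.com/r/foo and https://www.reddit.com/r/bar"
c = c.replace('old.reddit.com', 'libreddit.net')
c = c.replace('reddit.com', 'libreddit.net')
print(c)  # https://libreddit.net/r/foo and https://www.libreddit.net/r/bar
```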