diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..91abb11
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,11 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+
+version: 2
+updates:
+  - package-ecosystem: "pip" # See documentation for possible values
+    directory: "/" # Location of package manifests
+    schedule:
+      interval: "weekly"
diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml
new file mode 100644
index 0000000..383e65c
--- /dev/null
+++ b/.github/workflows/pylint.yml
@@ -0,0 +1,23 @@
+name: Pylint
+
+on: [push]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.8", "3.9", "3.10"]
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v3
+      with:
+        python-version: ${{ matrix.python-version }}
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install pylint
+    - name: Analysing the code with pylint
+      run: |
+        pylint $(git ls-files '*.py')
diff --git a/README.md b/README.md
index 343fac7..d7dd110 100644
--- a/README.md
+++ b/README.md
@@ -24,19 +24,17 @@ The script is simply called by a cron job and can run on any server (does not ha
 
 ## Setup
 
-```python
+```shell
 # clone this repo
 git clone https://github.com/cquest/tootbot.git
 cd tootbot
 
 # install required python modules
 pip3 install -r requirements.txt
-```
 
 # install additional required software
-
-apt install jq
-
+sudo apt install jq
+```
 
 ## Useage
 
diff --git a/cron-sample.sh b/cron-sample.sh
index d1946d2..0e5a804 100644
--- a/cron-sample.sh
+++ b/cron-sample.sh
@@ -7,7 +7,7 @@
 # 3- mastodon password
 # 4- instance domain (https:// is automatically added)
 # 5- max age (in days)
-# 6- footer tags to add (optionnal)
+# 6- footer tags to add (optional)
 
 python3 tootbot.py geonym_fr geonym@amicale.net **password** test.amicale.net
 python3 tootbot.py cq94 cquest@amicale.net **password** test.amicale.net
diff --git a/tootbot.py b/tootbot.py
index 9ef7c30..2753994 100755
--- a/tootbot.py
+++ b/tootbot.py
@@ -101,6 +101,7 @@ if source[:4] == 'http':
                 c = ("RT https://twitter.com/%s\n" % t.author[2:-1]) + c
             toot_media = []
             # get the pictures...
+
             if 'summary' in t:
                 for p in re.finditer(r"https://pbs.twimg.com/[^ \xa0\"]*", t.summary):
                     media = requests.get(p.group(0))
@@ -114,9 +115,19 @@ if source[:4] == 'http':
                         media.content, mime_type=media.headers.get('content-type'))
                     toot_media.append(media_posted['id'])
+                for p in re.finditer(r"https://i.redd.it/[a-zA-Z0-9]*\.(gif|jpg|mp4|png|webp)", t.summary):
+                    mediaUrl = p.group(0)
+                    try:
+                        media = requests.get(mediaUrl)
+                        media_posted = mastodon_api.media_post(
+                            media.content, mime_type=media.headers.get('content-type'))
+                        toot_media.append(media_posted['id'])
+                    except:
+                        print('Could not upload media to Mastodon! ' + mediaUrl)
+
 
             if 'links' in t:
                 for l in t.links:
-                    if l.type in ('image/jpg', 'image/png'):
+                    if l.type in ('image/gif', 'image/jpg', 'image/png', 'image/webp'):
                         media = requests.get(l.url)
                         media_posted = mastodon_api.media_post(
                             media.content, mime_type=media.headers.get('content-type'))
@@ -126,9 +137,12 @@ if source[:4] == 'http':
             m = re.search(r"http[^ \xa0]*", c)
             if m is not None:
                 l = m.group(0)
-                r = requests.get(l, allow_redirects=False)
-                if r.status_code in {301, 302}:
-                    c = c.replace(l, r.headers.get('Location'))
+                try:
+                    r = requests.get(l, allow_redirects=False)
+                    if r.status_code in {301, 302}:
+                        c = c.replace(l, r.headers.get('Location'))
+                except:
+                    print('Cannot resolve link redirect: ' + l)
 
             # remove ellipsis
             c = c.replace('\xa0…', ' ')
@@ -137,6 +151,10 @@ if source[:4] == 'http':
                 c = c + '\nSource: ' + t.authors[0].name
             c = c + '\n\n' + t.link
 
+            # replace links to reddit by libreddit ones
+            c = c.replace('old.reddit.com', 'libreddit.net')
+            c = c.replace('reddit.com', 'libreddit.net')
+
             if tags:
                 c = c + '\n' + tags
 
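Below is a small, self-contained sanity check of the i.redd.it media pattern used in the tootbot.py hunk above. It is only an illustrative sketch: the sample summary text and file names are invented, and it exercises just the regex, not the requests download or the mastodon_api.media_post() upload that the patch performs.

```python
import re

# Same pattern as in the patch: direct i.redd.it media links ending in a
# known image/video extension.
REDDIT_MEDIA = r"https://i.redd.it/[a-zA-Z0-9]*\.(gif|jpg|mp4|png|webp)"

# Invented feed summary, for illustration only.
sample_summary = (
    '<a href="https://i.redd.it/abc123.png">picture</a> '
    '<a href="https://i.redd.it/notmedia.txt">text file</a> '
    '<a href="https://i.redd.it/zzz999.mp4">clip</a>'
)

for p in re.finditer(REDDIT_MEDIA, sample_summary):
    # group(0) is the full URL, i.e. what the bot would pass to requests.get()
    print(p.group(0))

# Expected output:
# https://i.redd.it/abc123.png
# https://i.redd.it/zzz999.mp4
```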