Merge branch 'master' into to-do-bot-branch
This commit is contained in: commit 47befdf7c0
45 DOH-Dig/README.md Normal file
@@ -0,0 +1,45 @@
# doh-dig

A Python dig script that returns DNS record lookups as JSON, using Cloudflare's DNS-over-HTTPS servers.
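Under the hood this is just an HTTPS GET against Cloudflare's JSON DNS API, which is what the doh-dig script below does; a minimal sketch with requests (the record name and type here are example values):

```
import json
import requests

# Ask Cloudflare's DNS-over-HTTPS endpoint for an A record, as JSON.
headers = {'accept': 'application/dns-json'}
resp = requests.get('https://1.1.1.1/dns-query',
                    params={'name': 'google.com', 'type': 'A'},
                    headers=headers)
print(json.dumps(json.loads(resp.text).get('Answer', []), indent=4))
```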

## Usage

```
Usage:
  doh-dig type <type> <record>
  doh-dig ptr <ip>
  doh-dig (-h | --help)
  doh-dig --version
```

### Requirements

* [docopt](https://github.com/docopt/docopt)
* [requests](https://pypi.org/project/requests/)

### Examples

#### Look up an A record for google.com

./doh-dig type a google.com | python -m json.tool

```
[
    {
        "name": "google.com.",
        "type": 1,
        "TTL": 235,
        "data": "172.217.19.174"
    }
]
```

#### Look up the reverse (PTR) record for an IP

./doh-dig ptr 1.1.1.1 | python -m json.tool

```
[
    {
        "name": "1.1.1.1.in-addr.arpa.",
        "type": 12,
        "TTL": 1345,
        "data": "one.one.one.one."
    }
]
```
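For the ptr command, the script derives the in-addr.arpa name from the IP with the standard-library ipaddress module before querying; a minimal sketch of that step:

```
import ipaddress

# 1.1.1.1 -> '1.1.1.1.in-addr.arpa'
print(ipaddress.ip_address('1.1.1.1').reverse_pointer)
```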
52 DOH-Dig/doh-dig Executable file
@@ -0,0 +1,52 @@
#!/usr/bin/env python3
# Author: @awsumco

"""DNS OVER HTTPS - DIG

Usage:
  doh-dig type <type> <record>
  doh-dig ptr <ip>
  doh-dig (-h | --help)
  doh-dig --version

Options:
  -h --help     Show this screen.
  --version     Show version.

"""
from docopt import docopt
from sys import exit
import ipaddress
import json
import requests


def CloudFlareLookup(type, record):
    headers = {'accept': 'application/dns-json'}
    url = "https://1.1.1.1/dns-query?name=%s&type=%s" % (record, type)
    r = requests.get(url, headers=headers)
    j_data = json.loads(r.text)
    try:
        return j_data['Answer']
    except KeyError:
        # No answer section (e.g. NXDOMAIN); return the echoed question instead.
        return j_data['Question']


valid_types = ['A', 'MX', 'PTR', 'SRV', 'TXT', 'NS']

if __name__ == '__main__':
    arguments = docopt(__doc__, version='doh-dig 0.1')
    if arguments['type']:
        t = arguments['<type>'].upper()
        r = arguments['<record>'].lower()
        if t not in valid_types:
            exit('invalid type')
        x = CloudFlareLookup(t, r)
        print(json.dumps(x))
    elif arguments['ptr']:
        ip = arguments['<ip>']
        arpa = ipaddress.ip_address(ip).reverse_pointer
        x = CloudFlareLookup('PTR', arpa)
        print(json.dumps(x))
    else:
        print(arguments)
2 DOH-Dig/requirements.txt Normal file
@@ -0,0 +1,2 @@
docopt
requests
README.md
@@ -13,6 +13,7 @@ So far, the following projects have been integrated to this repo:
|--|--|
|[File Encrypt Decrypt](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/file-encrypt-decrypt)|[Aditya Arakeri](https://github.com/adityaarakeri)|
| [Address locator](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/Location_Of_Adress) | [Chris]() |
| [Automated emails](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/automated_email) | [Suvigya](https://github.com/SuvigyaJain1) |
|[AI chatbot](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/Artificial-intelligence_bot) |[umar abdullahi](https://github.com/umarbrowser) |
|[Asymmetric Encryption](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/asymmetric_cryptography) |[victor matheus](https://github.com/victormatheusc) |
|[Bitcoin price GUI](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/Bitcoin-Price-GUI) |[Amirul Abu](https://github.com/amirulabu) |
@@ -26,6 +27,7 @@ So far, the following projects have been integrated to this repo:
| [Crypt socket](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/Crypt_Socket)|[Willian GL](https://github.com/williangl) |
|[Current City Weather](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/Current_City_Weather) |[Jesse Bridge](https://github.com/jessebridge) |
|[Directory organizer](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/Directory-organizer) | [Athul P](https://github.com/athulpn) |
|[DOH DIG](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/DOH-Dig/) | [Ryan](https://github.com/awsumco) |
|[Excel Files Merger](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/Excel_Files_Merger) | [Andrei N](https://github.com/Andrei-Niculae)|
|[Excel to List](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/Excel_to_ListofList) | [Nitish Srivastava](https://github.com/nitish-iiitd)|
|[Extended_ip_address_info](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/extended_ip_address_info) | [hafpaf](https://github.com/hafpaf)|
@@ -55,6 +57,7 @@ So far, the following projects have been integrated to this repo:
|[Subtitle downloader](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/Subtitle-downloader)|[Kaushlendra Pratap](https://github.com/kaushl1998)|
|[Take Screenshot](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/Take_screenshot)|[Moad Mohammed Elhebri](https://github.com/moadmmh)|
|[To Do Bot](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/To-Do-Bot) | [Darshan Patel](https://github.com/DarshanPatel11)|
|[Upload Files to S3](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/Upload_files_to_s3)|[Jayram Nai](https://github.com/jramnai)|
|[Vigenère Cipher](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/vigenere_cipher)|[victoni](https://github.com/victoni)|
|[Web proxy](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/Proxy-Request)|[Nikhil Kumar Singh](https://github.com/nikhilkumarsingh)|
|[Website blocker](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/Website-Blocker)|[Ayush Bhardwaj](https://github.com/hastagAB)|
@@ -67,6 +70,7 @@ So far, the following projects have been integrated to this repo:
|[Find PhoneNumber in String](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/Find-PhoneNumber-in-String)|[Austin Zuniga](https://github.com/AustinZuniga)|
|[IMDB TV Series Info Extractor](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/imdb_episode_ratings)|[Yash Raj Sarrof](https://github.com/yashYRS) |
|[Yoda-speak Translator](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/speak_like_yoda)|[sonniki](https://github.com/sonniki) |
|[Medium Article Downloader](https://github.com/hastagAB/Awesome-Python-Scripts/tree/master/medium_article_downloader)|[coolsonu39](https://github.com/coolsonu39)|

## How to use :
17 Upload_files_to_s3/README.md Normal file
@@ -0,0 +1,17 @@
# Upload files & folders from your machine to Amazon S3
|
||||
|
||||
A python script that will upload your files & folder to Amzzon S3 using python and boto3
|
||||
|
||||
## Requirement
|
||||
|
||||
Python 2.xx
|
||||
boto3
|
||||
```bash
|
||||
pip install boto3
|
||||
```
|
||||
|
||||
#Usage
|
||||
Go to Upload_files_to_s3 directory and add your folder's name you want to upload to s3 and then run upload_files_to_s3.py as below:
|
||||
```bash
|
||||
$ python upload_files_to_s3.py
|
||||
```
|
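For orientation, a single upload with boto3 boils down to the calls below (bucket name, key, credentials, and file name are placeholders; the full script that walks a whole directory tree follows):

```python
import boto3

# One-off upload using the same boto3 calls as upload_files_to_s3.py below.
session = boto3.Session(
    aws_access_key_id='your_access_key',        # placeholder credentials
    aws_secret_access_key='your_secret_key',
    region_name='your_region',
)
bucket = session.resource('s3').Bucket('my_bucket')
with open('example.txt', 'rb') as data:
    bucket.put_object(Key='my_folder_on_s3/example.txt', Body=data, ACL='public-read')
```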
1 Upload_files_to_s3/requirements.txt Normal file
@@ -0,0 +1 @@
boto3==1.9.197 # Amazon Web Services SDK for Python
33 Upload_files_to_s3/upload_files_to_s3.py Normal file
@@ -0,0 +1,33 @@
import boto3
import os

ACL = 'public-read'  # access type of the uploaded files
AWS_ACCESS_KEY_ID = 'your_access_key'
AWS_REGION = 'your_region'
AWS_SECRET_ACCESS_KEY = 'your_secret_key'
AWS_STORAGE_BUCKET_NAME = 'my_bucket'
FOLDER_NAME_ON_S3 = 'my_folder_on_s3'
FOLDER_PATH = '/home/foo/my_folder'


def upload_files_to_s3(path):
    """
    Upload files to an AWS S3 bucket from your machine
    using python and boto3.
    """
    session = boto3.Session(
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
        region_name=AWS_REGION
    )
    s3 = session.resource('s3')
    bucket = s3.Bucket(AWS_STORAGE_BUCKET_NAME)
    for subdir, dirs, files in os.walk(path):
        for file in files:
            full_path = os.path.join(subdir, file)
            with open(full_path, 'rb') as data:
                # Mirror the local layout under FOLDER_NAME_ON_S3 in the bucket.
                key = FOLDER_NAME_ON_S3 + '/' + full_path[len(path) + 1:]
                bucket.put_object(Key=key, Body=data, ACL=ACL)


if __name__ == "__main__":
    upload_files_to_s3(FOLDER_PATH)
13 automated_email/README.md Normal file
@@ -0,0 +1,13 @@
# Automated email Python script

You can now send emails to multiple people at once, with only a few clicks, using the smtplib module in Python.

## Requirement

Python version 3 or above. The smtplib and json modules ship with the Python standard library, so no extra installation is needed.

Can be run easily from the command prompt (python automated_email.py):

-> log in as you would for your Gmail account (same email and password)
-> find your way with the intuitive, user-friendly menu

(!!! Your passwords and emails are only stored on your local device and no one else has access to your information !!!)
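At its core, the script opens an SMTP session to Gmail with STARTTLS and calls sendmail once per recipient; a minimal single-recipient sketch (addresses and password are placeholders, and Gmail typically requires an app password for this kind of login):

```python
from smtplib import SMTP

# One message over Gmail's SMTP server with STARTTLS (placeholder values).
server = SMTP('smtp.gmail.com', 587)
server.starttls()
server.login('you@gmail.com', 'your_app_password')
server.sendmail('you@gmail.com', 'friend@example.com', 'Subject: Hello\n\nHi there!')
server.quit()
```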
47 automated_email/automated_email.py Normal file
@@ -0,0 +1,47 @@
from smtplib import SMTP as smtp
import json


def sendmail(sender_add, receiver_add, msg, password):
    server = smtp('smtp.gmail.com:587')
    server.starttls()
    server.login(sender_add, password)
    server.sendmail(sender_add, receiver_add, msg)
    print("Mail sent successfully....!")


group = {}
print('\t\t ......LOGIN.....')
your_add = input('Enter your email address :')
password = input('Enter your email password for login:')
print('\n\n\n\n')
choice = 'y'
while choice != '3' and choice != 'no':
    print("\n1. Create a group\n2. Message a group\n3. Exit")
    choice = input()
    if choice == '1':
        ch = 'y'
        while ch != 'n':
            gname = input('Enter name of group :')
            group[gname] = input('Enter contact emails separated by a single space :').rstrip()
            ch = input('Add another....y/n? :').rstrip()
        # Rewrite the file so it stays valid JSON (appending would corrupt it).
        with open('groups.json', 'w') as f:
            json.dump(group, f)
    elif choice == '2':
        gname = input('Enter name of group :')
        try:
            with open('groups.json', 'r') as f:
                members = json.load(f)
            members = members[gname].split()
        except (OSError, KeyError, ValueError):
            print('Invalid group name. Please create the group first.')
            exit()
        msg = input('Enter message :')
        for i in members:
            try:
                sendmail(your_add, i, msg, password)
            except Exception:
                print("An unexpected error occurred. Please try again later...")
                continue
    else:
        break
2 automated_email/requirements.txt Normal file
@@ -0,0 +1,2 @@
smtplib
json
3 medium_article_downloader/README.md Normal file
@@ -0,0 +1,3 @@
A simple Python script to download the latest articles from Medium topic-wise and save them in text files.

It basically scrapes the site using the requests and bs4 modules. I made it just for fun after reading Automate the Boring Stuff with Python by Al Sweigart.
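The scraping amounts to a GET on a topic page plus a CSS selector for the article headings, roughly as below (this mirrors helpers.py; it needs the html5lib parser installed, and Medium's markup may change over time):

```python
import requests, bs4

# Fetch a topic page and list the article links found under h3 > a headings.
resp = requests.get('https://medium.com/topic/programming')
soup = bs4.BeautifulSoup(resp.text, features='html5lib')
for link in soup.select('h3 > a'):
    print(link.getText(), '->', link.get('href'))
```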
43 medium_article_downloader/helpers.py Normal file
@@ -0,0 +1,43 @@
import requests
import bs4


def get_topic():
    '''Get a topic to download from the user.'''

    topic_list = ['comics', 'books', 'art', 'culture', 'film', 'food', 'gaming', 'humor', 'internet-culture', 'lit', 'medium-magazine', 'music', 'photography', 'social-media', 'sports', 'style', 'true-crime', 'tv', 'writing', 'business', 'design', 'economy', 'startups', 'freelancing', 'leadership', 'marketing', 'productivity', 'work', 'artificial-intelligence', 'blockchain', 'cryptocurrency', 'cybersecurity', 'data-science', 'gadgets', 'javascript', 'machine-learning', 'math', 'neuroscience', 'programming', 'science', 'self-driving-cars', 'software-engineering', 'space', 'technology', 'visual-design', 'addiction', 'creativity', 'disability', 'family', 'health', 'mental-health', 'parenting', 'personal-finance', 'pets', 'psychedelics', 'psychology', 'relationships', 'self', 'sexuality', 'spirituality', 'travel', 'wellness', 'basic-income', 'cities', 'education', 'environment', 'equality', 'future', 'gun-control', 'history', 'justice', 'language', 'lgbtqia', 'media', 'masculinity', 'philosophy', 'politics', 'race', 'religion', 'san-francisco', 'transportation', 'women', 'world']
    print('Welcome to Medium article downloader by @CoolSonu39!')
    choice = 'some-random-topic'
    print('Which domain do you want to read today?')
    while choice not in topic_list:
        print("Enter 'list' to see the list of topics.")
        choice = input('Enter your choice: ')
        if choice == 'list':
            print()
            for i in topic_list:
                print(i)
            print()
        elif choice not in topic_list:
            print('\nTopic ' + choice + ' not found :(')
    return choice


def extract_links(url):
    '''Extract article links from url.'''

    html_response = requests.get(url)
    parsed_response = bs4.BeautifulSoup(html_response.text, features='html5lib')
    article_list = parsed_response.select('h3 > a')
    return article_list


def medium_text(url):
    '''Extract text from a Medium article link.'''

    html_response = requests.get(url)
    parsed_response = bs4.BeautifulSoup(html_response.text, features='html5lib')
    tag_list = parsed_response.find_all(['h1', 'p', 'h2'])

    extracted_text = ''
    for j in range(len(tag_list)):
        extracted_text += tag_list[j].getText() + '\n\n'

    return extracted_text
24 medium_article_downloader/medium.py Normal file
@@ -0,0 +1,24 @@
import requests, bs4
from helpers import *

choice = get_topic()
print('\nGetting latest article links from %s...' % (choice))

article_list = extract_links('https://medium.com/topic/' + choice)
print('Total articles found: ' + str(len(article_list)))

for i in range(len(article_list)):
    heading = article_list[i].getText()
    artlink = article_list[i].get('href')
    artlink = artlink if artlink.startswith("https://") else "https://medium.com" + artlink
    print('Downloading article: ' + str(i + 1))

    # remove invalid characters from filename
    file_name = f"{heading}.txt".replace(':', '').replace('?', '')
    file = open(file_name, 'w')

    article_text = medium_text(artlink)
    file.write(article_text)
    file.close()

print('Done.')
2 medium_article_downloader/requirements.txt Normal file
@@ -0,0 +1,2 @@
requests
bs4