Mirror of https://github.com/hastagAB/Awesome-Python-Scripts.git (synced 2024-11-30 15:31:07 +00:00)
Add the website URL detector (#230)
This commit is contained in: parent 45dd094afc, commit 9f1744f8a7
README.md
@@ -137,6 +137,7 @@ So far, the following projects have been integrated to this repo:
 |[Vigenère Cipher](vigenere_cipher)|[victoni](https://github.com/victoni)|
 |[Web proxy](Proxy-Request)|[Nikhil Kumar Singh](https://github.com/nikhilkumarsingh)|
 |[Website blocker](Website-Blocker)|[Ayush Bhardwaj](https://github.com/hastagAB)|
+|[Website Url Detector](Website_Url_Detector)|[sonniki](https://github.com/sonniki)|
 |[Word Frequency Counter](Word_Frequency_Counter)|[sonniki](https://github.com/sonniki)|
 |[Word generator](Word-generator)|[TGLIDE](https://github.com/TGlide)|
 |[Work log generator](Work_Log_Generator)|[Maël Pedretti](https://github.com/73VW)|
Website_Url_Detector/README.md (new file, 27 lines)
@@ -0,0 +1,27 @@
# Website URL Detector

## Description

A Python script that detects URLs on a given website.

## Usage

```py
>>> python detect_urls.py --website [website_url]
```

### Example

```py
>>> python detect_urls.py --website https://en.wikipedia.org/wiki/Guido_van_Rossum
https://upload.wikimedia.org/wikipedia/commons/thumb/e/e2/Guido-portrait-2014-drc.jpg/1200px-Guido-portrait-2014-drc.jpg
https://creativecommons.org/licenses/by-sa/3.0/
https://en.wikipedia.org/wiki/Guido_van_Rossum
https://gvanrossum.github.io/
http://mail.python.org/pipermail/python-dev/2007-January/070849.html
https://web.archive.org/web/20090908131440/http://mail.python.org/pipermail/python-dev/2007-January/070849.html
http://www.computerhistory.org/atchm/2018-chm-fellow-guido-van-rossum-python-creator-benevolent-dictator-for-life/
https://web.archive.org/web/20180724114116/http://www.computerhistory.org/atchm/2018-chm-fellow-guido-van-rossum-python-creator-benevolent-dictator-for-life/
https://web.archive.org/web/20081031103755/http://wiki.codecall.net/Guido_van_Rossum
http://wiki.codecall.net/Guido_van_Rossum
...
```
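
The detector can also be driven from Python rather than the command line by importing the script's `run` function. A minimal sketch, assuming `detect_urls.py` is on the import path and network access is available:

```py
from detect_urls import run

# Fetches the page with requests and prints every detected URL, one per line,
# just like the command-line invocation shown above.
run("https://en.wikipedia.org/wiki/Guido_van_Rossum")
```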
Website_Url_Detector/detect_urls.py (new file, 34 lines)
@@ -0,0 +1,34 @@
import argparse
import re

import requests


def run(url: str) -> None:
    """
    Detect all the URLs on a given website.

    :param url: the url of the website to process
    :return:
    """
    # Load the website's HTML.
    website = requests.get(url)
    html = website.text
    # Detect the URLs.
    URL_REGEX = r"http[s]?://(?:[a-zA-Z#]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
    detected_urls = re.findall(URL_REGEX, html)
    # Filter invalid URLs.
    suffixes = "aero|asia|biz|cat|com|coop|edu|gov|info|int|jobs|mil|mobi|museum|name|net|org|pro|tel|travel|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cu|cv|cx|cy|cz|cz|de|dj|dk|dm|do|dz|ec|ee|eg|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mn|mn|mo|mp|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|nom|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ra|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|sj|sk|sl|sm|sn|so|sr|st|su|sv|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw".split("|")
    detected_urls = [x for x in detected_urls if any("."+suffix in x for suffix in suffixes)]
    print("\n".join(detected_urls))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--website",
        required=True,
        help="URL of a website to detect other URLs on"
    )
    args = parser.parse_args()
    # Detect the URLs.
    run(args.website)
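
The core of the script, a regex match over the page HTML followed by a TLD-suffix filter, can be exercised offline on an inline HTML string. A minimal sketch that reuses the same pattern and filtering idea; the sample HTML and the shortened suffix list are made up purely for illustration:

```py
import re

# Same URL pattern as detect_urls.py.
URL_REGEX = r"http[s]?://(?:[a-zA-Z#]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
# Tiny subset of the TLD list the script uses, just for this demo.
suffixes = ["com", "org", "io"]

sample_html = '<a href="https://example.com/page">link</a> <img src="https://cdn.invalid/logo.png">'
detected = re.findall(URL_REGEX, sample_html)
detected = [u for u in detected if any("." + s in u for s in suffixes)]
print(detected)  # ['https://example.com/page'] -- the ".invalid" link is dropped by the filter
```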
Website_Url_Detector/requirements.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
argparse
re
requests==2.22.0