Mirror of https://github.com/metafy-social/python-scripts.git, synced 2024-11-23 20:11:10 +00:00
Email Extractor: remove irrelevant code

commit 303d1cae16
parent a9428eacd6
```diff
@@ -1,11 +1,4 @@
-# -*- coding: utf-8 -*-
-"""Untitled0.ipynb
-
-Automatically generated by Colaboratory.
-
-Original file is located at
-    https://colab.research.google.com/drive/1BuQhjlIL_OYu39gpE2NQNZx9KJ_kPy3o
-"""
 
 import requests
 from bs4 import BeautifulSoup
```
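Only the imports survive from the notebook header; judging by the `df.to_csv('email.csv', index=False)` and `files.download("email.csv")` context lines in the next hunk, the kept script fetches a page, scrapes email addresses, and writes them to email.csv. A minimal sketch of that flow, assuming pandas for the DataFrame; the target URL and the regex are placeholders, not taken from the commit:

```python
import re

import pandas as pd
import requests
from bs4 import BeautifulSoup

url = "https://example.com/contact"  # placeholder target, not from the commit
html = requests.get(url, timeout=10).text
text = BeautifulSoup(html, "html.parser").get_text()

# Naive email pattern: good enough for a demo, nowhere near full RFC 5322.
emails = sorted(set(re.findall(r"[\w.+-]+@[\w-]+\.[\w.-]+", text)))

df = pd.DataFrame({"email": emails})
df.to_csv("email.csv", index=False)  # the call kept as context in the hunk below
# files.download("email.csv") is Colab-only and is omitted here.
```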
```diff
@@ -50,61 +43,3 @@ df.to_csv('email.csv', index=False)
 
 files.download("email.csv")
 
-urllib.request.urlopen('https://www.youracclaim.com/badges/42b5d2d4-7c14-4c1a-b78a-adb3ac04105b/public_url').read().decode("utf-8")
-
-import urllib.request
-
-fp = urllib.request.urlopen("http://royninja.github.io/contact.html")
-mybytes = fp.read()
-
-mystr = mybytes.decode("utf8")
-fp.close()
-
-print(mystr)
-
-import urllib.request
-
-fp = urllib.request.urlopen("http://royninja.github.io/contact.html")
-mybytes = fp.read()
-
-mystr = mybytes.decode("utf8")
-fp.close()
-
-print(mystr)
-
-webUrl = urllib.request.urlopen("https://royninja.github.io")
-
-pip install email-scraper
-
-scrape_emails(mystr)
-
-import requests
-from bs4 import BeautifulSoup
-
-url = 'https://royninja.github.io/'
-reqs = requests.get(url)
-soup = BeautifulSoup(reqs.text, 'html.parser')
-
-urls = []
-for link in soup.find_all('a'):
-    urls.append(link.get('href'))
-
-urls[1]
-
-url+urls[1]
-
-BufautifulSoup(requests.get(url+urls[1]).text,'html.parser')
-
-url2
```
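Most of what this hunk deletes is scratch work. The urllib cells, for instance, fetch the same page twice and close the handle by hand; if that fetch were worth keeping, the idiomatic form is a context manager (same URL as the removed code):

```python
# Context-manager version of the removed urllib scratch cells: same fetch and
# decode, but the connection is closed automatically even if an error occurs.
import urllib.request

with urllib.request.urlopen("http://royninja.github.io/contact.html") as fp:
    mystr = fp.read().decode("utf-8")

print(mystr)
```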
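The deleted `pip install email-scraper` line is also why the file would not run as a plain script: a bare pip command is only valid as a notebook cell. A self-contained version of that experiment, assuming the `scrape_emails` entry point the package's README documents:

```python
# Standalone version of the removed email-scraper experiment. Run
#   pip install email-scraper
# in a shell first; a bare "pip install" line works only in a notebook cell.
import urllib.request

from email_scraper import scrape_emails  # entry point per the package's README

with urllib.request.urlopen("http://royninja.github.io/contact.html") as fp:
    mystr = fp.read().decode("utf-8")

print(scrape_emails(mystr))  # prints the set of addresses found in the text
```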
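The final deleted block, which collects hrefs and re-parses the second link, also carried two bugs: `BufautifulSoup` is a typo for `BeautifulSoup`, and `url2` is never assigned. A corrected sketch, using `urljoin` rather than string concatenation so relative hrefs resolve too:

```python
# Corrected version of the removed link-walking cells: fixes the BufautifulSoup
# typo and uses urljoin so both absolute and relative hrefs resolve correctly.
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

url = "https://royninja.github.io/"
soup = BeautifulSoup(requests.get(url).text, "html.parser")

# Collect every non-empty href on the page.
urls = [link.get("href") for link in soup.find_all("a") if link.get("href")]

# Fetch and parse the second discovered link, as the notebook attempted.
url2 = urljoin(url, urls[1])
subpage = BeautifulSoup(requests.get(url2).text, "html.parser")
print(url2, subpage.title)
```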