Lukazlim: Replace dependency requests with httpx (#12744)

* Replace dependency `requests` with `httpx`

Fixes #12742
Signed-off-by: Lim, Lukaz Wei Hwang <lukaz.wei.hwang.lim@intel.com>

* updating DIRECTORY.md

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Signed-off-by: Lim, Lukaz Wei Hwang <lukaz.wei.hwang.lim@intel.com>
Co-authored-by: Lim, Lukaz Wei Hwang <lukaz.wei.hwang.lim@intel.com>
Co-authored-by: cclauss <cclauss@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Committed by Christian Clauss on 2025-05-14 03:42:11 +02:00 (via GitHub)
parent 6e4d1b3765
commit a2fa32c7ad
40 changed files with 957 additions and 640 deletions
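
The change is mechanical across the touched scripts: every `requests.get`/`requests.post` call site becomes `httpx.get`/`httpx.post` with the same `timeout` argument. A minimal sketch of the pattern (the URL is only a placeholder):

```python
import httpx

# httpx mirrors the requests call shape: the returned response object exposes
# .json(), .text, .content and .raise_for_status(), so call sites change little.
response = httpx.get("https://api.github.com", timeout=10)
response.raise_for_status()
print(response.json())
```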

@@ -899,6 +899,7 @@
   * [N Body Simulation](physics/n_body_simulation.py)
   * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py)
   * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py)
+  * [Orbital Transfer Work](physics/orbital_transfer_work.py)
   * [Period Of Pendulum](physics/period_of_pendulum.py)
   * [Photoelectric Effect](physics/photoelectric_effect.py)
   * [Potential Energy](physics/potential_energy.py)

@@ -8,8 +8,16 @@ fit our dataset. In this particular code, I had used a CSGO dataset (ADR vs
 Rating). We try to best fit a line through dataset and estimate the parameters.
 """
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+#     "numpy",
+# ]
+# ///
+import httpx
 import numpy as np
-import requests
 def collect_dataset():
@@ -17,7 +25,7 @@ def collect_dataset():
     The dataset contains ADR vs Rating of a Player
     :return : dataset obtained from the link, as matrix
     """
-    response = requests.get(
+    response = httpx.get(
         "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/"
         "master/Week1/ADRvsRating.csv",
         timeout=10,
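
Alongside the import swap, each standalone script gains a PEP 723 inline metadata block (`# /// script ... # ///`) declaring its Python version and dependencies. A minimal sketch of such a self-describing script; the `uv run` mention names one PEP 723-aware runner and is an assumption, not something the diff itself prescribes:

```python
# /// script
# requires-python = ">=3.13"
# dependencies = [
#     "httpx",
# ]
# ///
# A PEP 723-aware tool (for example `uv run example.py`) can read the block
# above and provision httpx in a throwaway environment before execution.
import httpx

print(httpx.get("https://example.org", timeout=10).status_code)
```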

@@ -59,9 +59,9 @@ def avg_speed_of_molecule(temperature: float, molar_mass: float) -> float:
     Examples:
     >>> avg_speed_of_molecule(273, 0.028)  # nitrogen at 273 K
-    454.3488755020387
+    454.3488755062257
     >>> avg_speed_of_molecule(300, 0.032)  # oxygen at 300 K
-    445.52572733919885
+    445.5257273433045
     >>> avg_speed_of_molecule(-273, 0.028)  # invalid temperature
     Traceback (most recent call last):
         ...
@@ -87,9 +87,9 @@ def mps_speed_of_molecule(temperature: float, molar_mass: float) -> float:
     Examples:
     >>> mps_speed_of_molecule(273, 0.028)  # nitrogen at 273 K
-    402.65620701908966
+    402.65620702280023
     >>> mps_speed_of_molecule(300, 0.032)  # oxygen at 300 K
-    394.836895549922
+    394.8368955535605
     >>> mps_speed_of_molecule(-273, 0.028)  # invalid temperature
     Traceback (most recent call last):
         ...

@@ -11,6 +11,7 @@ classifiers = [
 dependencies = [
     "beautifulsoup4>=4.12.3",
     "fake-useragent>=1.5.1",
+    "httpx>=0.28.1",
     "imageio>=2.36.1",
     "keras>=3.7",
     "lxml>=5.3",
@@ -19,7 +20,6 @@ dependencies = [
     "opencv-python>=4.10.0.84",
     "pandas>=2.2.3",
     "pillow>=11",
-    "requests>=2.32.3",
     "rich>=13.9.4",
     "scikit-learn>=1.5.2",
     "sphinx-pyproject>=0.3",
@@ -42,8 +42,8 @@ docs = [
     "sphinx-pyproject>=0.3",
 ]
 euler-validate = [
+    "httpx>=0.28.1",
     "numpy>=2.1.3",
-    "requests>=2.32.3",
 ]
 [tool.ruff]

@@ -1,5 +1,6 @@
 beautifulsoup4
 fake-useragent
+httpx
 imageio
 keras
 lxml
@@ -8,7 +9,6 @@ numpy
 opencv-python
 pandas
 pillow
-requests
 rich
 scikit-learn
 sphinx-pyproject

@@ -3,8 +3,8 @@
 # /// script
 # requires-python = ">=3.13"
 # dependencies = [
+#     "httpx",
 #     "pytest",
-#     "requests",
 # ]
 # ///
@@ -15,8 +15,8 @@ import os
 import pathlib
 from types import ModuleType
+import httpx
 import pytest
-import requests
 PROJECT_EULER_DIR_PATH = pathlib.Path.cwd().joinpath("project_euler")
 PROJECT_EULER_ANSWERS_PATH = pathlib.Path.cwd().joinpath(
@@ -66,7 +66,7 @@ def added_solution_file_path() -> list[pathlib.Path]:
         "Accept": "application/vnd.github.v3+json",
         "Authorization": "token " + os.environ["GITHUB_TOKEN"],
     }
-    files = requests.get(get_files_url(), headers=headers, timeout=10).json()
+    files = httpx.get(get_files_url(), headers=headers, timeout=10).json()
     for file in files:
         filepath = pathlib.Path.cwd().joinpath(file["filename"])
         if (

uv.lock (generated): 1080 changed lines; file diff suppressed because it is too large.

@@ -2,22 +2,29 @@
 Get CO2 emission data from the UK CarbonIntensity API
 """
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
 from datetime import date
-import requests
+import httpx
 BASE_URL = "https://api.carbonintensity.org.uk/intensity"
 # Emission in the last half hour
 def fetch_last_half_hour() -> str:
-    last_half_hour = requests.get(BASE_URL, timeout=10).json()["data"][0]
+    last_half_hour = httpx.get(BASE_URL, timeout=10).json()["data"][0]
     return last_half_hour["intensity"]["actual"]
 # Emissions in a specific date range
 def fetch_from_to(start, end) -> list:
-    return requests.get(f"{BASE_URL}/{start}/{end}", timeout=10).json()["data"]
+    return httpx.get(f"{BASE_URL}/{start}/{end}", timeout=10).json()["data"]
 if __name__ == "__main__":

@@ -4,9 +4,17 @@ This is to show simple COVID19 info fetching from worldometers site using lxml
 more convenient to use in Python web projects (e.g. Django or Flask-based)
 """
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+#     "lxml",
+# ]
+# ///
 from typing import NamedTuple
-import requests
+import httpx
 from lxml import html
@@ -19,7 +27,7 @@ class CovidData(NamedTuple):
 def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> CovidData:
     xpath_str = '//div[@class = "maincounter-number"]/span/text()'
     return CovidData(
-        *html.fromstring(requests.get(url, timeout=10).content).xpath(xpath_str)
+        *html.fromstring(httpx.get(url, timeout=10).content).xpath(xpath_str)
     )

@@ -1,14 +1,28 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "fake-useragent",
+#     "httpx",
+# ]
+# ///
 import sys
 import webbrowser
-import requests
+import httpx
 from bs4 import BeautifulSoup
 from fake_useragent import UserAgent
 if __name__ == "__main__":
     print("Googling.....")
     url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
-    res = requests.get(url, headers={"UserAgent": UserAgent().random}, timeout=10)
+    res = httpx.get(
+        url,
+        headers={"UserAgent": UserAgent().random},
+        timeout=10,
+        follow_redirects=True,
+    )
     # res.raise_for_status()
     with open("project1a.html", "wb") as out_file:  # only for knowing the class
         for data in res.iter_content(10000):
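
Two library differences surface in this hunk: httpx does not follow redirects unless `follow_redirects=True` is passed (requests follows them by default), and an httpx response is iterated with `iter_bytes()` rather than requests' `iter_content()`. A hedged sketch of both, with a placeholder query and file name:

```python
import httpx

# Redirects must be opted into with httpx; requests followed them by default.
res = httpx.get(
    "https://www.google.com/search?q=python",
    headers={"User-Agent": "Mozilla/5.0"},
    timeout=10,
    follow_redirects=True,
)

# iter_bytes() is httpx's counterpart to requests' iter_content() for
# writing the body out in chunks.
with open("results.html", "wb") as out_file:
    for chunk in res.iter_bytes(chunk_size=10_000):
        out_file.write(chunk)
```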

@@ -3,7 +3,15 @@ Get the citation from google scholar
 using title and year of publication, and volume and pages of journal.
 """
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
+import httpx
 from bs4 import BeautifulSoup
@@ -12,7 +20,7 @@ def get_citation(base_url: str, params: dict) -> str:
     Return the citation number.
     """
     soup = BeautifulSoup(
-        requests.get(base_url, params=params, timeout=10).content, "html.parser"
+        httpx.get(base_url, params=params, timeout=10).content, "html.parser"
     )
     div = soup.find("div", attrs={"class": "gs_ri"})
     anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")

@@ -3,9 +3,16 @@ This is used to convert the currency using the Amdoren Currency API
 https://www.amdoren.com
 """
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
 import os
-import requests
+import httpx
 URL_BASE = "https://www.amdoren.com/api/currency.php"
@@ -176,7 +183,7 @@ def convert_currency(
     params = locals()
     # from is a reserved keyword
     params["from"] = params.pop("from_")
-    res = requests.get(URL_BASE, params=params, timeout=10).json()
+    res = httpx.get(URL_BASE, params=params, timeout=10).json()
     return str(res["amount"]) if res["error"] == 0 else res["error_message"]

@@ -1,4 +1,12 @@
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
+import httpx
 from bs4 import BeautifulSoup
 """
@@ -20,8 +28,8 @@ def stock_price(symbol: str = "AAPL") -> str:
     True
     """
     url = f"https://finance.yahoo.com/quote/{symbol}?p={symbol}"
-    yahoo_finance_source = requests.get(
-        url, headers={"USER-AGENT": "Mozilla/5.0"}, timeout=10
+    yahoo_finance_source = httpx.get(
+        url, headers={"USER-AGENT": "Mozilla/5.0"}, timeout=10, follow_redirects=True
     ).text
     soup = BeautifulSoup(yahoo_finance_source, "html.parser")

@@ -1,4 +1,11 @@
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+import httpx
 # Put your API key(s) here
 OPENWEATHERMAP_API_KEY = ""
@@ -19,13 +26,13 @@ def current_weather(location: str) -> list[dict]:
     weather_data = []
     if OPENWEATHERMAP_API_KEY:
         params_openweathermap = {"q": location, "appid": OPENWEATHERMAP_API_KEY}
-        response_openweathermap = requests.get(
+        response_openweathermap = httpx.get(
             OPENWEATHERMAP_URL_BASE, params=params_openweathermap, timeout=10
         )
         weather_data.append({"OpenWeatherMap": response_openweathermap.json()})
     if WEATHERSTACK_API_KEY:
         params_weatherstack = {"query": location, "access_key": WEATHERSTACK_API_KEY}
-        response_weatherstack = requests.get(
+        response_weatherstack = httpx.get(
             WEATHERSTACK_URL_BASE, params=params_weatherstack, timeout=10
         )
         weather_data.append({"Weatherstack": response_weatherstack.json()})

@@ -1,4 +1,12 @@
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
+import httpx
 from bs4 import BeautifulSoup
@@ -7,7 +15,7 @@ def horoscope(zodiac_sign: int, day: str) -> str:
         "https://www.horoscope.com/us/horoscopes/general/"
         f"horoscope-general-daily-{day}.aspx?sign={zodiac_sign}"
     )
-    soup = BeautifulSoup(requests.get(url, timeout=10).content, "html.parser")
+    soup = BeautifulSoup(httpx.get(url, timeout=10).content, "html.parser")
     return soup.find("div", class_="main-horoscope").p.text

@@ -1,10 +1,18 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
 import json
 import os
 import re
 import sys
 import urllib.request
-import requests
+import httpx
 from bs4 import BeautifulSoup
 headers = {
@@ -39,7 +47,7 @@ def download_images_from_google_query(query: str = "dhaka", max_images: int = 5)
         "ijn": "0",
     }
-    html = requests.get(
+    html = httpx.get(
         "https://www.google.com/search", params=params, headers=headers, timeout=10
     )
     soup = BeautifulSoup(html.text, "html.parser")

@@ -1,5 +1,12 @@
 """Get the site emails from URL."""
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
 from __future__ import annotations
 __author__ = "Muhammad Umer Farooq"
@@ -13,7 +20,7 @@ import re
 from html.parser import HTMLParser
 from urllib import parse
-import requests
+import httpx
 class Parser(HTMLParser):
@@ -72,7 +79,7 @@ def emails_from_url(url: str = "https://github.com") -> list[str]:
     try:
         # Open URL
-        r = requests.get(url, timeout=10)
+        r = httpx.get(url, timeout=10, follow_redirects=True)
         # pass the raw HTML to the parser to get links
         parser.feed(r.text)
@@ -81,9 +88,15 @@ def emails_from_url(url: str = "https://github.com") -> list[str]:
     valid_emails = set()
     for link in parser.urls:
         # open URL.
-        # read = requests.get(link)
+        # Check if the link is already absolute
+        if not link.startswith("http://") and not link.startswith("https://"):
+            # Prepend protocol only if link starts with domain, normalize otherwise
+            if link.startswith(domain):
+                link = f"https://{link}"
+            else:
+                link = parse.urljoin(f"https://{domain}", link)
        try:
-            read = requests.get(link, timeout=10)
+            read = httpx.get(link, timeout=10, follow_redirects=True)
             # Get the valid email.
             emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
             # If not in list then append it.
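
The new normalisation branch only prefixes a scheme when a link already starts with the bare domain and otherwise resolves it against the base URL with `parse.urljoin`. A small self-contained illustration of the same branching; `domain` and the sample links are placeholders:

```python
from urllib.parse import urljoin

domain = "github.com"

for link in ("github.com/about", "/features"):
    if not link.startswith(("http://", "https://")):
        if link.startswith(domain):
            # Bare domain link: it only needs a scheme prefix.
            link = f"https://{link}"
        else:
            # Relative link: resolve it against the site root.
            link = urljoin(f"https://{domain}", link)
    print(link)
# https://github.com/about
# https://github.com/features
```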

@@ -1,8 +1,17 @@
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "fake-useragent",
+#     "httpx",
+# ]
+# ///
+import httpx
 from bs4 import BeautifulSoup, NavigableString, Tag
 from fake_useragent import UserAgent
-BASE_URL = "https://ww1.gogoanime2.org"
+BASE_URL = "https://ww7.gogoanime2.org"
 def search_scraper(anime_name: str) -> list:
@@ -25,9 +34,9 @@ def search_scraper(anime_name: str) -> list:
     """
     # concat the name to form the search url.
-    search_url = f"{BASE_URL}/search/{anime_name}"
-    response = requests.get(
+    search_url = f"{BASE_URL}/search?keyword={anime_name}"
+    response = httpx.get(
         search_url, headers={"UserAgent": UserAgent().chrome}, timeout=10
     )  # request the url.
@@ -82,7 +91,7 @@ def search_anime_episode_list(episode_endpoint: str) -> list:
     request_url = f"{BASE_URL}{episode_endpoint}"
-    response = requests.get(
+    response = httpx.get(
         url=request_url, headers={"UserAgent": UserAgent().chrome}, timeout=10
     )
     response.raise_for_status()
@@ -133,7 +142,7 @@ def get_anime_episode(episode_endpoint: str) -> list:
     episode_page_url = f"{BASE_URL}{episode_endpoint}"
-    response = requests.get(
+    response = httpx.get(
         url=episode_page_url, headers={"User-Agent": UserAgent().chrome}, timeout=10
     )
     response.raise_for_status()

@@ -1,13 +1,20 @@
 # Created by sarathkaul on 12/11/19
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+import httpx
 _NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
 def fetch_bbc_news(bbc_news_api_key: str) -> None:
     # fetching a list of articles in json format
-    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key, timeout=10).json()
+    bbc_news_page = httpx.get(_NEWS_API + bbc_news_api_key, timeout=10).json()
     # each article in the list is a dict
     for i, article in enumerate(bbc_news_page["articles"], 1):
         print(f"{i}.) {article['title']}")

@@ -18,12 +18,19 @@ with your token::
     export USER_TOKEN=""
 """
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
 from __future__ import annotations
 import os
 from typing import Any
-import requests
+import httpx
 BASE_URL = "https://api.github.com"
@@ -36,13 +43,13 @@ USER_TOKEN = os.environ.get("USER_TOKEN", "")
 def fetch_github_info(auth_token: str) -> dict[Any, Any]:
     """
-    Fetch GitHub info of a user using the requests module
+    Fetch GitHub info of a user using the httpx module
     """
     headers = {
         "Authorization": f"token {auth_token}",
         "Accept": "application/vnd.github.v3+json",
     }
-    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers, timeout=10).json()
+    return httpx.get(AUTHENTICATED_USER_ENDPOINT, headers=headers, timeout=10).json()
 if __name__ == "__main__":  # pragma: no cover

@@ -2,20 +2,26 @@
 Scraping jobs given job title and location from indeed website
 """
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
 from __future__ import annotations
 from collections.abc import Generator
-import requests
+import httpx
 from bs4 import BeautifulSoup
 url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
 def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str]]:
-    soup = BeautifulSoup(
-        requests.get(url + location, timeout=10).content, "html.parser"
-    )
+    soup = BeautifulSoup(httpx.get(url + location, timeout=10).content, "html.parser")
     # This attribute finds out all the specifics listed in a job
     for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
         job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()

@@ -6,19 +6,26 @@ For more details and premium features visit:
 https://zenquotes.io/
 """
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
 import pprint
-import requests
+import httpx
 API_ENDPOINT_URL = "https://zenquotes.io/api"
 def quote_of_the_day() -> list:
-    return requests.get(API_ENDPOINT_URL + "/today", timeout=10).json()
+    return httpx.get(API_ENDPOINT_URL + "/today", timeout=10).json()
 def random_quotes() -> list:
-    return requests.get(API_ENDPOINT_URL + "/random", timeout=10).json()
+    return httpx.get(API_ENDPOINT_URL + "/random", timeout=10).json()
 if __name__ == "__main__":

@@ -4,9 +4,18 @@ and fetch from Amazon information about products of this name or category. The
 information will include title, URL, price, ratings, and the discount available.
 """
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+#     "pandas",
+# ]
+# ///
 from itertools import zip_longest
-import requests
+import httpx
 from bs4 import BeautifulSoup
 from pandas import DataFrame
@@ -25,7 +34,7 @@ def get_amazon_product_data(product: str = "laptop") -> DataFrame:
         "Accept-Language": "en-US, en;q=0.5",
     }
     soup = BeautifulSoup(
-        requests.get(url, headers=header, timeout=10).text, features="lxml"
+        httpx.get(url, headers=header, timeout=10).text, features="lxml"
     )
     # Initialize a Pandas dataframe with the column titles
     data_frame = DataFrame(

@@ -1,16 +1,24 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
 from __future__ import annotations
 import csv
-import requests
+import httpx
 from bs4 import BeautifulSoup
 def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
     url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
-    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
-    titles = soup.find_all("td", attrs="titleColumn")
-    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
+    soup = BeautifulSoup(httpx.get(url, timeout=10).text, "html.parser")
+    titles = soup.find_all("h3", class_="ipc-title__text")
+    ratings = soup.find_all("span", class_="ipc-rating-star--rating")
     return {
         title.a.text: float(rating.strong.text)
         for title, rating in zip(titles, ratings)
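
The selector change tracks IMDb's newer markup (`h3.ipc-title__text` and `span.ipc-rating-star--rating` instead of the old `td` cells). A self-contained illustration of how those selectors pair titles with ratings; the inline HTML snippet is fabricated for the example, not real scraped output:

```python
from bs4 import BeautifulSoup

# Toy markup using the class names the new selectors target.
html = """
<h3 class="ipc-title__text">1. Example Movie</h3>
<span class="ipc-rating-star--rating">9.3</span>
"""
soup = BeautifulSoup(html, "html.parser")
titles = soup.find_all("h3", class_="ipc-title__text")
ratings = soup.find_all("span", class_="ipc-rating-star--rating")
print({title.text: float(rating.text) for title, rating in zip(titles, ratings)})
# {'1. Example Movie': 9.3}
```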

@@ -1,4 +1,11 @@
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+import httpx
 # Function to get geolocation data for an IP address
@@ -8,7 +15,7 @@ def get_ip_geolocation(ip_address: str) -> str:
         url = f"https://ipinfo.io/{ip_address}/json"
         # Send a GET request to the API
-        response = requests.get(url, timeout=10)
+        response = httpx.get(url, timeout=10)
         # Check if the HTTP request was successful
         response.raise_for_status()
@@ -23,7 +30,7 @@ def get_ip_geolocation(ip_address: str) -> str:
             location = "Location data not found."
         return location
-    except requests.exceptions.RequestException as e:
+    except httpx.RequestError as e:
         # Handle network-related exceptions
        return f"Request error: {e}"
    except ValueError as e:
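
The exception swap maps `requests.exceptions.RequestException` onto `httpx.RequestError`, which covers transport-level failures (DNS errors, timeouts, refused connections); HTTP error statuses surfaced by `raise_for_status()` come out as `httpx.HTTPStatusError` instead. A hedged sketch of both branches; the helper name and URL are illustrative only:

```python
import httpx


def fetch_json(url: str) -> str:
    """Illustrative helper only; not part of the committed module."""
    try:
        response = httpx.get(url, timeout=10)
        response.raise_for_status()
    except httpx.HTTPStatusError as e:
        # Raised by raise_for_status() for 4xx/5xx answers.
        return f"HTTP error {e.response.status_code} for {e.request.url}"
    except httpx.RequestError as e:
        # Raised for network-level problems before a response exists.
        return f"Request error: {e}"
    return response.text


print(fetch_json("https://ipinfo.io/8.8.8.8/json"))
```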

@@ -3,9 +3,17 @@ CAUTION: You may get a json.decoding error.
 This works for some of us but fails for others.
 """
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+#     "rich",
+# ]
+# ///
 from datetime import UTC, date, datetime
-import requests
+import httpx
 from rich import box
 from rich import console as rich_console
 from rich import table as rich_table
@@ -57,7 +65,7 @@ def get_forbes_real_time_billionaires() -> list[dict[str, int | str]]:
     Returns:
         List of top 10 realtime billionaires data.
     """
-    response_json = requests.get(API_URL, timeout=10).json()
+    response_json = httpx.get(API_URL, timeout=10).json()
     return [
         {
             "Name": person["personName"],

@@ -1,11 +1,18 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
 from __future__ import annotations
-import requests
+import httpx
 def get_hackernews_story(story_id: str) -> dict:
     url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
-    return requests.get(url, timeout=10).json()
+    return httpx.get(url, timeout=10).json()
 def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
@@ -13,7 +20,7 @@ def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
     Get the top max_stories posts from HackerNews - https://news.ycombinator.com/
     """
     url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
-    story_ids = requests.get(url, timeout=10).json()[:max_stories]
+    story_ids = httpx.get(url, timeout=10).json()[:max_stories]
     return [get_hackernews_story(story_id) for story_id in story_ids]

@@ -1,5 +1,13 @@
 #!/usr/bin/env python3
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+import httpx
 giphy_api_key = "YOUR API KEY"
 # Can be fetched from https://developers.giphy.com/dashboard/
@@ -11,7 +19,7 @@ def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
     """
     formatted_query = "+".join(query.split())
     url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
-    gifs = requests.get(url, timeout=10).json()["data"]
+    gifs = httpx.get(url, timeout=10).json()["data"]
     return [gif["url"] for gif in gifs]

@@ -1,9 +1,19 @@
 #!/usr/bin/env python3
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "fake-useragent",
+#     "httpx",
+# ]
+# ///
 from __future__ import annotations
 import json
-import requests
+import httpx
 from bs4 import BeautifulSoup
 from fake_useragent import UserAgent
@@ -39,7 +49,7 @@ class InstagramUser:
         """
         Return a dict of user information
         """
-        html = requests.get(self.url, headers=headers, timeout=10).text
+        html = httpx.get(self.url, headers=headers, timeout=10).text
         scripts = BeautifulSoup(html, "html.parser").find_all("script")
         try:
             return extract_user_profile(scripts[4])

@@ -1,6 +1,14 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
 from datetime import UTC, datetime
-import requests
+import httpx
 from bs4 import BeautifulSoup
@@ -15,9 +23,9 @@ def download_image(url: str) -> str:
         A message indicating the result of the operation.
     """
     try:
-        response = requests.get(url, timeout=10)
+        response = httpx.get(url, timeout=10)
         response.raise_for_status()
-    except requests.exceptions.RequestException as e:
+    except httpx.RequestError as e:
         return f"An error occurred during the HTTP request to {url}: {e!r}"
     soup = BeautifulSoup(response.text, "html.parser")
@@ -30,13 +38,13 @@ def download_image(url: str) -> str:
         return f"Image URL not found in meta tag {image_meta_tag}."
     try:
-        image_data = requests.get(image_url, timeout=10).content
-    except requests.exceptions.RequestException as e:
+        image_data = httpx.get(image_url, timeout=10).content
+    except httpx.RequestError as e:
         return f"An error occurred during the HTTP request to {image_url}: {e!r}"
     if not image_data:
         return f"Failed to download the image from {image_url}."
-    file_name = f"{datetime.now(tz=UTC).astimezone():%Y-%m-%d_%H:%M:%S}.jpg"
+    file_name = f"{datetime.now(tz=UTC).astimezone():%Y-%m-%d_%H-%M-%S}.jpg"
     with open(file_name, "wb") as out_file:
         out_file.write(image_data)
     return f"Image downloaded and saved in the file {file_name}"

@@ -1,17 +1,24 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
 from datetime import UTC, datetime
-import requests
+import httpx
 def download_video(url: str) -> bytes:
     base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
-    video_url = requests.get(base_url + url, timeout=10).json()[0]["urls"][0]["src"]
-    return requests.get(video_url, timeout=10).content
+    video_url = httpx.get(base_url + url, timeout=10)
+    return httpx.get(video_url, timeout=10).content
 if __name__ == "__main__":
     url = input("Enter Video/IGTV url: ").strip()
-    file_name = f"{datetime.now(tz=UTC).astimezone():%Y-%m-%d_%H:%M:%S}.mp4"
+    file_name = f"{datetime.now(tz=UTC).astimezone():%Y-%m-%d_%H-%M-%S}.mp4"
     with open(file_name, "wb") as fp:
         fp.write(download_video(url))
     print(f"Done. Video saved to disk as {file_name}.")

@@ -1,6 +1,11 @@
-import shutil
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
-import requests
+import httpx
 def get_apod_data(api_key: str) -> dict:
@@ -9,17 +14,17 @@ def get_apod_data(api_key: str) -> dict:
     Get your API Key from: https://api.nasa.gov/
     """
     url = "https://api.nasa.gov/planetary/apod"
-    return requests.get(url, params={"api_key": api_key}, timeout=10).json()
+    return httpx.get(url, params={"api_key": api_key}, timeout=10).json()
 def save_apod(api_key: str, path: str = ".") -> dict:
     apod_data = get_apod_data(api_key)
     img_url = apod_data["url"]
     img_name = img_url.split("/")[-1]
-    response = requests.get(img_url, stream=True, timeout=10)
+    response = httpx.get(img_url, timeout=10)
     with open(f"{path}/{img_name}", "wb+") as img_file:
-        shutil.copyfileobj(response.raw, img_file)
+        img_file.write(response.content)
     del response
     return apod_data
@@ -29,7 +34,7 @@ def get_archive_data(query: str) -> dict:
     Get the data of a particular query from NASA archives
     """
     url = "https://images-api.nasa.gov/search"
-    return requests.get(url, params={"q": query}, timeout=10).json()
+    return httpx.get(url, params={"q": query}, timeout=10).json()
 if __name__ == "__main__":
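
requests' `stream=True` plus `response.raw` has no direct httpx equivalent, which is why the hunk above buffers `response.content`; httpx's own streaming interface is the `httpx.stream()` context manager. A hedged sketch of a streamed download; the URL and file name are placeholders:

```python
import httpx

url = "https://apod.nasa.gov/apod/image/placeholder.jpg"  # placeholder URL
# httpx.stream() yields a response whose body can be consumed incrementally,
# so large files never have to sit in memory all at once.
with httpx.stream("GET", url, timeout=10, follow_redirects=True) as response:
    response.raise_for_status()
    with open("apod.jpg", "wb") as img_file:
        for chunk in response.iter_bytes():
            img_file.write(chunk)
```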

@@ -1,8 +1,17 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "fake-useragent",
+#     "httpx",
+# ]
+# ///
 import webbrowser
 from sys import argv
 from urllib.parse import parse_qs, quote
-import requests
+import httpx
 from bs4 import BeautifulSoup
 from fake_useragent import UserAgent
@@ -13,26 +22,18 @@ if __name__ == "__main__":
     url = f"https://www.google.com/search?q={query}&num=100"
-    res = requests.get(
+    res = httpx.get(
         url,
         headers={"User-Agent": str(UserAgent().random)},
         timeout=10,
     )
     try:
-        link = (
-            BeautifulSoup(res.text, "html.parser")
-            .find("div", attrs={"class": "yuRUbf"})
-            .find("a")
-            .get("href")
-        )
+        link = BeautifulSoup(res.text, "html.parser").find("div").find("a").get("href")
     except AttributeError:
         link = parse_qs(
-            BeautifulSoup(res.text, "html.parser")
-            .find("div", attrs={"class": "kCrYT"})
-            .find("a")
-            .get("href")
+            BeautifulSoup(res.text, "html.parser").find("div").find("a").get("href")
         )["url"][0]
     webbrowser.open(link)

@@ -1,6 +1,15 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "fake-useragent",
+#     "httpx",
+# ]
+# ///
 import os
-import requests
+import httpx
 from bs4 import BeautifulSoup
 from fake_useragent import UserAgent
@@ -12,7 +21,7 @@ def save_image(image_url: str, image_title: str) -> None:
     """
     Saves the image of anime character
     """
-    image = requests.get(image_url, headers=headers, timeout=10)
+    image = httpx.get(image_url, headers=headers, timeout=10)
     with open(image_title, "wb") as file:
         file.write(image.content)
@@ -22,7 +31,7 @@ def random_anime_character() -> tuple[str, str, str]:
     Returns the Title, Description, and Image Title of a random anime character .
     """
     soup = BeautifulSoup(
-        requests.get(URL, headers=headers, timeout=10).text, "html.parser"
+        httpx.get(URL, headers=headers, timeout=10).text, "html.parser"
     )
     title = soup.find("meta", attrs={"property": "og:title"}).attrs["content"]
     image_url = soup.find("meta", attrs={"property": "og:image"}).attrs["content"]

@@ -32,7 +32,14 @@ Below a Django function for the views.py file contains a login form for demonstr
 recaptcha verification.
 """
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+import httpx
 try:
     from django.contrib.auth import authenticate, login
@@ -56,7 +63,7 @@ def login_using_recaptcha(request):
     client_key = request.POST.get("g-recaptcha-response")
     # post recaptcha response to Google's recaptcha api
-    response = requests.post(
+    response = httpx.post(
         url, data={"secret": secret_key, "response": client_key}, timeout=10
     )
     # if the recaptcha api verified our keys

@@ -1,6 +1,13 @@
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
 from __future__ import annotations
-import requests
+import httpx
 valid_terms = set(
     """approved_at_utc approved_by author_flair_background_color
@@ -28,13 +35,14 @@ def get_subreddit_data(
     if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
         msg = f"Invalid search term: {invalid_search_terms}"
         raise ValueError(msg)
-    response = requests.get(
-        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
+    response = httpx.get(
+        f"https://www.reddit.com/r/{subreddit}/{age}.json?limit={limit}",
         headers={"User-agent": "A random string"},
         timeout=10,
     )
+    response.raise_for_status()
     if response.status_code == 429:
-        raise requests.HTTPError(response=response)
+        raise httpx.HTTPError(response=response)
     data = response.json()
     if not wanted_data:
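
With `raise_for_status()` in place, a 429 from Reddit already surfaces as an exception; httpx also offers `httpx.codes` for readable status comparisons, and `httpx.HTTPStatusError` carries the request and response. One way to express the same guard, shown as a sketch rather than the committed code (subreddit and listing are placeholders):

```python
import httpx

response = httpx.get(
    "https://www.reddit.com/r/python/top.json?limit=1",
    headers={"User-agent": "A random string"},
    timeout=10,
)
if response.status_code == httpx.codes.TOO_MANY_REQUESTS:  # 429
    raise httpx.HTTPStatusError(
        "rate limited by reddit.com",
        request=response.request,
        response=response,
    )
response.raise_for_status()  # any other 4xx/5xx also raises HTTPStatusError
data = response.json()
```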

@@ -4,9 +4,16 @@ Get book and author data from https://openlibrary.org
 ISBN: https://en.wikipedia.org/wiki/International_Standard_Book_Number
 """
-from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
-import requests
+from json import JSONDecodeError
+import httpx
 def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
@@ -25,7 +32,9 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
     if new_olid.count("/") != 1:
         msg = f"{olid} is not a valid Open Library olid"
         raise ValueError(msg)
-    return requests.get(f"https://openlibrary.org/{new_olid}.json", timeout=10).json()
+    return httpx.get(
+        f"https://openlibrary.org/{new_olid}.json", timeout=10, follow_redirects=True
+    ).json()
 def summarize_book(ol_book_data: dict) -> dict:
@@ -36,8 +45,7 @@ def summarize_book(ol_book_data: dict) -> dict:
         "title": "Title",
         "publish_date": "Publish date",
         "authors": "Authors",
-        "number_of_pages": "Number of pages:",
+        "number_of_pages": "Number of pages",
-        "first_sentence": "First sentence",
         "isbn_10": "ISBN (10)",
         "isbn_13": "ISBN (13)",
     }
@@ -45,7 +53,6 @@
     data["Authors"] = [
         get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
     ]
-    data["First sentence"] = data["First sentence"]["value"]
     for key, value in data.items():
         if isinstance(value, list):
             data[key] = ", ".join(value)
@@ -71,5 +78,5 @@ if __name__ == "__main__":
     try:
         book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
         print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
-    except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
+    except JSONDecodeError:
         print(f"Sorry, there are no results for ISBN: {isbn}.")

@@ -1,11 +1,18 @@
 # Created by sarathkaul on 12/11/19
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "httpx",
+# ]
+# ///
+import httpx
 def send_slack_message(message_body: str, slack_url: str) -> None:
     headers = {"Content-Type": "application/json"}
-    response = requests.post(
+    response = httpx.post(
         slack_url, json={"text": message_body}, headers=headers, timeout=10
     )
     if response.status_code != 200:

@@ -1,6 +1,6 @@
 import json
-import requests
+import httpx
 from .fetch_github_info import AUTHENTICATED_USER_ENDPOINT, fetch_github_info
@@ -21,7 +21,7 @@ def test_fetch_github_info(monkeypatch):
         assert "Accept" in kwargs["headers"]
         return FakeResponse(b'{"login":"test","id":1}')
-    monkeypatch.setattr(requests, "get", mock_response)
+    monkeypatch.setattr(httpx, "get", mock_response)
     result = fetch_github_info("token")
     assert result["login"] == "test"
     assert result["id"] == 1

@@ -5,19 +5,31 @@ Provide the current worldwide COVID-19 statistics.
 This data is being scrapped from 'https://www.worldometers.info/coronavirus/'.
 """
-import requests
+# /// script
+# requires-python = ">=3.13"
+# dependencies = [
+#     "beautifulsoup4",
+#     "httpx",
+# ]
+# ///
+import httpx
 from bs4 import BeautifulSoup
-def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
+def world_covid19_stats(
+    url: str = "https://www.worldometers.info/coronavirus/",
+) -> dict:
     """
     Return a dict of current worldwide COVID-19 statistics
     """
-    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
-    keys = soup.findAll("h1")
-    values = soup.findAll("div", {"class": "maincounter-number"})
-    keys += soup.findAll("span", {"class": "panel-title"})
-    values += soup.findAll("div", {"class": "number-table-main"})
+    soup = BeautifulSoup(
+        httpx.get(url, timeout=10, follow_redirects=True).text, "html.parser"
+    )
+    keys = soup.find_all("h1")
+    values = soup.find_all("div", {"class": "maincounter-number"})
+    keys += soup.find_all("span", {"class": "panel-title"})
+    values += soup.find_all("div", {"class": "number-table-main"})
     return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}