mirror of
https://github.com/TheAlgorithms/Python.git
synced 2024-12-18 01:00:15 +00:00
4d0c830d2c
* ci(pre-commit): Add ``flake8-builtins`` additional dependency to ``pre-commit`` (#7104) * refactor: Fix ``flake8-builtins`` (#7104) * fix(lru_cache): Fix naming conventions in docstrings (#7104) * ci(pre-commit): Order additional dependencies alphabetically (#7104) * fix(lfu_cache): Correct function name in docstring (#7104) * Update strings/snake_case_to_camel_pascal_case.py Co-authored-by: Christian Clauss <cclauss@me.com> * Update data_structures/stacks/next_greater_element.py Co-authored-by: Christian Clauss <cclauss@me.com> * Update digital_image_processing/index_calculation.py Co-authored-by: Christian Clauss <cclauss@me.com> * Update graphs/prim.py Co-authored-by: Christian Clauss <cclauss@me.com> * Update hashes/djb2.py Co-authored-by: Christian Clauss <cclauss@me.com> * refactor: Rename `_builtin` to `builtin_` ( #7104) * fix: Rename all instances (#7104) * refactor: Update variable names (#7104) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ci: Create ``tox.ini`` and ignore ``A003`` (#7123) * revert: Remove function name changes (#7104) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename tox.ini to .flake8 * Update data_structures/heap/heap.py Co-authored-by: Dhruv Manilawala <dhruvmanila@gmail.com> * refactor: Rename `next_` to `next_item` (#7104) * ci(pre-commit): Add `flake8` plugin `flake8-bugbear` (#7127) * refactor: Follow `flake8-bugbear` plugin (#7127) * fix: Correct `knapsack` code (#7127) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Christian Clauss <cclauss@me.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Dhruv Manilawala <dhruvmanila@gmail.com>
41 lines
936 B
Python
41 lines
936 B
Python
import webbrowser
|
|
from sys import argv
|
|
from urllib.parse import parse_qs, quote
|
|
|
|
import requests
|
|
from bs4 import BeautifulSoup
|
|
from fake_useragent import UserAgent
|
|
|
|
if __name__ == "__main__":
    """Open the top Google search result for a query in the default browser.

    The query comes from the command-line arguments if any were given,
    otherwise from an interactive prompt.
    """
    # URL-encode the whole query (quote encodes a space as %20, and also
    # escapes &, #, ? etc. so an argument cannot corrupt the query string).
    if len(argv) > 1:
        query = quote(" ".join(argv[1:]))
    else:
        query = quote(input("Search: "))  # input() already returns str

    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"

    # A randomized desktop User-Agent helps avoid Google's bot detection;
    # the timeout keeps the script from hanging forever on a stalled request.
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
        timeout=10,
    )

    try:
        # Regular result layout: the first organic result sits in a div
        # with class "yuRUbf" whose anchor links directly to the target.
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        # Fallback ("basic HTML") layout: results live in "kCrYT" divs and
        # the anchor's href is a Google redirect whose real destination is
        # carried in the "url" query-string parameter.
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)