Enable ruff S113 rule (#11375)
* Enable ruff S113 rule

* [pre-commit.ci] auto fixes from pre-commit.com hooks

  for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Parent: 7b88e15b1c
Commit: 2702bf9400
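Ruff rule S113 flags any `requests` call made without a `timeout`: without one, a stalled server can hang the program indefinitely. This commit removes S113 from the ignore list and applies the same mechanical fix across the repository by passing `timeout=10` to every `requests.get`/`requests.post` call. As a minimal sketch of the resulting pattern (the URL is borrowed from one of the touched files, and the exception handling is illustrative, not part of the commit):

import requests

API_URL = "https://zenquotes.io/api/today"  # illustrative endpoint from one of the touched files


def fetch_quotes(url: str = API_URL) -> list:
    # S113-compliant call: give up after 10 seconds instead of hanging forever.
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
    except requests.exceptions.Timeout:
        # The server did not respond within the 10-second budget.
        return []
    return response.json()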
@@ -19,7 +19,8 @@ def collect_dataset():
     """
     response = requests.get(
         "https://raw.githubusercontent.com/yashLadha/The_Math_of_Intelligence/"
-        "master/Week1/ADRvsRating.csv"
+        "master/Week1/ADRvsRating.csv",
+        timeout=10,
     )
     lines = response.text.splitlines()
     data = []
@@ -14,7 +14,6 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule
   "RUF002", # Docstring contains ambiguous {}. Did you mean {}?
   "RUF003", # Comment contains ambiguous {}. Did you mean {}?
   "S101", # Use of `assert` detected -- DO NOT FIX
-  "S113", # Probable use of requests call without timeout -- FIX ME
   "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME
   "SLF001", # Private member accessed: `_Iterator` -- FIX ME
   "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX
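The pyproject.toml hunk above deletes "S113" from ruff's lint.ignore list, so the rule is now enforced repo-wide; every remaining hunk in this commit is the corresponding call-site fix. An alternative design, not used by this commit, would be to centralize the default in a small wrapper so new call sites cannot forget it. A hypothetical sketch, where `DEFAULT_TIMEOUT` and `get_with_timeout` are invented names rather than anything in this repository:

import requests

DEFAULT_TIMEOUT = 10  # seconds; hypothetical project-wide default


def get_with_timeout(url: str, **kwargs) -> requests.Response:
    # Always apply a timeout unless the caller explicitly passes one.
    kwargs.setdefault("timeout", DEFAULT_TIMEOUT)
    return requests.get(url, **kwargs)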
@@ -57,7 +57,7 @@ def added_solution_file_path() -> list[pathlib.Path]:
         "Accept": "application/vnd.github.v3+json",
         "Authorization": "token " + os.environ["GITHUB_TOKEN"],
     }
-    files = requests.get(get_files_url(), headers=headers).json()
+    files = requests.get(get_files_url(), headers=headers, timeout=10).json()
     for file in files:
         filepath = pathlib.Path.cwd().joinpath(file["filename"])
         if (
@@ -11,13 +11,13 @@ BASE_URL = "https://api.carbonintensity.org.uk/intensity"

 # Emission in the last half hour
 def fetch_last_half_hour() -> str:
-    last_half_hour = requests.get(BASE_URL).json()["data"][0]
+    last_half_hour = requests.get(BASE_URL, timeout=10).json()["data"][0]
     return last_half_hour["intensity"]["actual"]


 # Emissions in a specific date range
 def fetch_from_to(start, end) -> list:
-    return requests.get(f"{BASE_URL}/{start}/{end}").json()["data"]
+    return requests.get(f"{BASE_URL}/{start}/{end}", timeout=10).json()["data"]


 if __name__ == "__main__":
@@ -18,7 +18,9 @@ class CovidData(NamedTuple):

 def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> CovidData:
     xpath_str = '//div[@class = "maincounter-number"]/span/text()'
-    return CovidData(*html.fromstring(requests.get(url).content).xpath(xpath_str))
+    return CovidData(
+        *html.fromstring(requests.get(url, timeout=10).content).xpath(xpath_str)
+    )


 fmt = """Total COVID-19 cases in the world: {}
@@ -8,7 +8,7 @@ from fake_useragent import UserAgent
 if __name__ == "__main__":
     print("Googling.....")
     url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
-    res = requests.get(url, headers={"UserAgent": UserAgent().random})
+    res = requests.get(url, headers={"UserAgent": UserAgent().random}, timeout=10)
     # res.raise_for_status()
     with open("project1a.html", "wb") as out_file:  # only for knowing the class
         for data in res.iter_content(10000):
@@ -11,7 +11,9 @@ def get_citation(base_url: str, params: dict) -> str:
     """
     Return the citation number.
     """
-    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
+    soup = BeautifulSoup(
+        requests.get(base_url, params=params, timeout=10).content, "html.parser"
+    )
     div = soup.find("div", attrs={"class": "gs_ri"})
     anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
     return anchors[2].get_text()
@@ -176,7 +176,7 @@ def convert_currency(
     params = locals()
     # from is a reserved keyword
     params["from"] = params.pop("from_")
-    res = requests.get(URL_BASE, params=params).json()
+    res = requests.get(URL_BASE, params=params, timeout=10).json()
     return str(res["amount"]) if res["error"] == 0 else res["error_message"]

@@ -4,7 +4,9 @@ from bs4 import BeautifulSoup

 def stock_price(symbol: str = "AAPL") -> str:
     url = f"https://finance.yahoo.com/quote/{symbol}?p={symbol}"
-    yahoo_finance_source = requests.get(url, headers={"USER-AGENT": "Mozilla/5.0"}).text
+    yahoo_finance_source = requests.get(
+        url, headers={"USER-AGENT": "Mozilla/5.0"}, timeout=10
+    ).text
     soup = BeautifulSoup(yahoo_finance_source, "html.parser")
     specific_fin_streamer_tag = soup.find("fin-streamer", {"data-test": "qsp-price"})

@@ -20,13 +20,13 @@ def current_weather(location: str) -> list[dict]:
     if OPENWEATHERMAP_API_KEY:
         params_openweathermap = {"q": location, "appid": OPENWEATHERMAP_API_KEY}
         response_openweathermap = requests.get(
-            OPENWEATHERMAP_URL_BASE, params=params_openweathermap
+            OPENWEATHERMAP_URL_BASE, params=params_openweathermap, timeout=10
         )
         weather_data.append({"OpenWeatherMap": response_openweathermap.json()})
     if WEATHERSTACK_API_KEY:
         params_weatherstack = {"query": location, "access_key": WEATHERSTACK_API_KEY}
         response_weatherstack = requests.get(
-            WEATHERSTACK_URL_BASE, params=params_weatherstack
+            WEATHERSTACK_URL_BASE, params=params_weatherstack, timeout=10
         )
         weather_data.append({"Weatherstack": response_weatherstack.json()})
     if not weather_data:
@@ -7,7 +7,7 @@ def horoscope(zodiac_sign: int, day: str) -> str:
         "https://www.horoscope.com/us/horoscopes/general/"
         f"horoscope-general-daily-{day}.aspx?sign={zodiac_sign}"
     )
-    soup = BeautifulSoup(requests.get(url).content, "html.parser")
+    soup = BeautifulSoup(requests.get(url, timeout=10).content, "html.parser")
     return soup.find("div", class_="main-horoscope").p.text

@@ -39,7 +39,9 @@ def download_images_from_google_query(query: str = "dhaka", max_images: int = 5)
         "ijn": "0",
     }

-    html = requests.get("https://www.google.com/search", params=params, headers=headers)
+    html = requests.get(
+        "https://www.google.com/search", params=params, headers=headers, timeout=10
+    )
     soup = BeautifulSoup(html.text, "html.parser")
     matched_images_data = "".join(
         re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
@@ -77,7 +77,7 @@ def emails_from_url(url: str = "https://github.com") -> list[str]:

     try:
         # Open URL
-        r = requests.get(url)
+        r = requests.get(url, timeout=10)

         # pass the raw HTML to the parser to get links
         parser.feed(r.text)
@@ -88,7 +88,7 @@ def emails_from_url(url: str = "https://github.com") -> list[str]:
             # open URL.
             # read = requests.get(link)
             try:
-                read = requests.get(link)
+                read = requests.get(link, timeout=10)
                 # Get the valid email.
                 emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                 # If not in list then append it.
@@ -28,7 +28,7 @@ def search_scraper(anime_name: str) -> list:
     search_url = f"{BASE_URL}/search/{anime_name}"

     response = requests.get(
-        search_url, headers={"UserAgent": UserAgent().chrome}
+        search_url, headers={"UserAgent": UserAgent().chrome}, timeout=10
     )  # request the url.

     # Is the response ok?
@@ -82,7 +82,9 @@ def search_anime_episode_list(episode_endpoint: str) -> list:

     request_url = f"{BASE_URL}{episode_endpoint}"

-    response = requests.get(url=request_url, headers={"UserAgent": UserAgent().chrome})
+    response = requests.get(
+        url=request_url, headers={"UserAgent": UserAgent().chrome}, timeout=10
+    )
     response.raise_for_status()

     soup = BeautifulSoup(response.text, "html.parser")
@@ -132,7 +134,7 @@ def get_anime_episode(episode_endpoint: str) -> list:
     episode_page_url = f"{BASE_URL}{episode_endpoint}"

     response = requests.get(
-        url=episode_page_url, headers={"User-Agent": UserAgent().chrome}
+        url=episode_page_url, headers={"User-Agent": UserAgent().chrome}, timeout=10
     )
     response.raise_for_status()

@@ -7,7 +7,7 @@ _NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="

 def fetch_bbc_news(bbc_news_api_key: str) -> None:
     # fetching a list of articles in json format
-    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
+    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key, timeout=10).json()
     # each article in the list is a dict
     for i, article in enumerate(bbc_news_page["articles"], 1):
         print(f"{i}.) {article['title']}")
@@ -42,7 +42,7 @@ def fetch_github_info(auth_token: str) -> dict[Any, Any]:
         "Authorization": f"token {auth_token}",
         "Accept": "application/vnd.github.v3+json",
     }
-    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
+    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers, timeout=10).json()


 if __name__ == "__main__":  # pragma: no cover
@@ -13,7 +13,9 @@ url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


 def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
-    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
+    soup = BeautifulSoup(
+        requests.get(url + location, timeout=10).content, "html.parser"
+    )
     # This attribute finds out all the specifics listed in a job
     for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
         job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
@@ -14,11 +14,11 @@ API_ENDPOINT_URL = "https://zenquotes.io/api"


 def quote_of_the_day() -> list:
-    return requests.get(API_ENDPOINT_URL + "/today").json()
+    return requests.get(API_ENDPOINT_URL + "/today", timeout=10).json()


 def random_quotes() -> list:
-    return requests.get(API_ENDPOINT_URL + "/random").json()
+    return requests.get(API_ENDPOINT_URL + "/random", timeout=10).json()


 if __name__ == "__main__":
@@ -42,7 +42,7 @@ def fetch_pharmacy_and_price_list(drug_name: str, zip_code: str) -> list | None:
         return None

     request_url = BASE_URL.format(drug_name, zip_code)
-    response = get(request_url)
+    response = get(request_url, timeout=10)

     # Is the response ok?
     response.raise_for_status()
@@ -24,7 +24,9 @@ def get_amazon_product_data(product: str = "laptop") -> DataFrame:
         ),
         "Accept-Language": "en-US, en;q=0.5",
     }
-    soup = BeautifulSoup(requests.get(url, headers=header).text, features="lxml")
+    soup = BeautifulSoup(
+        requests.get(url, headers=header, timeout=10).text, features="lxml"
+    )
     # Initialize a Pandas dataframe with the column titles
     data_frame = DataFrame(
         columns=[
@@ -8,7 +8,7 @@ from bs4 import BeautifulSoup

 def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
     url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
-    soup = BeautifulSoup(requests.get(url).text, "html.parser")
+    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
     titles = soup.find_all("td", attrs="titleColumn")
     ratings = soup.find_all("td", class_="ratingColumn imdbRating")
     return {
@@ -8,7 +8,7 @@ def get_ip_geolocation(ip_address: str) -> str:
         url = f"https://ipinfo.io/{ip_address}/json"

         # Send a GET request to the API
-        response = requests.get(url)
+        response = requests.get(url, timeout=10)

         # Check if the HTTP request was successful
         response.raise_for_status()
@@ -57,7 +57,7 @@ def get_forbes_real_time_billionaires() -> list[dict[str, int | str]]:
     Returns:
         List of top 10 realtime billionaires data.
     """
-    response_json = requests.get(API_URL).json()
+    response_json = requests.get(API_URL, timeout=10).json()
     return [
         {
             "Name": person["personName"],
@@ -5,7 +5,7 @@ import requests

 def get_hackernews_story(story_id: str) -> dict:
     url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
-    return requests.get(url).json()
+    return requests.get(url, timeout=10).json()


 def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
@@ -13,7 +13,7 @@ def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
     Get the top max_stories posts from HackerNews - https://news.ycombinator.com/
     """
     url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
-    story_ids = requests.get(url).json()[:max_stories]
+    story_ids = requests.get(url, timeout=10).json()[:max_stories]
     return [get_hackernews_story(story_id) for story_id in story_ids]

@@ -11,7 +11,7 @@ def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
     """
     formatted_query = "+".join(query.split())
     url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
-    gifs = requests.get(url).json()["data"]
+    gifs = requests.get(url, timeout=10).json()["data"]
     return [gif["url"] for gif in gifs]

@@ -39,7 +39,7 @@ class InstagramUser:
         """
         Return a dict of user information
         """
-        html = requests.get(self.url, headers=headers).text
+        html = requests.get(self.url, headers=headers, timeout=10).text
         scripts = BeautifulSoup(html, "html.parser").find_all("script")
         try:
             return extract_user_profile(scripts[4])
@@ -15,7 +15,7 @@ def download_image(url: str) -> str:
         A message indicating the result of the operation.
     """
     try:
-        response = requests.get(url)
+        response = requests.get(url, timeout=10)
         response.raise_for_status()
     except requests.exceptions.RequestException as e:
         return f"An error occurred during the HTTP request to {url}: {e!r}"
@@ -30,7 +30,7 @@ def download_image(url: str) -> str:
         return f"Image URL not found in meta tag {image_meta_tag}."

     try:
-        image_data = requests.get(image_url).content
+        image_data = requests.get(image_url, timeout=10).content
     except requests.exceptions.RequestException as e:
         return f"An error occurred during the HTTP request to {image_url}: {e!r}"
     if not image_data:
@@ -5,8 +5,8 @@ import requests

 def download_video(url: str) -> bytes:
     base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
-    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
-    return requests.get(video_url).content
+    video_url = requests.get(base_url + url, timeout=10).json()[0]["urls"][0]["src"]
+    return requests.get(video_url, timeout=10).content


 if __name__ == "__main__":
@@ -9,14 +9,14 @@ def get_apod_data(api_key: str) -> dict:
     Get your API Key from: https://api.nasa.gov/
     """
     url = "https://api.nasa.gov/planetary/apod"
-    return requests.get(url, params={"api_key": api_key}).json()
+    return requests.get(url, params={"api_key": api_key}, timeout=10).json()


 def save_apod(api_key: str, path: str = ".") -> dict:
     apod_data = get_apod_data(api_key)
     img_url = apod_data["url"]
     img_name = img_url.split("/")[-1]
-    response = requests.get(img_url, stream=True)
+    response = requests.get(img_url, stream=True, timeout=10)

     with open(f"{path}/{img_name}", "wb+") as img_file:
         shutil.copyfileobj(response.raw, img_file)
@@ -29,7 +29,7 @@ def get_archive_data(query: str) -> dict:
     Get the data of a particular query from NASA archives
     """
     url = "https://images-api.nasa.gov/search"
-    return requests.get(url, params={"q": query}).json()
+    return requests.get(url, params={"q": query}, timeout=10).json()


 if __name__ == "__main__":
@@ -16,6 +16,7 @@ if __name__ == "__main__":
     res = requests.get(
         url,
         headers={"User-Agent": str(UserAgent().random)},
+        timeout=10,
     )

     try:
@@ -12,7 +12,7 @@ def save_image(image_url: str, image_title: str) -> None:
     """
     Saves the image of anime character
     """
-    image = requests.get(image_url, headers=headers)
+    image = requests.get(image_url, headers=headers, timeout=10)
     with open(image_title, "wb") as file:
         file.write(image.content)

@@ -21,7 +21,9 @@ def random_anime_character() -> tuple[str, str, str]:
     """
     Returns the Title, Description, and Image Title of a random anime character .
     """
-    soup = BeautifulSoup(requests.get(URL, headers=headers).text, "html.parser")
+    soup = BeautifulSoup(
+        requests.get(URL, headers=headers, timeout=10).text, "html.parser"
+    )
     title = soup.find("meta", attrs={"property": "og:title"}).attrs["content"]
     image_url = soup.find("meta", attrs={"property": "og:image"}).attrs["content"]
     description = soup.find("p", id="description").get_text()
@@ -56,7 +56,9 @@ def login_using_recaptcha(request):
     client_key = request.POST.get("g-recaptcha-response")

     # post recaptcha response to Google's recaptcha api
-    response = requests.post(url, data={"secret": secret_key, "response": client_key})
+    response = requests.post(
+        url, data={"secret": secret_key, "response": client_key}, timeout=10
+    )
     # if the recaptcha api verified our keys
     if response.json().get("success", False):
         # authenticate the user
@@ -31,6 +31,7 @@ def get_subreddit_data(
     response = requests.get(
         f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
         headers={"User-agent": "A random string"},
+        timeout=10,
     )
     if response.status_code == 429:
         raise requests.HTTPError(response=response)
@@ -25,7 +25,7 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
     if new_olid.count("/") != 1:
         msg = f"{olid} is not a valid Open Library olid"
         raise ValueError(msg)
-    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
+    return requests.get(f"https://openlibrary.org/{new_olid}.json", timeout=10).json()


 def summarize_book(ol_book_data: dict) -> dict:
@@ -5,7 +5,9 @@ import requests

 def send_slack_message(message_body: str, slack_url: str) -> None:
     headers = {"Content-Type": "application/json"}
-    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
+    response = requests.post(
+        slack_url, json={"text": message_body}, headers=headers, timeout=10
+    )
     if response.status_code != 200:
         msg = (
             "Request to slack returned an error "
@@ -13,7 +13,7 @@ def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus")
     """
     Return a dict of current worldwide COVID-19 statistics
     """
-    soup = BeautifulSoup(requests.get(url).text, "html.parser")
+    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
     keys = soup.findAll("h1")
     values = soup.findAll("div", {"class": "maincounter-number"})
     keys += soup.findAll("span", {"class": "panel-title"})