|
from urllib.parse import quote

import requests
from bs4 import BeautifulSoup
| 3 | + |
| 4 | +language_symbols = {} |
| 5 | + |
| 6 | + |
| 7 | +def lang(): |
| 8 | + try: |
| 9 | + response = requests.get("https://www.wikipedia.org/") |
| 10 | + response.raise_for_status() |
| 11 | + soup = BeautifulSoup(response.content, 'html.parser') |
| 12 | + |
| 13 | + for option in soup.find_all('option'): |
| 14 | + language = option.text |
| 15 | + symbol = option['lang'] |
| 16 | + language_symbols[language] = symbol |
| 17 | + |
| 18 | + return list(language_symbols.keys()) |
| 19 | + |
| 20 | + except requests.exceptions.RequestException as e: |
| 21 | + print("Error fetching language data:", e) |
| 22 | + return [] |
| 23 | + |
| 24 | + |
| 25 | +def data(selected_topic, selected_language): |
| 26 | + symbol = language_symbols.get(selected_language) |
| 27 | + |
| 28 | + try: |
| 29 | + url = f"https://{symbol}.wikipedia.org/wiki/{selected_topic}" |
| 30 | + data_response = requests.get(url) |
| 31 | + data_response.raise_for_status() |
| 32 | + data_soup = BeautifulSoup(data_response.content, 'html.parser') |
| 33 | + |
| 34 | + main_content = data_soup.find('div', {'id': 'mw-content-text'}) |
| 35 | + filtered_content = "" |
| 36 | + |
| 37 | + if main_content: |
| 38 | + for element in main_content.descendants: |
| 39 | + if element.name in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']: |
| 40 | + filtered_content += "\n" + element.get_text(strip=True).upper() + "\n" |
| 41 | + |
| 42 | + elif element.name == 'p': |
| 43 | + filtered_content += element.get_text(strip=True) + "\n" |
| 44 | + |
| 45 | + return filtered_content |
| 46 | + |
| 47 | + except requests.exceptions.RequestException as e: |
| 48 | + print("Error fetching Wikipedia content:", e) |
| 49 | + return "Error fetching data." |
| 50 | + |
| 51 | + |
| 52 | +def get_image_urls(query): |
| 53 | + try: |
| 54 | + search_url = f"https://www.google.com/search?q={query}&tbm=isch" |
| 55 | + image_response = requests.get(search_url) |
| 56 | + image_response.raise_for_status() |
| 57 | + image_soup = BeautifulSoup(image_response.content, 'html.parser') |
| 58 | + |
| 59 | + image_urls = [] |
| 60 | + for img in image_soup.find_all('img'): |
| 61 | + image_url = img.get('src') |
| 62 | + if image_url and image_url.startswith("http"): |
| 63 | + image_urls.append(image_url) |
| 64 | + |
| 65 | + return image_urls[0] |
| 66 | + |
| 67 | + except requests.exceptions.RequestException as e: |
| 68 | + print("Error fetching image URLs:", e) |
| 69 | + return None |
0 commit comments