import time
import urllib.parse

import bs4
import requests

def continue_crawl(search_history, target_url, max_steps=25):
    """
    Checks whether the crawl should continue: stop when we have reached
    the target article, detected a loop (an article we have already
    seen), or the search has taken too many steps.
    """
if search_history[-1] == target_url:
print("We've found the target article!")
return False
elif len(search_history) > max_steps:
print("The search has gone on suspiciously long, aborting search!")
return False
elif search_history[-1] in search_history[:-1]:
print("We've arrived at an article we've already seen, aborting search!")
return False
else:
return True
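
# A quick illustration of the stopping rules (hypothetical histories,
# not part of the crawl itself):
#   continue_crawl(["A", "B"], "B")      -> False (target reached)
#   continue_crawl(["A", "B", "A"], "C") -> False (loop detected)
#   continue_crawl(["A", "B"], "C")      -> True  (keep crawling)
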
def find_first_link(url):
    """
    Finds the first link in the body of a Wikipedia article and returns
    it as an absolute URL, or None if the article contains no links.
    """
response = requests.get(url)
html = response.text
soup = bs4.BeautifulSoup(html, "html.parser")
# This div contains the article's body
# (June 2017 Note: Body nested in two div tags)
content_div = soup.find(id="mw-content-text").find(class_="mw-parser-output")
# stores the first link found in the article, if the article contains no
# links this value will remain None
article_link = None
# Find all the direct children of content_div that are paragraphs
for element in content_div.find_all("p", recursive=False):
# Find the first anchor tag that's a direct child of a paragraph.
# It's important to only look at direct children, because other types
# of link, e.g. footnotes and pronunciation, could come before the
# first link to an article. Those other link types aren't direct
# children though, they're in divs of various classes.
if element.find("a", recursive=False):
article_link = element.find("a", recursive=False).get("href")
break
if not article_link:
return
# Build a full url from the relative article_link url
first_link = urllib.parse.urljoin("https://en.wikipedia.org/", article_link)
return first_link
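
# For example (assuming a typical article whose first paragraph link is
# the relative href "/wiki/Formula_One"):
#   urllib.parse.urljoin("https://en.wikipedia.org/", "/wiki/Formula_One")
#   -> "https://en.wikipedia.org/wiki/Formula_One"
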
def web_crawl():
    """
    Drives the crawl: keeps following the first link of the last article
    in article_chain until continue_crawl says we have found the target,
    hit a loop, or run for too many steps.
    """
    while continue_crawl(article_chain, target_url):
        # the last article in the chain is the one we still need to follow
        current_article = article_chain[-1]
        # find the first link in that article's html
        first_link = find_first_link(current_article)
        # stop if the article contains no outgoing links at all
        if not first_link:
            print("We've arrived at an article with no links, aborting search!")
            break
        # add the first link to the article chain
        article_chain.append(first_link)
        # wait about two seconds between requests to be polite to Wikipedia
        time.sleep(2)

# start the crawl: from Robert Kubica toward Lewis Hamilton
target_url = "https://en.wikipedia.org/wiki/Lewis_Hamilton"
article_chain = ["https://en.wikipedia.org/wiki/Robert_Kubica"]
web_crawl()
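
# To chase a different pair of articles (hypothetical example), swap the
# two URLs above, e.g.:
#   target_url = "https://en.wikipedia.org/wiki/Philosophy"
#   article_chain = ["https://en.wikipedia.org/wiki/Formula_One"]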