-
Notifications
You must be signed in to change notification settings - Fork 5
/
dailystar-scrubber.py
73 lines (56 loc) · 1.87 KB
/
dailystar-scrubber.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
# encoding=utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import newspaper
from newspaper import Article
import csv, os
from bs4 import BeautifulSoup
import urllib
# Load the search keywords, one per line, normalised to lowercase unicode
# so matching against article text is case-insensitive.
with open("keywords.txt") as f:
    req_keywords = [line.strip().decode('utf-8').lower() for line in f]

# Site being scraped and the section whose pagination we walk.
newspaper_base_url = 'http://www.thedailystar.net'
category = 'city'
def checkif_kw_exist(list_one, list_two):
    """Return (found, common) where common is the set of items shared by
    both sequences and found is True iff that set is non-empty."""
    common_kw = set(list_one).intersection(list_two)
    return bool(common_kw), common_kw
def get_article_info(url):
    """Download and parse the article at *url*.

    Returns [url, publish_date, title, text] when the article text
    contains at least one required keyword, otherwise False.
    """
    a = Article(url)
    a.download()
    a.parse()
    # BUG FIX: req_keywords are lowercased at load time, so the article
    # text must be lowercased too — otherwise capitalized occurrences
    # (e.g. sentence-initial words, headlines) never match.
    # Also dropped the a.nlp() call: its outputs (summary/keywords)
    # were never read, and it is expensive per article.
    success, checked_kws = checkif_kw_exist(req_keywords, a.text.lower().split())
    if success:
        return [url, a.publish_date, a.title, a.text]
    return False
# CSV the crawl loop appends matching articles to. Create it up front so
# every later open(..., 'a') succeeds even on the first run.
# (Removed a stale commented-out copy of the per-URL processing loop —
# the live version below supersedes it.)
output_file = "./output.csv"
if not os.path.exists(output_file):
    open(output_file, 'w').close()
for index in range(1,3700,1):
print '--------------------'
print 'checking page: {id}'.format(id=index)
print '--------------------'
page_url = newspaper_base_url + '/' + category + '?page='+str(index)
page_soup = BeautifulSoup( urllib.urlopen(page_url).read())
primary_tag = page_soup.find_all("h4", attrs={"class": "pad-bottom-small"})
for tag in primary_tag:
url = tag.find("a")
print url
url = newspaper_base_url + url.get('href')
result = get_article_info(url)
if result is not False:
with open(output_file, 'a') as f:
writeFile = csv.writer(f)
writeFile.writerow(result)
f.close
else: pass