NewsCrawler.java
import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.berkeley.BreadthCrawler;
import org.jsoup.nodes.Document;

/**
 * Crawling news from hfut news
 *
 * @author hu
 */
public class NewsCrawler extends BreadthCrawler {

    /**
     * @param crawlPath crawlPath is the path of the directory which maintains
     *                  information of this crawler
     * @param autoParse if autoParse is true, BreadthCrawler will automatically
     *                  extract links which match the regex rules from each page
     */
    public NewsCrawler(String crawlPath, boolean autoParse) {
        super(crawlPath, autoParse);
        /* start page */
        this.addSeed("http://news.hfut.edu.cn/list-1-1.html");
        /* fetch urls like http://news.hfut.edu.cn/show-xxxxxx.html */
        this.addRegex("http://news.hfut.edu.cn/show-.*html");
        /* regex rules prefixed with "-" are exclusion rules */
        /* do not fetch jpg|png|gif */
        this.addRegex("-.*\\.(jpg|png|gif).*");
        /* do not fetch urls containing # */
        this.addRegex("-.*#.*");
    }
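
    /*
     * visit() is invoked once for each page that is successfully fetched.
     * The Page object wraps the downloaded response, and the CrawlDatums
     * parameter collects links to be crawled in later rounds.
     */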
    @Override
    public void visit(Page page, CrawlDatums next) {
        String url = page.getUrl();
        /* if page is a news page */
        if (page.matchUrl("http://news.hfut.edu.cn/show-.*html")) {
            /* Jsoup is used to parse the page; page.select delegates to the Jsoup Document */
            Document doc = page.getDoc();
            /* extract title and content of the news by CSS selector */
            String title = page.select("div[id=Article]>h2").first().text();
            String content = page.select("div#artibody", 0).text();

            System.out.println("URL:\n" + url);
            System.out.println("title:\n" + title);
            System.out.println("content:\n" + content);

            /* If you want to add urls to crawl, add them to next */
            /* WebCollector automatically filters links that have been fetched before */
            /* If autoParse is true and a link added to next does not match the regex rules, it will also be filtered */
            //next.add("http://xxxxxx.com");
        }
    }

    public static void main(String[] args) throws Exception {
        NewsCrawler crawler = new NewsCrawler("crawl", true);
        crawler.setThreads(50);
        crawler.setTopN(100);
        /* uncomment to make the crawl resumable from the data stored under crawlPath */
        //crawler.setResumable(true);
        /* start crawl with depth of 4 */
        crawler.start(4);
    }
}