How do you implement a web crawler in Java?




A web crawler is a program that automatically fetches web pages: it downloads pages from the World Wide Web on behalf of a search engine and is one of a search engine's key components.

A traditional crawler starts from the URLs of one or more seed pages, collects the URLs found on those pages and, while crawling, keeps extracting new URLs from the current page and adding them to a queue until some stop condition is met. For vertical search, a focused crawler, i.e. one that only crawls pages on a specific topic, is the better fit.

Below is the core code of a simple crawler implemented in Java:
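The methods below belong to a crawler class (called NaiveCrawler in the main method) and refer to several fields, a CrawlerUrl value class and a handful of helpers (printCrawlInfo, saveContent, addUrlsToUrlQueue, markUrlAsVisited, continueCrawling, closeOutputStream) that are not shown here. The following is a minimal sketch of the imports and fields they appear to assume; the names not used directly in the snippets (visitedUrls, maxDepth) and the two link-matching patterns are illustrative guesses only.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.HttpStatus;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;

public class NaiveCrawler {
    private Queue<CrawlerUrl> urlQueue;           // URLs waiting to be crawled
    private Map<String, CrawlerUrl> visitedUrls = new HashMap<String, CrawlerUrl>(); // assumed dedup map
    private Pattern regexpSearchPattern;          // topic-relevance pattern
    private long delayBetweenUrls;                // politeness delay in milliseconds
    private int maxDepth;                         // assumed depth limit

    // Illustrative patterns for absolute and site-relative <a href="..."> links.
    private Pattern httpRegexp = Pattern.compile("<a href=\"http[^\"]*\"", Pattern.CASE_INSENSITIVE);
    private Pattern relativeRegexp = Pattern.compile("<a href=\"/[^\"]*\"", Pattern.CASE_INSENSITIVE);

    // CrawlerUrl is assumed to be a small value class holding the URL string and the
    // depth at which it was found, with getUrlString(), getURL() and getDepth() accessors.

    // ... constructor and the methods shown below ...
}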

public void crawl() throws Throwable {
    while (continueCrawling()) {
        CrawlerUrl url = getNextUrl(); // take the next URL off the crawl queue
        if (url != null) {
            printCrawlInfo();
            String content = getContent(url); // fetch the page text for this URL

            // A focused crawler only keeps pages relevant to its topic;
            // here a simple regular-expression match is used.
            if (isContentRelevant(content, this.regexpSearchPattern)) {
                saveContent(url, content); // save the page locally

                // Extract the links in the page and add them to the crawl queue.
                Collection<String> urlStrings = extractUrls(content, url);
                addUrlsToUrlQueue(url, urlStrings);
            } else {
                System.out.println(url + " is not relevant, ignoring ...");
            }

            // Pause between requests so the target site does not block the crawler.
            Thread.sleep(this.delayBetweenUrls);
        }
    }
    closeOutputStream();
}

private CrawlerUrl getNextUrl() throws Throwable {
    CrawlerUrl nextUrl = null;
    while ((nextUrl == null) && (!urlQueue.isEmpty())) {
        CrawlerUrl crawlerUrl = this.urlQueue.remove();

        // doWeHavePermissionToVisit: are we allowed to fetch this URL? A polite
        // crawler honours the rules the site publishes in its "robots.txt".
        // isUrlAlreadyVisited: has the URL been fetched before? Large search engines
        // usually deduplicate with a Bloom filter; a HashMap is enough here.
        // isDepthAcceptable: has the depth limit been reached? Crawlers usually work
        // breadth-first; some sites build crawler traps (automatically generated junk
        // links that send a crawler into an endless loop), and a depth limit guards
        // against them.
        if (doWeHavePermissionToVisit(crawlerUrl)
                && (!isUrlAlreadyVisited(crawlerUrl))
                && isDepthAcceptable(crawlerUrl)) {
            nextUrl = crawlerUrl;
            // System.out.println("Next url to be visited is " + nextUrl);
        }
    }
    return nextUrl;
}
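The three checks used above (doWeHavePermissionToVisit, isUrlAlreadyVisited, isDepthAcceptable) are not shown in the answer. A minimal sketch of what they might look like, assuming the HashMap-based visited set and depth limit from the field sketch above, a getDepth() accessor on CrawlerUrl, and no real robots.txt parsing:

// Hypothetical helpers, for illustration only.
private boolean doWeHavePermissionToVisit(CrawlerUrl crawlerUrl) {
    // Placeholder: a well-behaved crawler would download the site's /robots.txt
    // and check its Disallow rules before returning true.
    return true;
}

private boolean isUrlAlreadyVisited(CrawlerUrl crawlerUrl) {
    // Simple HashMap-based deduplication, as described in the comments above.
    return visitedUrls.containsKey(crawlerUrl.getUrlString());
}

private boolean isDepthAcceptable(CrawlerUrl crawlerUrl) {
    // Assumes CrawlerUrl records how many links away from the seed it was found.
    return crawlerUrl.getDepth() <= maxDepth;
}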

private String getContent(CrawlerUrl url) throws Throwable {
    // HttpClient 4.1 is called differently from earlier versions.
    HttpClient client = new DefaultHttpClient();
    HttpGet httpGet = new HttpGet(url.getUrlString());
    StringBuffer strBuf = new StringBuffer();
    HttpResponse response = client.execute(httpGet);
    if (HttpStatus.SC_OK == response.getStatusLine().getStatusCode()) {
        HttpEntity entity = response.getEntity();
        if (entity != null) {
            BufferedReader reader = new BufferedReader(
                    new InputStreamReader(entity.getContent(), "UTF-8"));
            String line = null;
            // Responses without a known Content-Length (e.g. chunked) are skipped here.
            if (entity.getContentLength() > 0) {
                strBuf = new StringBuffer((int) entity.getContentLength());
                while ((line = reader.readLine()) != null) {
                    strBuf.append(line);
                }
            }
        }
        if (entity != null) {
            entity.consumeContent(); // release the underlying connection
        }
    }
    // Mark the URL as visited.
    markUrlAsVisited(url);
    return strBuf.toString();
}
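DefaultHttpClient belongs to the HttpClient 4.0/4.1 era and has since been deprecated. On HttpClient 4.3 or later the same fetch could be written roughly as follows (a sketch only, using org.apache.http.impl.client.HttpClients and org.apache.http.util.EntityUtils, not part of the original code):

// Sketch of the same fetch with HttpClient 4.3+.
CloseableHttpClient client = HttpClients.createDefault();
HttpGet httpGet = new HttpGet(url.getUrlString());
try (CloseableHttpResponse response = client.execute(httpGet)) {
    if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) {
        // EntityUtils.toString reads the body and releases the connection.
        return EntityUtils.toString(response.getEntity(), "UTF-8");
    }
}
return "";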

public static boolean isContentRelevant(String content,
        Pattern regexpPattern) {
    boolean retValue = false;
    if (content != null) {
        // Does the page content match the regular expression?
        Matcher m = regexpPattern.matcher(content.toLowerCase());
        retValue = m.find();
    }
    return retValue;
}
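regexpSearchPattern is presumably compiled from the regexp string passed to the constructor; because the content is lower-cased before matching, the pattern should be lower-case too (or compiled with Pattern.CASE_INSENSITIVE). An illustrative call:

// Illustrative usage; "java" mirrors the regexp used in main().
Pattern topic = Pattern.compile("java");
boolean relevant = isContentRelevant("<html>Learning Java crawlers</html>", topic); // true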

public List<String> extractUrls(String text, CrawlerUrl crawlerUrl) {
    Map<String, String> urlMap = new HashMap<String, String>();
    extractHttpUrls(urlMap, text);
    extractRelativeUrls(urlMap, text, crawlerUrl);
    return new ArrayList<String>(urlMap.keySet());
}

private void extractHttpUrls(Map<String, String> urlMap, String text) {
    // httpRegexp is an assumed Pattern field (see the field sketch above)
    // matching absolute <a href="http..."> links.
    Matcher m = httpRegexp.matcher(text);
    while (m.find()) {
        String url = m.group();
        String[] terms = url.split("a href=\"");
        for (String term : terms) {
            // System.out.println("Term = " + term);
            if (term.startsWith("http")) {
                int index = term.indexOf("\"");
                if (index > 0) {
                    term = term.substring(0, index);
                }
                urlMap.put(term, term);
                System.out.println("Hyperlink: " + term);
            }
        }
    }
}

private void extractRelativeUrls(Map<String, String> urlMap, String text,
        CrawlerUrl crawlerUrl) {
    Matcher m = relativeRegexp.matcher(text);
    URL textURL = crawlerUrl.getURL();
    String host = textURL.getHost();
    while (m.find()) {
        String url = m.group();
        String[] terms = url.split("a href=\"");
        for (String term : terms) {
            if (term.startsWith("/")) {
                int index = term.indexOf("\"");
                if (index > 0) {
                    term = term.substring(0, index);
                }
                // Turn the site-relative path into an absolute URL on the same host.
                String s = "http://" + host + term;
                urlMap.put(s, s);
                System.out.println("Relative url: " + s);
            }
        }
    }
}

public static void main(String[] args) {
    try {
        String url = ""; // seed URL: fill in the start page to crawl
        Queue<CrawlerUrl> urlQueue = new LinkedList<CrawlerUrl>();
        String regexp = "java";
        urlQueue.add(new CrawlerUrl(url, 0));
        NaiveCrawler crawler = new NaiveCrawler(urlQueue, 100, 5, 1000L,
                regexp);
        // boolean allowCrawl = crawler.areWeAllowedToVisit(url);
        // System.out.println("Allowed to crawl: " + url + " " + allowCrawl);
        crawler.crawl();
    } catch (Throwable t) {
        System.out.println(t.toString());
        t.printStackTrace();
    }
}
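Judging from the call in main, the NaiveCrawler constructor presumably takes the URL queue, a maximum number of pages to crawl (100), a maximum crawl depth (5), the delay between requests in milliseconds (1000L) and the relevance regexp ("java"); its body is not shown here, so treat those meanings as an educated guess. The seed URL string is left empty above and must be set to a real start page before the crawler will do anything useful.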