码迷,mamicode.com
首页 > 编程语言 > 详细

爬虫系统-多线程

时间:2017-03-22 00:17:50      阅读:190      评论:0      收藏:0      [点我收藏+]

标签:crawl   false   down   get   ring   finally   eal   thread   pcl   

package com.open111.crawler;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.http.HttpEntity;
import org.apache.http.ParseException;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.apache.log4j.Logger;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/**
*
* 爬虫起始类
* @author user
*
*/
/**
 * Crawler entry point: seeds a URL queue from a local config file, then walks
 * the pages with a fixed-size thread pool, logging every link that ends with
 * ".jar" as a discovered target and enqueueing every other non-excluded link
 * for further crawling.
 *
 * Thread-safety: the URL queue and the hit counter are shared between the ten
 * pool workers, so they use concurrent types ({@link ConcurrentLinkedQueue},
 * {@link AtomicInteger}).
 *
 * @author user
 */
public class StartCrawler {

    private static Logger logger = Logger.getLogger(StartCrawler.class);

    /** URL suffixes to skip: repo metadata, checksums, archives, parent-dir links. */
    public static String[] excludeUrl = new String[]{ ".pom", ".xml", ".md5", ".sha1", ".asc", ".gz", ".zip", "../"};

    /**
     * URLs waiting to be crawled. Polled by pool workers and appended by the
     * page parser concurrently, so it must be a thread-safe queue (the
     * original LinkedList could corrupt under concurrent access).
     */
    public static Queue<String> waitForCrawlerUrls = new ConcurrentLinkedQueue<String>();

    /** Number of ".jar" targets found so far; incremented from worker threads. */
    private static final AtomicInteger total = new AtomicInteger(0);

    /** Main-loop switch; read and written only by the parseUrl() dispatcher thread. */
    private static boolean exeFlag = true;

    /**
     * Parses one HTML page: logs ".jar" links as discovered targets and
     * enqueues every other non-excluded link for further crawling.
     *
     * @param webPageContent raw HTML of the page; ignored when null/empty
     * @param realPath       base URL of the page, prefixed to each href
     *                       (assumes hrefs are relative — TODO confirm for
     *                       absolute links)
     */
    public static void parseWebPage(String webPageContent, String realPath) {
        if (webPageContent == null || "".equals(webPageContent)) {
            return;
        }
        Document doc = Jsoup.parse(webPageContent);
        Elements links = doc.select("a"); // all anchor elements
        for (int i = 0; i < links.size(); i++) {
            Element link = links.get(i);
            String url = link.attr("href");
            boolean wanted = true;
            for (int j = 0; j < excludeUrl.length; j++) {
                if (url.endsWith(excludeUrl[j])) {
                    wanted = false;
                    break;
                }
            }
            if (wanted) {
                if (url.endsWith(".jar")) { // target artifact found
                    logger.info("发现第" + total.incrementAndGet() + "个目标:" + (realPath + url));
                } else { // directory-style link: crawl it next
                    logger.info("爬虫url队列新增url:" + (realPath + url));
                    addUrl(realPath + url, "解析网页");
                }
            }
        }
    }

    /**
     * Adds a URL to the crawl queue unless it is already queued.
     * Note: contains()+add() is not atomic, so a rare duplicate is possible
     * under concurrency; deduplication here is best-effort.
     *
     * @param url  URL to enqueue; null/empty values are ignored
     * @param info short label for the log line (who requested the add)
     */
    private static void addUrl(String url, String info) {
        if (url == null || "".equals(url)) {
            return;
        }
        if (!waitForCrawlerUrls.contains(url)) {
            waitForCrawlerUrls.add(url);
            logger.info("[" + info + "]" + url + "添加到爬虫队列");
        }
    }

    /**
     * Dispatcher loop: while URLs remain (or workers are still active),
     * submits one fetch task per queued URL to a 10-thread pool. Shuts the
     * pool down once the queue is empty and no worker is running.
     */
    public static void parseUrl() {
        ExecutorService executorService = Executors.newFixedThreadPool(10);
        while (exeFlag) {
            if (!waitForCrawlerUrls.isEmpty()) {
                executorService.execute(new Runnable() {
                    public void run() {
                        crawlOne();
                    }
                });
            } else if (((ThreadPoolExecutor) executorService).getActiveCount() == 0) {
                // Queue drained and no task in flight: we are done.
                executorService.shutdown();
                exeFlag = false;
                logger.info("爬虫任务已经成功");
            }
            try {
                Thread.sleep(1000); // throttle the dispatch loop
            } catch (InterruptedException e) {
                logger.error("线程休眠报错", e);
                Thread.currentThread().interrupt(); // restore interrupt status
            }
        }
    }

    /**
     * Fetches and processes a single URL taken from the queue.
     * On any request/parse failure the URL is re-queued exactly once
     * (the original code could log a bogus "连接超时" and re-add after an
     * already-handled exception). Both the response and the client are
     * closed in finally — the original leaked the HttpClient.
     */
    private static void crawlOne() {
        String url = waitForCrawlerUrls.poll(); // take the next pending URL
        if (url == null || "".equals(url)) {
            return;
        }
        logger.info("执行解析url:" + url);
        RequestConfig requestConfig = RequestConfig.custom()
                .setSocketTimeout(100000)  // read timeout
                .setConnectTimeout(5000)   // connect timeout
                .build();
        CloseableHttpClient httpClient = HttpClients.createDefault();
        HttpGet httpGet = new HttpGet(url);
        httpGet.setConfig(requestConfig);
        CloseableHttpResponse response = null;
        try {
            response = httpClient.execute(httpGet);
            HttpEntity entity = response.getEntity();
            // Content-Type may be absent or carry a charset suffix
            // ("text/html; charset=UTF-8"), so match on the prefix instead
            // of exact equality (exact equals silently skipped such pages).
            if (entity != null && entity.getContentType() != null
                    && entity.getContentType().getValue().startsWith("text/html")) {
                String webPageContent = EntityUtils.toString(entity, "utf-8");
                parseWebPage(webPageContent, url);
            }
        } catch (ClientProtocolException e) {
            logger.error("ClientProtocolException", e);
            addUrl(url, "由于异常");
        } catch (ParseException e) {
            logger.error("ParseException", e);
            addUrl(url, "由于异常");
        } catch (IOException e) {
            logger.error("IOException", e);
            addUrl(url, "由于异常");
        } finally {
            if (response != null) {
                try {
                    response.close();
                } catch (IOException e) {
                    logger.error("IOException", e);
                }
            }
            try {
                httpClient.close();
            } catch (IOException e) {
                logger.error("IOException", e);
            }
        }
    }

    /**
     * Reads seed URLs (one per line) from c:\crawler.txt, then starts the
     * crawl loop. Missing-file and read errors are logged and the crawl
     * proceeds with whatever was loaded.
     */
    private static void init() {
        logger.info("读取爬虫配置文件");
        BufferedReader br = null;
        try {
            br = new BufferedReader(new InputStreamReader(
                    new FileInputStream("c:\\crawler.txt"), StandardCharsets.UTF_8));
            String str;
            while ((str = br.readLine()) != null) {
                addUrl(str, "初始化");
            }
        } catch (FileNotFoundException e) {
            logger.error("FileNotFoundException", e);
        } catch (IOException e) {
            logger.error("IOException", e);
        } finally {
            // Original NPE'd here when the file was missing (br stayed null).
            if (br != null) {
                try {
                    br.close(); // closing the outermost reader closes the chain
                } catch (IOException e) {
                    logger.error("IOException", e);
                }
            }
        }
        logger.info("完成读取爬虫配置文件");
        parseUrl();
    }

    public static void main(String[] args) {
        logger.info("开始执行爬虫任务");
        init();
    }
}

爬虫系统-多线程

标签:crawl   false   down   get   ring   finally   eal   thread   pcl   

原文地址:http://www.cnblogs.com/csy666/p/6597460.html

(0)
(0)
   
举报
评论 一句话评论(0)
登录后才能评论!
© 2014 mamicode.com 版权所有  联系我们:gaon5@hotmail.com
迷上了代码!