Tags: crawler
I have recently been studying web crawlers; below is a simple crawler that extracts the links from a page:
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLConnection;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class study {
    private static List<String> waitforUrl = new ArrayList<>();        // URLs found so far, waiting to be crawled
    private static Set<String> goforUrl = new HashSet<>();             // URLs that have already been crawled
    private static Map<String, Integer> allUrldepth = new HashMap<>(); // crawl depth recorded for every URL
    private static int Maxdepth = 2;

    public static void main(String[] args) {
        String urlstr = "............";
        study.gourl(urlstr, 1);
    }
    public static void gourl(String urlstr, int depath) {
        if (!(goforUrl.contains(urlstr) || depath > Maxdepth)) {
            goforUrl.add(urlstr);
            try {
                URL url = new URL(urlstr);
                URLConnection urlConn = url.openConnection();               // open a connection to the URL
                InputStream is = urlConn.getInputStream();                  // page content as a byte stream
                InputStreamReader isr = new InputStreamReader(is, "utf-8"); // decode the bytes as UTF-8 characters
                BufferedReader br = new BufferedReader(isr);                // buffered reader over the character stream
                StringBuffer sb = new StringBuffer();                       // accumulates the page content
                String line = null;
                while ((line = br.readLine()) != null) {
                    sb.append(line);
                    //System.out.println(line);
                    Pattern p = Pattern.compile("<a .*href=.+</a>");
                    Matcher m = p.matcher(line);
                    while (m.find()) {
                        String href = m.group();
                        href = href.substring(href.indexOf("href="));
                        if (href.charAt(5) == '"' || href.charAt(5) == '\'') {
                            href = href.substring(6);
                        } else {
                            href = href.substring(5);
                        }
                        // cut the link off at the closing quote (double or single)
                        if (href.indexOf('"') != -1) {
                            href = href.substring(0, href.indexOf('"'));
                        } else if (href.indexOf('\'') != -1) {
                            href = href.substring(0, href.indexOf('\''));
                        }
                        waitforUrl.add(href);
                        allUrldepth.put(href, depath + 1);
                    }
                }
                is.close();  // close the byte stream
                isr.close(); // close the character reader
                br.close();
                System.out.println(urlstr);
                System.out.println("Links found: " + waitforUrl.size() + ", links crawled: " + goforUrl.size());
            } catch (Exception e) {
                e.printStackTrace();
            }
            // continue crawling the remaining links recursively
            if (!waitforUrl.isEmpty()) {
                String nexturl = waitforUrl.remove(0);
                gourl(nexturl, allUrldepth.get(nexturl));
            }
        } else {
            // the current link was already crawled (or too deep): move on to the next queued link
            if (!waitforUrl.isEmpty()) {
                String nexturl = waitforUrl.remove(0);
                gourl(nexturl, allUrldepth.get(nexturl));
            }
        }
    }
}
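Two caveats about the approach above: the recursive call keeps one stack frame per crawled page, and matching the regex line by line misses <a> tags that span multiple lines. As a rough sketch of an alternative (not part of the original post; the class name IterativeCrawler and the seed URL http://example.com are placeholders), the same breadth-first idea can be written iteratively with an ArrayDeque:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class IterativeCrawler {
    // matches href="..." or href='...' inside an <a> tag and captures the link itself
    private static final Pattern HREF = Pattern.compile("<a[^>]+href=[\"']([^\"']+)[\"']");
    private static final int MAX_DEPTH = 2;

    public static void main(String[] args) {
        Deque<String[]> queue = new ArrayDeque<>(); // each entry is {url, depth}
        Set<String> visited = new HashSet<>();
        queue.add(new String[]{"http://example.com", "1"}); // placeholder seed URL

        while (!queue.isEmpty()) {
            String[] entry = queue.poll();
            String url = entry[0];
            int depth = Integer.parseInt(entry[1]);
            if (depth > MAX_DEPTH || !visited.add(url)) {
                continue; // skip pages that are too deep or already visited
            }
            StringBuilder page = new StringBuilder();
            try (BufferedReader br = new BufferedReader(
                    new InputStreamReader(new URL(url).openStream(), StandardCharsets.UTF_8))) {
                String line;
                while ((line = br.readLine()) != null) {
                    page.append(line).append('\n');
                }
            } catch (Exception e) {
                continue; // unreachable page: move on to the next one
            }
            Matcher m = HREF.matcher(page);
            while (m.find()) {
                queue.add(new String[]{m.group(1), String.valueOf(depth + 1)});
            }
            System.out.println(url + " (depth " + depth + ", queue size: " + queue.size() + ")");
        }
    }
}

Here the visited set and the depth check replace the recursion, so the crawl stops cleanly when the queue drains instead of relying on the call stack.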
Original post: http://blog.51cto.com/12390959/2124580