Tags:
# coding: utf-8
"""Scrape article text from sina military news pages.

Fetches the index page http://mil.news.sina.com.cn/, collects every link
that points back into that site, extracts the <p> paragraphs of each
linked article and writes one numbered .txt file per article under the
sina_military/ directory.

Ported from the original Python 2 script (urllib.urlopen / sgmllib /
print statement / `<>` operator) to Python 3 (urllib.request /
html.parser).  The mojibake quote characters from the blog paste are
repaired, and the bug where func() ignored its parameter and read the
global ``url`` instead is fixed.
"""
__author__ = "minmin"

import os
import re
import urllib.request
from html.parser import HTMLParser


def getHtml(url):
    """Return the decoded HTML of *url*.

    NOTE(review): the page charset is assumed to be UTF-8 here
    (undecodable bytes are replaced); sina pages have historically used
    GBK — confirm and adjust the decode if extracted text looks garbled.
    """
    page = urllib.request.urlopen(url)
    try:
        return page.read().decode("utf-8", errors="replace")
    finally:
        page.close()  # always release the connection, even on read errors


def extract_article(html):
    """Pull the plain-text article body out of an HTML string.

    Joins every non-empty <p>...</p> run with a trailing newline after
    removing spaces, exactly as the original loop did.  The character
    class [^<>]* means paragraphs containing nested tags are skipped
    entirely (same behavior as the original regex).
    """
    artical = ""
    for j in re.findall(r"<p>([^<>]*)</p>", html, re.M):
        if j:  # original tested len(j) <> 0
            artical += j.replace(" ", "") + "\n"
    return artical


def func(url):
    """Fetch *url* and return its extracted article text.

    Bug fix: the original signature was ``func(str)`` — it shadowed the
    builtin and never used the parameter, silently reading the global
    ``url`` from the crawl loop instead.  It now fetches the page it is
    actually given.
    """
    return extract_article(getHtml(url))


class URLPaser(HTMLParser):
    """Collect the href attribute of every <a> tag fed to the parser.

    (Name kept as in the original — including its typo — so existing
    callers are unaffected.)
    """

    def reset(self):
        HTMLParser.reset(self)
        self.urls = []  # hrefs in document order; duplicates are kept

    def handle_starttag(self, tag, attrs):
        # html.parser delivers attrs as (name, value) pairs, names lowercased.
        if tag == "a":
            self.urls.extend(v for k, v in attrs if k == "href" and v is not None)


def main():
    """Crawl the index page and dump each matching article to disk."""
    parser = URLPaser()
    with urllib.request.urlopen("http://mil.news.sina.com.cn/") as sock:
        parser.feed(sock.read().decode("utf-8", errors="replace"))

    # Only follow links that stay on the military news site.
    pattern = re.compile(r"http://mil\.news\.sina\.com\.cn/.*")

    # Create the output directory if it does not already exist.
    os.makedirs("sina_military", exist_ok=True)

    i = 0
    seen = []  # URLs already processed, to skip duplicates (original url2)
    for url in parser.urls:
        if pattern.match(url) and url not in seen:
            seen.append(url)
            artical = func(url)
            print(artical)
            if artical:  # only write files for pages that yielded text
                i += 1
                # "a+" append mode preserved from the original script.
                with open(os.path.join("sina_military", str(i) + ".txt"), "a+") as f:
                    f.write(artical)


if __name__ == "__main__":
    main()  # guard: importing this module no longer triggers network I/O
Tags:
Original article: http://www.cnblogs.com/minmsy/p/4962731.html