Method 1:
# coding: utf-8
import re
import requests

# Fetch the page content
r = requests.get('http://www.163.com')
data = r.text

# Lookbehind/lookahead assertions capture everything between
# href="..." or href='...' quotes
link_list = re.findall(r"(?<=href=\").+?(?=\")|(?<=href=').+?(?=')", data)
for url in link_list:
    print(url)
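To see how the lookbehind/lookahead pattern behaves without fetching a real page, here is a minimal sketch run against a hand-written HTML snippet; the snippet and variable names are illustrative, not taken from the original post.

import re

# Illustrative fragment, not fetched from any site
sample = '<a href="http://www.163.com/news">news</a> <a href=\'/sports\'>sports</a>'

# Same pattern as Method 1: grab the text between href="..." or href='...'
pattern = r"(?<=href=\").+?(?=\")|(?<=href=').+?(?=')"
print(re.findall(pattern, sample))
# ['http://www.163.com/news', '/sports']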
Method 2:
import re
import urllib.request

# Connect to a URL (urllib2 from the original is urllib.request in Python 3)
website = urllib.request.urlopen('http://www.163.com')

# Read the HTML and decode the bytes to text
html = website.read().decode('utf-8', errors='ignore')

# Use re.findall to get all absolute http/https/ftp/ftps links;
# the inner (?:...) group is non-capturing so findall returns plain strings
links = re.findall(r'"((?:http|ftp)s?://.*?)"', html)
for url in links:
    print(url)
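As a quick sanity check for Method 2's pattern (again on an illustrative inline snippet, with example.com as a made-up host), the non-capturing (?:...) group is what makes findall return plain URL strings; with the original capturing (http|ftp) group, findall would return tuples instead.

import re

# Illustrative fragment; example.com is a placeholder host
sample = '<a href="https://www.163.com/tech/">tech</a> <a href="ftp://example.com/file">file</a>'

# Non-capturing inner group: findall yields the full URLs
print(re.findall(r'"((?:http|ftp)s?://.*?)"', sample))
# ['https://www.163.com/tech/', 'ftp://example.com/file']

# Capturing inner group, as in the original pattern: findall yields tuples
print(re.findall(r'"((http|ftp)s?://.*?)"', sample))
# [('https://www.163.com/tech/', 'http'), ('ftp://example.com/file', 'ftp')]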