# Without further ado, here is the code.
# -*- coding: utf-8 -*-
import urllib2
import sys
from bs4 import BeautifulSoup
# Python 2 hack: reload sys to restore setdefaultencoding (which the
# interpreter removes from the module at startup), then force UTF-8 as the
# process-wide default codec so Chinese text can be handled/printed.
reload(sys)
sys.setdefaultencoding("utf-8")
def get_name(url):
    """Scrape names from resgain.net.

    If *url* is the surname index page, follow the surname links
    (anchors with class "btn btn2") and recurse into each surname's own
    site; if *url* is a surname page, collect its name links (anchors
    with class "btn btn-link").

    :param url: page to fetch — the surname index or a surname site
    :return: list of collected name strings (may be empty)
    """
    name_list = []
    # Browser-like headers so the site does not reject the scripted request.
    hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
           'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
           'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
           'Accept-Encoding': 'none',
           'Accept-Language': 'en-US,en;q=0.8',
           'Connection': 'keep-alive'}
    request = urllib2.Request(url, headers=hdr)
    result = urllib2.urlopen(request)
    soup = BeautifulSoup(result.read(), 'html.parser')

    surname_links = soup.find_all("a", class_="btn btn2")
    if surname_links:
        # Index page: recurse into each surname site, limited to the
        # first link to keep the crawl small.
        # BUG FIX: the recursive call's result was previously discarded,
        # so the collected names never reached the caller.
        for link in surname_links[:1]:
            # Use the link's own target; fall back to the hard-coded
            # surname site the original code always used.
            sub_url = link.get('href') or 'http://chen.resgain.net'
            name_list.extend(get_name(sub_url))
    else:
        # Surname page: collect the first 20 name links.
        for link in soup.find_all('a', class_='btn btn-link')[:20]:
            name_list.append(link.text)
            print(link.text)
    return name_list
if __name__ == '__main__':
    # Entry point: kick off the crawl from the surname index page.
    start_url = "http://www.resgain.net/xsdq.html"
    get_name(start_url)