1. Get a single news item's title, link, time, source, content, and click count, and wrap it in a function.
import requests
import re
from bs4 import BeautifulSoup

# Fetch the news list page
a = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
a.encoding = 'utf-8'
soup = BeautifulSoup(a.text, 'html.parser')

for xinwen in soup.select('li'):
    if len(xinwen.select('.news-list-title')) > 0:
        # Title, link, time, source and summary from the list page
        title = xinwen.select('.news-list-title')[0].text
        url = xinwen.select('a')[0]['href']
        time = xinwen.select('.news-list-info')[0].contents[0].text
        source = xinwen.select('.news-list-info')[0].contents[1].text
        neirong = xinwen.select('.news-list-description')[0].text
        # Follow the link to the detail page and grab the body text
        adiv = requests.get(url)
        adiv.encoding = 'utf-8'
        soupdiv = BeautifulSoup(adiv.text, 'html.parser')
        detail = soupdiv.select('.show-content')[0].text
        # Derive the news id from the detail URL and query the click-count API
        newsid = re.search('_(.*).html', url).group(1).split('/')[-1]
        clickurl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
        click = int(requests.get(clickurl).text.split('.')[-1].lstrip("html('").rstrip("');"))
        print(time, title, source, neirong, url, detail, click)
        break  # only the first news item for this task
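The task also asks for this logic to be wrapped in a function. A minimal sketch under that reading is shown below: getNewsDetail is a hypothetical name, it takes one list-page <li> tag as its argument, and it reuses exactly the selectors and the click-count API from the code above.

import requests
import re
from bs4 import BeautifulSoup

def getNewsDetail(li):
    # Extract the list-page fields (title, link, time, source, summary)
    title = li.select('.news-list-title')[0].text
    url = li.select('a')[0]['href']
    time = li.select('.news-list-info')[0].contents[0].text
    source = li.select('.news-list-info')[0].contents[1].text
    neirong = li.select('.news-list-description')[0].text
    # Fetch the detail page for the full body text
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    detail = soupd.select('.show-content')[0].text
    # Query the click-count API with the id taken from the detail URL
    newsid = re.search('_(.*).html', url).group(1).split('/')[-1]
    clickurl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
    click = int(requests.get(clickurl).text.split('.')[-1].lstrip("html('").rstrip("');"))
    return time, title, source, neirong, url, detail, click

With this in place, the loop above reduces to calling getNewsDetail(xinwen) for each qualifying <li>.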
2. Get the above details for every news item on one list page, and wrap it in a function.
import requests
from bs4 import BeautifulSoup
import re

def getclick(newsurl):
    # Take the news id out of the detail-page URL and query the click-count API
    newsid = re.search('_(.*).html', newsurl).group(1).split('/')[-1]
    clickurl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
    click = int(requests.get(clickurl).text.split('.')[-1].lstrip("html('").rstrip("');"))
    return click

def getonepages(eveylisturl):
    # Parse one list page and print the details of every news item on it
    res = requests.get(eveylisturl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            title = news.select('.news-list-title')[0].text
            url = news.select('a')[0]['href']
            time = news.select('.news-list-info')[0].contents[0].text
            bm = news.select('.news-list-info')[0].contents[1].text
            resd = requests.get(url)
            resd.encoding = 'utf-8'
            soupd = BeautifulSoup(resd.text, 'html.parser')
            detail = soupd.select('.show-content')[0].text
            count = getclick(url)
            print(title, count)

hpk = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(hpk)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
# The total item count is shown as "n条"; each list page holds 10 items
a = int(soup.select('.a1')[0].text.rstrip('条'))
pages = a // 10 + 1
getonepages(hpk)  # page 1 is the index page
for i in range(2, pages + 1):
    pagesurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    getonepages(pagesurl)
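The click-count API does not return plain JSON; judging from the lstrip/rstrip chain in getclick, its response is a small JavaScript snippet whose last part looks roughly like html('3970');. A minimal sketch of that parsing step, where the sample response string is an assumed example:

# Assumed sample of what the count API returns (shape inferred from the parsing code above)
sample = "$('#hits').html('3970');"
# split('.')[-1] keeps the trailing "html('3970');" part,
# then lstrip/rstrip peel off the characters around the number
click = int(sample.split('.')[-1].lstrip("html('").rstrip("');"))
print(click)  # -> 3970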
3. Get the URLs of all news list pages and call the function above on each of them.
import requests
from bs4 import BeautifulSoup
from datetime import datetime

url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')

def getonepage(listurl):
    # Print the details of every news item on one list page
    res = requests.get(listurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            title = news.select('.news-list-title')[0].text
            url = news.select('a')[0]['href']
            time = news.select('.news-list-info')[0].contents[0].text
            dt = datetime.strptime(time, '%Y-%m-%d')
            source = news.select('.news-list-info')[0].contents[1].text
            resd = requests.get(url)
            resd.encoding = 'utf-8'
            soupd = BeautifulSoup(resd.text, 'html.parser')
            ar = soupd.select('.show-content')[0].text
            print(title, url, time, dt, source)

# Page 1 is index.html; the remaining list pages are 2.html, 3.html, ...
getonepage('http://news.gzcc.cn/html/xiaoyuanxinwen/index.html')
page = int(soup.select('.a1')[0].text.rstrip('条')) // 10 + 1
for i in range(2, page + 1):
    listurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    getonepage(listurl)
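The page count comes from the '.a1' element on the first list page, which shows the total number of news items as "n条", with 10 items per page. A small worked example of that arithmetic and of building the full list of page URLs, using a made-up total of 873 items:

# Hypothetical content of the .a1 element, e.g. "873条"
total_text = '873条'
total = int(total_text.rstrip('条'))   # 873 items
page = total // 10 + 1                 # 873 // 10 + 1 = 88 list pages
# Page 1 is index.html, the rest are 2.html .. 88.html
urls = ['http://news.gzcc.cn/html/xiaoyuanxinwen/index.html'] + \
       ['http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i) for i in range(2, page + 1)]
print(page, urls[:3])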
4. Complete the crawling of all campus news.
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re

url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')

def getclick(newurl):
    # Pull the news id out of the detail-page URL and query the click-count API
    newsid = re.search('_(.*).html', newurl).group(1).split('/')[-1]
    clickurl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
    click = int(requests.get(clickurl).text.split('.')[-1].lstrip("html('").rstrip("');"))
    return click

def getonepage(listurl):
    # Crawl every news item on one list page: title, link, time, source, body and clicks
    res = requests.get(listurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            title = news.select('.news-list-title')[0].text
            url = news.select('a')[0]['href']
            time = news.select('.news-list-info')[0].contents[0].text
            dt = datetime.strptime(time, '%Y-%m-%d')
            source = news.select('.news-list-info')[0].contents[1].text
            resd = requests.get(url)
            resd.encoding = 'utf-8'
            soupd = BeautifulSoup(resd.text, 'html.parser')
            ar = soupd.select('.show-content')[0].text
            click = getclick(url)
            print(title, url, time, dt, source, click)

# Crawl page 1 (index.html), then every remaining list page
getonepage('http://news.gzcc.cn/html/xiaoyuanxinwen/index.html')
page = int(soup.select('.a1')[0].text.rstrip('条')) // 10 + 1
for i in range(2, page + 1):
    listurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    getonepage(listurl)
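To make the crawled results easier to reuse than plain print output, one option is to collect each news item into a dict and append it to a shared list. A minimal sketch of that pattern; newslist, collectnews and the dict keys are hypothetical names, not part of the code above:

# Hypothetical accumulator: each crawled news item becomes one dict in the list
newslist = []

def collectnews(title, url, time, dt, source, click):
    newslist.append({
        'title': title,
        'url': url,
        'time': time,
        'datetime': dt,
        'source': source,
        'click': click,
    })

# Inside getonepage, the print(...) call could be replaced with:
#     collectnews(title, url, time, dt, source, click)
# After the page loop finishes, newslist holds every crawled news item.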