爬虫 python社区 ,知乎日报,腾讯新闻
爬虫 python社区 ,知乎日报,腾讯新闻
阿豪boy 发表于6个月前
爬虫 python社区 ,知乎日报,腾讯新闻
  • 发表于 6个月前
  • 阅读 4
  • 收藏 0
  • 点赞 0
  • 评论 0

新睿云服务器60天免费使用,快来体验!>>>   

 python社区文章

# coding=utf-8


from bs4 import BeautifulSoup
import urllib
import urllib2
import sys
reload(sys)
sys.setdefaultencoding('utf8')
url = 'http://www.pythontab.com/html/pythonhexinbiancheng/index.html'

request = urllib2.urlopen(url)

html = request.read()

# print html

# 解析方式
soup = BeautifulSoup(html, 'html.parser')
'''
 <ul class="list lh24 f14" id="catlist">
                    <li>
                <h3>Python高级教程</h3>
                <a href="http://www.pythontab.com/html/2017/pythonhexinbiancheng_0821/1166.html" target="_blank" >
                <h2>一名数据挖掘工程师给新人整理的入门资料</h2>  </a>
                <p>四年前我一次听说数据挖掘这个词,三年前我学习了数据挖掘理论知识,两年前我做了几个与数据挖掘有关的项目,一年前我成为一名数据挖掘工程   <a href="http://www.pythontab.com/html/2017/pythonhexinbiancheng_0821/1166.html" class="content_detail" target="_blank">[详细]</a></p>
            </li>
'''

# 先通过id 找 再通过标签找,注意空格必须有

# 找到链接和标题
items = soup.select('#catlist > li > a')

# 只找到标题
titles = soup.select('#catlist > li > a > h2')

# 找超链接和标题,方便后续分割
links = soup.select('#catlist > li > a')
for i in items:
    print i.get_text()

# zip函数,titles和links是列表通过zip将两个列表下标对应的两个作为字典
'''
l1 = [1,2,3]
l2 = {'a','b','c'}
d = zip(l1,l2)

[(1, 'a'), (2, 'c'), (3, 'b')]
'''

items = []
for title, link in zip(titles, links):
    data = {
        'title': title.get_text(),  # 获取标签的文本
        'link': link.get('href')  # 获取标签的属性
    }
    items.append(data)

# 输出标题和链接
for i in items:
    print i['title'], i['link']

'''
 <div class="content">
 
'''
# 获取超链接内容
for i in items:
    request = urllib2.urlopen(i['link'])
    html = request.read().encode('utf-8')
    soup = BeautifulSoup(html, 'html.parser')
    title = i['title']
    texts = soup.select('div.content > p')
    content = []
    for t in texts:
        content.append(t.get_text().encode('utf-8'))

    with open('file/%s.html' % title, 'wb') as f:
        f.write(i['title']+'\n'+html)
       # for cont in content:
        #    f.write(cont+'\n')

 

爬取知乎日报

# coding=utf-8
import urllib
import urllib2
import urlparse
import re
import bs4
import requests

import sys

reload(sys)
# Python 2 hack: make utf-8 the default str<->unicode codec so implicit
# conversions of Chinese text don't raise UnicodeDecodeError.
sys.setdefaultencoding('utf-8')

# Zhihu Daily front page.
url = 'https://daily.zhihu.com/'


# Fetch the raw page source for *url*, sending a desktop Chrome
# User-Agent so the site serves the regular page.
def getHtml(url):
    user_agent = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/60.0.3112.101 Safari/537.36')
    req = urllib2.Request(url, headers={'User-Agent': user_agent})
    return urllib2.urlopen(req).read()


# Extract every /story/<id> link from the front-page HTML and return
# the stories as absolute URLs, in document order.
def getUrls(html):
    # re.S lets '.' also match newlines inside the anchor tag.
    story_ids = re.findall(re.compile('<a href="/story/(.*?)"', re.S), html)
    return ['https://daily.zhihu.com/story/' + sid for sid in story_ids]


# Fetch a story page and return its title (wrapped in <h1> tags)
# followed by the story body HTML.
def getContent(url):
    html = getHtml(url)

    # e.g. <title>在一件事情上付出越多,你对它就越喜欢,真怪</title>
    title_pattern = re.compile('<title>(.*?)</title>', re.S)
    titles = re.findall(title_pattern, html)

    # The story body sits between the content div and the "view more" div.
    body_pattern = re.compile('<div class="content">(.*?)<div class="view-more">', re.S)
    content = re.findall(body_pattern, html)[0]

    # Bug fix: the closing tag was written as '/h1' (no angle brackets),
    # producing invalid HTML output.
    return '<h1>' + titles[0] + '</h1>' + '\n' + content

# Strip surrounding markup: return the text of every <p> and <li>
# element in *html*, in document order.
# Bug fixes: the original built an empty `result`, discarded the regex
# matches, and fell off the end returning None; its pattern also had a
# stray trailing space after '</li>' so list items could never match.
def clear(html):
    pattern = re.compile('<p>(.*?)</p>|<li>(.*?)</li>', re.S)
    result = []
    # findall on an alternation yields (p_text, li_text) tuples with one
    # empty slot; keep whichever group actually matched.
    for p_text, li_text in re.findall(pattern, html):
        result.append(p_text or li_text)
    return result

# Entry point: fetch the front page, collect the story links, and save
# the first story (title + body HTML) to out.html.
html = getHtml(url)
urls = getUrls(html)
with open('out.html', 'w+') as f:
    f.write(getContent(urls[0]))

 

腾讯新闻

# coding=utf-8
# Scrape headline titles and links from the Tencent news front page.
# (Python 2 script.)


from bs4 import BeautifulSoup
import urllib
import urllib2
import sys
import requests
import cookielib
import re

reload(sys)
# Python 2 hack: make utf-8 the default str<->unicode codec so implicit
# conversions of Chinese text don't raise UnicodeDecodeError.
sys.setdefaultencoding('utf8')

# Fix: `requests` and `BeautifulSoup` were imported a second time here;
# the duplicate imports have been removed.

url = "http://news.qq.com/"
# Request the page and take its decoded text body.
wbdata = requests.get(url).text
# Parse with the lxml backend.
soup = BeautifulSoup(wbdata, 'lxml')
# CSS-select the headline anchors; returns a list of tags.
news_titles = soup.select("div.text > em.f14 > a.linkto")

# Print each headline's title and link.
for n in news_titles:
    title = n.get_text()
    link = n.get("href")

    print('标题:%s\t链接:%s' % (title,link))

 

  • 打赏
  • 点赞
  • 收藏
  • 分享
共有 人打赏支持
粉丝 10
博文 634
码字总数 480730
×
阿豪boy
如果觉得我的文章对您有用,请随意打赏。您的支持将鼓励我继续创作!
* 金额(元)
¥1 ¥5 ¥10 ¥20 其他金额
打赏人
留言
* 支付类型
微信扫码支付
打赏金额:
已支付成功
打赏金额: