Python Crawler Data Extraction, Part 4: pyquery
1 pyquery
Introduction: pyquery is another powerful HTML parsing tool. It provides jQuery-like syntax for parsing HTML documents, supports CSS selectors, and is very convenient to use.
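As a quick, minimal sketch of that jQuery-style workflow (the HTML fragment and selector below are made up purely for illustration):
from pyquery import PyQuery as pq

# Parse an HTML fragment and query it with a CSS selector, much like jQuery's $()
snippet = '<div class="intro"><a href="demo.html">demo link</a></div>'
doc = pq(snippet)
print(doc('.intro a').text())        # demo link
print(doc('.intro a').attr('href'))  # demo.html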
2 Basic usage of pyquery
2.1 Installation
pip install pyquery
2.2 Initialization
Initializing from a string
html = '''
<div>
    <ul>
        <li class="item-0">first item</li>
        <li class="item-1"><a href="link2.html">second item</a></li>
        <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
        <li class="item-1 active"><a href="link4.html">fourth item</a></li>
        <li class="item-0"><a href="link5.html">fifth item</a></li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
print(doc('li'))
Output: all five <li> elements (first item through fifth item) are printed.
Initializing from a URL
from pyquery import PyQuery as pq
doc = pq(url='http://www.baidu.com')
print(doc('head'))
Output: the <head> element of the Baidu homepage is printed; its <title> is 百度一下,你就知道.
Initializing from a file
from pyquery import PyQuery as pq
doc = pq(filename='test.html')
print(doc('li'))
Output: the <li> elements of test.html are printed (1111111111 through 5555555555).
2.3 Basic CSS selectors
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1"><a href="link2.html">second item</a></li>
        <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
        <li class="item-1 active"><a href="link4.html">fourth item</a></li>
        <li class="item-0"><a href="link5.html">fifth item</a></li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
print(doc('#container .list li'))
Output: all five <li> elements under #container .list are printed.
2.4 Finding elements
2.4.1 Child elements
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1"><a href="link2.html">second item</a></li>
        <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
        <li class="item-1 active"><a href="link4.html">fourth item</a></li>
        <li class="item-0"><a href="link5.html">fifth item</a></li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
items = doc('.list')
print(type(items))
print(items)
lis = items.find('li')
print(type(lis))
print(lis)
Output: both items and lis are of type <class 'pyquery.pyquery.PyQuery'>; items prints the <ul class="list"> element, and find('li') prints all five <li> elements inside it.
lis = items.children()
print(type(lis))
print(lis)
Output: <class 'pyquery.pyquery.PyQuery'> and all five <li> children are printed.
lis = items.children('.active')
print(lis)
Output: only the third and fourth <li> elements (the ones with the active class) are printed.
Note: item-0 and active here are two separate classes; in HTML a single element can carry multiple classes, separated by spaces.
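A minimal sketch of that point (the fragment below is made up): an element with class="item-0 active" matches both .item-0 and .active, while the compound selector .item-0.active requires both classes on the same element.
from pyquery import PyQuery as pq

# The second <li> carries two classes at once: item-0 and active
doc = pq('<ul><li class="item-0">a</li><li class="item-0 active">b</li></ul>')
print(doc('.item-0').text())         # both items match -> "a b"
print(doc('.item-0.active').text())  # only the second item matches -> "b"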
2.4.2 Parent elements
html = '''
<div class="wrap">
    <div id="container">
        <ul class="list">
            <li class="item-0">first item</li>
            <li class="item-1"><a href="link2.html">second item</a></li>
            <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
            <li class="item-1 active"><a href="link4.html">fourth item</a></li>
            <li class="item-0"><a href="link5.html">fifth item</a></li>
        </ul>
    </div>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
items = doc('.list')
container = items.parent()
print(type(container))
print(container)
Output: <class 'pyquery.pyquery.PyQuery'>, followed by the direct parent, the <div id="container"> element with the whole list inside it.
html = '''
<div class="wrap">
    <div id="container">
        <ul class="list">
            <li class="item-0">first item</li>
            <li class="item-1"><a href="link2.html">second item</a></li>
            <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
            <li class="item-1 active"><a href="link4.html">fourth item</a></li>
            <li class="item-0"><a href="link5.html">fifth item</a></li>
        </ul>
    </div>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
items = doc('.list')
parents = items.parents()
print(type(parents))
print(parents)
Output: <class 'pyquery.pyquery.PyQuery'>, followed by all ancestors. Both the <div id="container"> element and the outer <div class="wrap"> element are printed, so the list content appears twice.
parent = items.parents('.wrap')
print(parent)
Output: only the <div class="wrap"> ancestor is printed.
2.4.3 Sibling elements
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1"><a href="link2.html">second item</a></li>
        <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
        <li class="item-1 active"><a href="link4.html">fourth item</a></li>
        <li class="item-0"><a href="link5.html">fifth item</a></li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
li = doc('.list .item-0.active')
print(li.siblings())
Output: the four siblings of the third <li> are printed (second item, first item, fourth item, fifth item). As you can see, the order of the results does not fully match the order in the input.
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1"><a href="link2.html">second item</a></li>
        <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
        <li class="item-1 active"><a href="link4.html">fourth item</a></li>
        <li class="item-0"><a href="link5.html">fifth item</a></li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
li = doc('.list .item-0.active')  # the element under the .list element whose classes are item-0 and active
print(li.siblings('.active'))
Output: only the fourth <li> is printed (the sibling that also has the active class).
2.5 Traversal
A single element
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1"><a href="link2.html">second item</a></li>
        <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
        <li class="item-1 active"><a href="link4.html">fourth item</a></li>
        <li class="item-0"><a href="link5.html">fifth item</a></li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
li = doc('.item-0.active')
print(li)
Output: the <li class="item-0 active"> element (third item) is printed.
Multiple elements
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1"><a href="link2.html">second item</a></li>
        <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
        <li class="item-1 active"><a href="link4.html">fourth item</a></li>
        <li class="item-0"><a href="link5.html">fifth item</a></li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
lis = doc('li').items()
print(type(lis))
for li in lis:
    print(li)
Output: lis is a generator; iterating over it prints each of the five <li> elements in turn.
2.6 Extracting information
Getting attributes
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1"><a href="link2.html">second item</a></li>
        <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
        <li class="item-1 active"><a href="link4.html">fourth item</a></li>
        <li class="item-0"><a href="link5.html">fifth item</a></li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
a = doc('.item-0.active a')
print(a)
print(a.attr('href'))
print(a.attr.href)
<a href="link3.html"><span class="bold">third item</span></a>
link3.html
link3.html
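One thing to keep in mind: attr() only returns the attribute of the first matched element. To read the attribute of every match, iterate with items(); a minimal sketch, assuming the same html string as in the snippet above:
from pyquery import PyQuery as pq

doc = pq(html)
# attr() acts on the first match only, so loop over items() to get every href
for a in doc('li a').items():
    print(a.attr('href'))
# link2.html, link3.html, link4.html, link5.html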
Getting the text
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1"><a href="link2.html">second item</a></li>
        <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
        <li class="item-1 active"><a href="link4.html">fourth item</a></li>
        <li class="item-0"><a href="link5.html">fifth item</a></li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
a = doc('.item-0.active a')
print(a)
print(a.text())
<a href="link3.html"><span class="bold">third item</span></a>
third item
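Also worth knowing: when several elements are selected, text() joins all of their texts with spaces, whereas html() only returns the inner HTML of the first element. A minimal sketch, again assuming the html string above:
from pyquery import PyQuery as pq

doc = pq(html)
lis = doc('li')
print(lis.text())  # first item second item third item fourth item fifth item
print(lis.html())  # inner HTML of the first <li> only: first item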
Getting the HTML
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1"><a href="link2.html">second item</a></li>
        <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
        <li class="item-1 active"><a href="link4.html">fourth item</a></li>
        <li class="item-0"><a href="link5.html">fifth item</a></li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
li = doc('.item-0.active')
print(li)
print(li.html())
<li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
<a href="link3.html"><span class="bold">third item</span></a>
2.7 DOM manipulation
addClass and removeClass
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1"><a href="link2.html">second item</a></li>
        <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
        <li class="item-1 active"><a href="link4.html">fourth item</a></li>
        <li class="item-0"><a href="link5.html">fifth item</a></li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
li = doc('.item-0.active')
print(li)
li.removeClass('active')
print(li)
li.addClass('active')
print(li)
Output: the <li> element is printed three times: first with class="item-0 active", then with the active class removed (class="item-0"), then with it added back.
attr and css
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1"><a href="link2.html">second item</a></li>
        <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
        <li class="item-1 active"><a href="link4.html">fourth item</a></li>
        <li class="item-0"><a href="link5.html">fifth item</a></li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
li = doc('.item-0.active')
print(li)
li.attr('name', 'link')
print(li)
li.css('font-size', '14px')
print(li)
Output: the <li> element is printed three times: unchanged; then with a name="link" attribute added; then also with style="font-size: 14px".
remove
html = '''
<div class="wrap">
    Hello, World
    <p>This is a paragraph.</p>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
wrap = doc('.wrap')
print(wrap.text())
wrap.find('p').remove()
print(wrap.text())
Hello, World
This is a paragraph.
Hello, World
2.8 Pseudo-class selectors
html = '''
<div id="container">
    <ul class="list">
        <li class="item-0">first item</li>
        <li class="item-1"><a href="link2.html">second item</a></li>
        <li class="item-0 active"><a href="link3.html"><span class="bold">third item</span></a></li>
        <li class="item-1 active"><a href="link4.html">fourth item</a></li>
        <li class="item-0"><a href="link5.html">fifth item</a></li>
    </ul>
</div>
'''
from pyquery import PyQuery as pq
doc = pq(html)
li = doc('li:first-child')
print(li)
li = doc('li:last-child')
print(li)
li = doc('li:nth-child(2)')
print(li)
li = doc('li:gt(2)')  # all <li> children with index > 2 (indices start at 0)
print(li)
li = doc('li:nth-child(2n)')
print(li)
li = doc('li:contains(second)')  # select by contained text
print(li)
Output, in order: first item (first-child); fifth item (last-child); second item (nth-child(2)); fourth item and fifth item (gt(2)); second item and fourth item (nth-child(2n)); second item (contains).
3 Hands-on: scraping Baidu Tieba's 校花吧 forum (pyquery version)
import os

import requests
from lxml import etree
from pyquery import PyQuery as pq


class Baidu(object):
    def __init__(self, name):
        self.url = 'http://tieba.baidu.com/f?ie=utf-8&kw={}'.format(name)
        # Use an old browser's User-Agent so the server returns the page without JS rendering
        self.headers = {
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)'
        }

    # Send the request and return the raw response body
    def get_data(self, url):
        response = requests.get(url, headers=self.headers)
        return response.content

    # Parse a list page: extract the title and link of each post
    def parse_list_page(self, data):
        with open('baidu1.html', 'wb') as f:
            f.write(data)
        # lxml alternative: html = etree.HTML(data)
        doc = pq(data)
        node_list = doc.find('.j_thread_list .threadlist_title a')
        # XPath equivalent used in the lxml version:
        # node_list = html.xpath("//*[@id='thread_list']/li[@class=' j_thread_list clearfix']/div/div[2]/div[1]/div[1]/a")
        # print(len(node_list))  # check how many posts were matched
        data_list = []
        for node in node_list.items():
            temp = {}
            temp['url'] = 'http://tieba.baidu.com' + node.attr('href')
            temp['title'] = node.text()
            data_list.append(temp)
        # Extract the next-page node and build the full URL of the next page
        next_node = doc.find('#frs_list_pager .next').attr('href')
        # next_node = html.xpath('//*[@id="frs_list_pager"]/a[last()-1]/@href')[0]
        next_url = 'http:' + next_node
        return data_list, next_url

    # Parse a detail page: extract the image links
    def parse_detail_page(self, data):
        # lxml alternative: html = etree.HTML(data)
        doc = pq(data)
        image_lst = doc.find('.BDE_Image').items()
        image_list = [img.attr('src') for img in image_lst]
        # image_list = html.xpath("//cc/div[contains(@class,'d_post')]/img[@class='BDE_Image']/@src")
        print(image_list)
        return image_list

    # Download the images and save them as files
    def download(self, image_list):
        # Create the folder if it does not exist yet
        if not os.path.exists('images1'):
            os.makedirs('images1')
        for image in image_list:
            # os.sep is '/' on macOS/Linux and '\\' on Windows, so this works on both
            file_name = 'images1' + os.sep + image.split('/')[-1]
            image_data = self.get_data(image)
            with open(file_name, 'wb') as f:
                f.write(image_data)

    def run(self):
        # Start from the list-page URL built in __init__
        next_url = self.url
        # Loop over the list pages until there is no next page
        while next_url:
            data = self.get_data(next_url)
            # Parse the list page: post data plus the URL of the next page
            data_list, next_url = self.parse_list_page(data)
            # Parse every detail page to get its image links, then download them
            for item in data_list:
                detail_data = self.get_data(item['url'])
                image_list = self.parse_detail_page(detail_data)
                self.download(image_list)


if __name__ == '__main__':
    # Crawl all images from the 校花吧 forum and store them in a folder
    baidu = Baidu('校花吧')
    baidu.run()