初试爬虫

使用的python包

我并没有使用 urllib:刚开始用它时一直抓取不到网站内容,直接报错;按网上的方法配置浏览器 User-Agent 等参数之后仍然报错。改用 requests 就简单了许多,而且代码也比较少。

代码

这段代码仅针对 Pixabay 的图片下载;如果要用于其他网站,请注意查看目标网站的页面结构等信息,自行修改。

# 图片来源于Pixabay: https://pixabay.com/zh/
import requests as req
import math
import re
from bs4 import BeautifulSoup

# Accumulates every <img> "src" URL scraped by get_picture_page();
# consumed by the download loop at the bottom of the script.
origin_img_link = []

# Scrape Pixabay search-result pages for `key` and collect <img> src URLs.
def get_picture_page(key, page):
    """Fetch pages 1..page-1 of Pixabay search results for *key*.

    Args:
        key:  search keyword, inserted into the URL path.
        page: exclusive upper bound on the page number — pages 1 through
              page-1 are fetched (preserves the original ``while i < page``).

    Side effects:
        Appends each non-empty <img> ``src`` attribute found to the
        module-level ``origin_img_link`` list and prints progress.
    """
    for i in range(1, page):
        print('------------这是第%d页----------' % i)
        # Pixabay paginates via the `pagi` query parameter (GET);
        # adjust this URL scheme when targeting a different site.
        origin_url = 'https://pixabay.com/zh/images/search/' + key + '/?pagi=' + str(i)
        r = req.get(origin_url)
        if r.status_code != 200:
            # Skip pages that failed to load instead of parsing an error body.
            print('请求失败: %d' % r.status_code)
            continue
        soup = BeautifulSoup(r.content, 'html.parser')  # parse the HTML page
        # Collect every <img> tag; pass attrs={'alt': ''} etc. to filter —
        # see the BeautifulSoup find_all documentation.
        for tag in soup.find_all(name='img'):
            src = tag.get('src')
            if src:  # tags without a src attribute yield None — skip them
                print(src)
                origin_img_link.append(src)

get_picture_page('it',5)        # edit the keyword / page-count arguments here

# Download every collected URL that points at a full Pixabay photo.
# Photo URLs look like: https://cdn.pixabay.com/photo/YYYY/MM/DD/hh/mm/<name>
# The original pattern was broken: `d{4}` (missing backslash) matched the
# literal letters "dddd", and `[a-zA-z]` is a typo for `[a-zA-Z]` — so the
# regex never matched a real URL and nothing was ever downloaded.
_photo_prefix = re.compile(
    r'^((https|http|ftp|rtsp|mms)?://)'        # scheme
    r'[a-zA-Z0-9.\-]+'                         # host, e.g. cdn.pixabay.com
    r'/photo/\d{4}/\d{2}/\d{2}/\d{2}/\d{2}/'   # date-based path segment
)

for m in origin_img_link:
    # Guard against None entries (an <img> without a src attribute).
    if m and _photo_prefix.match(m):
        r = req.get(m)
        # Strip the matched URL prefix so only the bare file name remains,
        # then write the image bytes to that file in the working directory.
        s = _photo_prefix.sub('', m)
        if r.status_code == 200:
            with open(s, 'wb') as f:
                f.write(r.content)
            print(s + '下载成功!')

print('(…^&^)下载完成')

Comments

Leave a Reply

Your email address will not be published. Required fields are marked *