首页 编程教程正文

Python3 - 爬取美图录

piaodoo 编程教程 2020-02-22 22:12:46 1441 0 python教程

本文来源吾爱破解论坛

本帖最后由 好想吃掉你 于 2019-3-27 15:53 编辑

[Python] 纯文本查看 复制代码

import requests
from bs4 import BeautifulSoup
import os
import chardet
from chardet import detect
import re


# HTTP request headers: a browser-like User-Agent plus a Referer header so the
# image host does not reject the requests as hotlinking.
headers = {
    'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
    'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
    'Accept-Encoding': 'gzip',
    "Referer": "https://www.meitulu.com/"
}
path = "jpg"  # download root directory; change freely
# Fix: the original called os.chdir(path) directly, which raises
# FileNotFoundError on a fresh machine where "jpg" does not exist yet.
os.makedirs(path, exist_ok=True)
os.chdir(path)

url = 'https://www.meitulu.com/t/qingdouke/'  # gallery index to crawl (青豆客)


#'https://www.meitulu.com/t/youwuguan/'  尤物馆
#'https://www.meitulu.com/t/girlt/'  果团网 , 'https://www.meitulu.com/t/mistar/', 魅妍社
#'https://www.meitulu.com/t/1088/', 星颜社 'https://www.meitulu.com/t/ishow/', iSHOW爱秀
#'https://www.meitulu.com/t/huayan/',花の颜 'https://www.meitulu.com/t/xingleyuan/',星乐园
#'https://www.meitulu.com/t/tukmo/', 'https://www.meitulu.com/t/aiss/',
#'https://www.meitulu.com/t/miitao/', 'https://www.meitulu.com/t/uxing/',
#'https://www.meitulu.com/t/taste/',  'https://www.meitulu.com/t/micat/',
#'https://www.meitulu.com/t/candy/', 'https://www.meitulu.com/t/yunvlang/',
#'https://www.meitulu.com/t/youmihui/', 'https://www.meitulu.com/t/1113/',
#'https://www.meitulu.com/t/1209/', 'https://www.meitulu.com/t/imiss/',
#'https://www.meitulu.com/t/yingsihui-wings/', https://www.meitulu.com/t/dianannan/ 嗲囡囡
#url可更改为上面随意一个
#美图录的随意一个目录url复制即可运行

# ---- fetch the gallery index page ------------------------------------------
response1 = requests.get(url, headers=headers, allow_redirects=False)

# Detect the encoding from the raw bytes so non-UTF-8 pages decode correctly.
response1.encoding = chardet.detect(response1.content)['encoding']

html_soup1 = BeautifulSoup(response1.text, 'lxml')
all_a = html_soup1.find('ul', class_='img').find_all('li')  # one <li> per album

# Fix: remember the download root once instead of hard-coding the author's
# personal path (/Users/w2ng/untitled/venv/jpg), which broke on any other
# machine. All files are written via absolute paths, so no chdir ping-pong.
base_dir = os.getcwd()
count = 0  # running number used to build unique image file names

for li in all_a:
    count = count + 1
    a = li.a

    # Skip <li> entries that carry no thumbnail (not album links).
    if a.find('img') is None:
        continue

    href = a["href"]
    print(href)
    # Album pages are paginated as <album>_2.html, <album>_3.html, ...
    href2 = href.replace('.html', '_')

    response2 = requests.get(href, headers=headers)
    response2.encoding = chardet.detect(response2.content)['encoding']

    html_soup2 = BeautifulSoup(response2.text, 'lxml')
    # '/' is illegal in file names; replace it in the album title.
    name = html_soup2.find('title').text.replace('/', '-')
    # Fix: os.mkdir(name) crashed on re-runs when the directory already existed.
    album_dir = os.path.join(base_dir, name)
    os.makedirs(album_dir, exist_ok=True)
    print(name)

    img1 = html_soup2.find("div", {"class": "content"}).find_all("img")
    # The next-to-last pager link holds the album's highest page number.
    page = html_soup2.find("div", {"id": "pages"}).find_all('a')[-2]
    max_page = page.get_text()

    # Page 1 images.
    for pic in img1:
        count = count + 1
        pic2 = requests.get(pic['src'], headers=headers)
        # Fix: use a context manager so the file handle is closed even if the
        # write raises; keep 'ab' mode and the original file-name scheme.
        with open(os.path.join(album_dir, name + str(count) + '.jpg'), 'ab') as f:
            f.write(pic2.content)

    # Pages 2..max_page of the album.
    for i in range(2, int(max_page) + 1):
        href3 = href2 + str(i) + '.html'
        response3 = requests.get(href3, headers=headers)
        # Fix: the original never set the encoding for paginated pages,
        # unlike response1/response2 — made consistent here.
        response3.encoding = chardet.detect(response3.content)['encoding']

        html_soup3 = BeautifulSoup(response3.text, 'lxml')
        for pic in html_soup3.find("div", {"class": "content"}).find_all("img"):
            count = count + 1
            pic2 = requests.get(pic['src'], headers=headers)
            with open(os.path.join(album_dir, name + str(count) + '.jpg'), 'ab') as f:
                f.write(pic2.content)


print("完成!")



测试图:

QQ20190327-155224@2x.png (416.35 KB, 下载次数: 27)

下载附件  保存到相册

2019-3-27 15:52 上传

QQ20190327-155224@2x.png

版权声明:

本站所有资源均为站长或网友整理自互联网或站长购买自互联网,站长无法分辨资源版权出自何处,所以不承担任何版权以及其他问题带来的法律责任,如有侵权或者其他问题请联系站长删除!站长QQ754403226 谢谢。

有关影视版权:本站只供百度云网盘资源,版权均属于影片公司所有,请在下载后24小时删除,切勿用于商业用途。本站所有资源信息均从互联网搜索而来,本站不对显示的内容承担责任,如您认为本站页面信息侵犯了您的权益,请附上版权证明邮件告知【754403226@qq.com】,在收到邮件后72小时内删除。本文链接:https://www.piaodoo.com/7814.html

搜索