import os
import time
from concurrent.futures import ThreadPoolExecutor

import requests
from bs4 import BeautifulSoup
from jsonpath import jsonpath
class Netbian:
    """Scraper that downloads wallpaper thumbnails from www.netbian.com.

    List pages are fetched through a proxy; when the site answers 503 a
    fresh proxy is pulled from a user-supplied proxy-pool API and the
    request is retried once.  Images are written into the local
    ``Save Pictrue`` directory (original misspelled name kept for
    backward compatibility with existing downloads).
    """

    # Output directory — original (misspelled) name preserved on purpose.
    SAVE_DIR = 'Save Pictrue'

    def __init__(self):
        # Initial proxy — intentionally blank, fill in your own address.
        self.proxy = {
            'http': 'http://' + '',
            'https': 'https://' + ''
        }
        # Running total of successfully downloaded images.
        # NOTE(review): incremented from worker threads without a lock —
        # counts may race; acceptable for a progress printout only.
        self.count = 0
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36',
            'Host': 'www.netbian.com',
            'Cookie': 'yjs_js_security_passport=56950c00c92b7532be82ed4ae7f342785ac382dd_1633550649_js; '
        }

    def get_proxy(self):
        """Fetch a fresh proxy from the proxy-pool API into ``self.proxy``."""
        url = ''  # proxy-pool API endpoint — supply your own
        resp = requests.get(url, timeout=10)  # fix: bound the wait
        data = resp.json()
        # jsonpath returns False on no match, so [0] raises — same hard
        # failure as the original, now surfaced with a timeout in place.
        ip = jsonpath(data, '$..ip')[0]
        port = jsonpath(data, '$..port')[0]
        proxy = str(ip) + ':' + str(port)
        self.proxy = {
            'http': 'http://' + proxy,
            'https': 'https://' + proxy
        }

    def get_data(self, num=1):
        """Fetch list page *num* and return it parsed as a BeautifulSoup tree.

        Retries once with a rotated proxy when the site answers 503.
        """
        url = 'http://www.netbian.com/'
        if num != 1:
            url = 'http://www.netbian.com/index_' + str(num) + '.htm'
        response = requests.get(url=url, headers=self.headers,
                                proxies=self.proxy, timeout=10)
        if response.status_code == 503:
            # Proxy was blocked — rotate and retry once.
            self.get_proxy()
            response = requests.get(url=url, headers=self.headers,
                                    proxies=self.proxy, timeout=10)
        # fix: the site serves GBK; 'gbk' is a superset of the original
        # 'gb2312' and decodes extended characters gb2312 would mangle.
        response.encoding = 'gbk'
        return BeautifulSoup(response.text, 'lxml')

    def analytical_data(self, num):
        """Download every titled thumbnail on list page *num* into SAVE_DIR."""
        soup = self.get_data(num)
        # fix: the original never created the directory → FileNotFoundError
        # on a fresh checkout.
        os.makedirs(self.SAVE_DIR, exist_ok=True)
        for item in soup.find(attrs={'class': 'list'}).ul.find_all('li'):
            title = item.a.get('title')
            if title is None:
                continue  # skip ad/placeholder entries without a title
            picture_url = item.img.get('src')
            # fix: fetch first, write second — a failed request no longer
            # leaves a truncated/empty .jpg behind (the original opened
            # the file before issuing the request).
            img = requests.get(url=picture_url, headers=self.headers,
                               timeout=10)
            with open(self.SAVE_DIR + '/' + title + '.jpg', "wb") as code:
                code.write(img.content)
            self.count += 1
            print('下载成功,第 ' + str(self.count) + ' 张.')

    def main(self):
        """Prompt for a start page and page count, then scrape concurrently."""
        number = input('从多少页开始爬取?\n')
        page = input('爬取多少页?\n')
        with ThreadPoolExecutor(max_workers=5) as t:
            obj_list = []
            for i in range(int(page)):
                obj = t.submit(self.analytical_data, int(number) + i)
                obj_list.append(obj)
if __name__ == '__main__':
    # Entry point: interactive run (prompts for start page and page count).
    # fix: restored the indentation lost when the file was flattened.
    Netbian().main()
# Webpage residue from the article this script was copied from, kept as comments:
# "使用Python爬取美女图片示例" — "Example: scraping beauty images with Python"
# "(0)个小伙伴在吐槽" — "(0) readers commented"