Skip to content

Commit 1242cf9

Browse files
committed
新增美女壁纸下载器
1 parent e6c240f commit 1242cf9

File tree

2 files changed

+109
-2
lines changed

2 files changed

+109
-2
lines changed

README.md

+3-2
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@
6161

6262
> **其他实战**
6363
64-
[美团 解析与token生成](https://github.com/wkunzhi/Python3-Spider/tree/master/【美团】数据解析、token生成) | [bilibili 视频下载](https://github.com/wkunzhi/Python3-Spider/tree/master/【bilibili】视频下载) | [51job 查岗位](https://github.com/wkunzhi/Python3-Spider/tree/master/【51Job】查岗位) | [百度 翻译](https://github.com/wkunzhi/Python3-Spider/tree/master/【百度】翻译) | [美团 全国区域](https://github.com/wkunzhi/Python3-Spider/tree/master/各站案例/MeiTuanArea) | [快递查询](https://github.com/wkunzhi/Python3-Spider/tree/master/【快递】单号查询) | [金逸电影 注册](https://github.com/wkunzhi/Python3-Spider/tree/master/其他实战/【金逸电影】自动注册) | [Python加密库Demo](https://github.com/wkunzhi/Python3-Spider/tree/master/其他实战/【Python加密库】Demo)
64+
[美女壁纸下载](https://github.com/wkunzhi/Python3-Spider/tree/master/【壁纸】美女壁纸下载器) | [美团 解析与token生成](https://github.com/wkunzhi/Python3-Spider/tree/master/【美团】数据解析、token生成) | [bilibili 视频下载](https://github.com/wkunzhi/Python3-Spider/tree/master/【bilibili】视频下载) | [51job 查岗位](https://github.com/wkunzhi/Python3-Spider/tree/master/【51Job】查岗位) | [百度 翻译](https://github.com/wkunzhi/Python3-Spider/tree/master/【百度】翻译) | [美团 全国区域](https://github.com/wkunzhi/Python3-Spider/tree/master/各站案例/MeiTuanArea) | [快递查询](https://github.com/wkunzhi/Python3-Spider/tree/master/【快递】单号查询) | [金逸电影 注册](https://github.com/wkunzhi/Python3-Spider/tree/master/其他实战/【金逸电影】自动注册) | [Python加密库Demo](https://github.com/wkunzhi/Python3-Spider/tree/master/其他实战/【Python加密库】Demo)
6565

6666

6767

@@ -116,5 +116,6 @@
116116

117117
![缺口识别](https://zok-blog.oss-cn-hangzhou.aliyuncs.com/images/20190818/WX20191011-203441%402x.png?x-oss-process=image/resize,h_200)
118118

119-
119+
### 美女壁纸下载器
120+
![美女壁纸下载器](https://zok-blog.oss-cn-hangzhou.aliyuncs.com/images/20190818/WX20191106-114450.png)
120121

Original file line numberDiff line numberDiff line change
@@ -0,0 +1,106 @@
1+
# -*- coding: utf-8 -*-
2+
# __author__ = "zok" [email protected]
3+
# Date: 2019-11-06 Python: 3.7
4+
5+
from requests import get
6+
from filetype import guess
7+
from os import rename
8+
from os import makedirs
9+
from os.path import exists
10+
from json import loads
11+
from contextlib import closing
12+
13+
14+
class DownBg:
    """Downloader for high-resolution wallpapers from paper.meiyuan.in.

    Fetches a JSON list of photos for a category and streams each
    raw-resolution image to a per-category local folder, printing a
    console progress bar while downloading.
    """

    def __init__(self):
        # Browser-like User-Agent so the API does not reject the request.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
        }

    def down_load(self, file_url, file_full_name, now_photo_count, all_photo_count):
        """Stream one image to disk and print a progress bar.

        :param file_url: direct URL of the image to download
        :param file_full_name: local path to save to (extension added after download)
        :param now_photo_count: 1-based index of this download within the batch
        :param all_photo_count: total number of downloads in this batch
        """
        with closing(get(file_url, headers=self.headers, stream=True)) as response:
            chunk_size = 1024  # bytes requested per chunk
            # The server may omit Content-Length; fall back to 0 so we can
            # still download (the progress bar just stays at 0% then).
            content_size = int(response.headers.get('content-length', 0))
            data_count = 0  # bytes received so far
            with open(file_full_name, "wb") as file:
                for data in response.iter_content(chunk_size=chunk_size):
                    file.write(data)
                    # BUG FIX: add the current chunk BEFORE drawing the bar.
                    # The original computed done_block from the stale count,
                    # so the bar lagged one chunk and never showed 100%.
                    data_count = data_count + len(data)
                    if content_size:
                        done_block = int((data_count / content_size) * 50)
                        now_jd = (data_count / content_size) * 100
                    else:
                        # Unknown total size: no meaningful percentage.
                        done_block = 0
                        now_jd = 0
                    print("\r %s:[%s%s] %d%% %d/%d" % (
                        file_full_name, done_block * '█', ' ' * (50 - 1 - done_block), now_jd,
                        now_photo_count, all_photo_count), end=" ")
        # Detect the real file type from the downloaded bytes and append the
        # matching extension. guess() returns None for unrecognized content;
        # in that case keep the extension-less name instead of crashing.
        file_type = guess(file_full_name)
        if file_type is not None:
            rename(file_full_name, file_full_name + '.' + file_type.extension)

    def crawler_photo(self, type_id, photo_count):
        """Fetch the photo list for a category and download every item.

        :param type_id: category — 1 newest, 2 hottest, 3 girls, 4 starry sky
        :param photo_count: number of wallpapers to download
        :return: None
        """
        # Category number -> column key used by the remote API.
        type_dict = {
            '1': '5c68ffb9463b7fbfe72b0db0',
            '2': '5c69251c9b1c011c41bb97be',
            '3': '5c81087e6aee28c541eefc26',
            '4': '5c81f64c96fad8fe211f5367'
        }

        url = 'https://service.paper.meiyuan.in/api/v2/columns/flow/{key}?page=1&per_page='.format(
            key=type_dict.get(str(type_id))) + str(photo_count)

        # Fetch the JSON list of photo records.
        respond = get(url, headers=self.headers)
        photo_data = loads(respond.content)

        now_photo_count = 1                 # 1-based index of the current download
        all_photo_count = len(photo_data)   # total photos returned by the API

        # Ensure the per-category output folder exists. Hoisted out of the
        # loop — the original re-checked it on every iteration.
        if not exists('./' + str(type_id)):
            makedirs('./' + str(type_id))

        # Download and save each raw-resolution wallpaper.
        for photo in photo_data:
            # Highest-resolution variant of the image.
            file_url = photo['urls']['raw']

            # File name = last path segment of the URL (no extension yet).
            file_name_only = file_url.split('/')[-1]

            # Full local path for this image.
            file_full_name = './' + str(type_id) + '/' + file_name_only

            self.down_load(file_url, file_full_name, now_photo_count, all_photo_count)
            now_photo_count = now_photo_count + 1
89+
90+
91+
if __name__ == '__main__':
    dg = DownBg()

    # Prompt repeatedly: each round asks for a category and a count, then
    # downloads that batch. (The original pre-initialized wall_paper_id and
    # wall_paper_count; both were dead — overwritten before first use.)
    while True:
        wall_paper_id = input("\n\n壁纸类型:最新壁纸 1, 最热壁纸 2, 女生壁纸 3, 星空壁纸 4\n请输入编号以便选择5K超清壁纸类型:")
        wall_paper_count = input("请输入要下载的5K超清壁纸的数量:")

        # Reject anything but a known category id and a positive integer count.
        if wall_paper_id not in ['1', '2', '3', '4'] or not wall_paper_count.isdigit():
            print('输入有误')
            continue

        print("正在下载5K超清壁纸,请稍等……")
        dg.crawler_photo(int(wall_paper_id), int(wall_paper_count))
        print('\n下载5K高清壁纸成功!')

0 commit comments

Comments
 (0)