# -*- coding: utf-8 -*-
# __author__ = "zok" [email protected]
# Date: 2019-11-06 Python: 3.7

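# Third-party dependencies (install from PyPI): requests, filetype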
from requests import get
from filetype import guess
from os import rename
from os import makedirs
from os.path import exists
from json import loads
from contextlib import closing


class DownBg:
    """
    Super high-resolution (5K) wallpaper downloader.
    """
    def __init__(self):
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
        }
    def down_load(self, file_url, file_full_name, now_photo_count, all_photo_count):
        # Stream the image so we can report progress while downloading
        with closing(get(file_url, headers=self.headers, stream=True)) as response:
            chunk_size = 1024  # bytes fetched per iteration
            content_size = int(response.headers['content-length'])  # total file size in bytes
            data_count = 0  # bytes received so far
            with open(file_full_name, "wb") as file:
                for data in response.iter_content(chunk_size=chunk_size):
                    file.write(data)
                    data_count += len(data)
                    done_block = int((data_count / content_size) * 50)  # filled cells of a 50-char progress bar
                    percent = (data_count / content_size) * 100
                    print("\r %s:[%s%s] %d%% %d/%d" % (
                        file_full_name, done_block * '█', ' ' * (50 - done_block), percent,
                        now_photo_count, all_photo_count), end=" ")
            # After the download, detect the actual image type and append its extension
            file_type = guess(file_full_name)
            if file_type is not None:  # guess() returns None for unrecognized files
                rename(file_full_name, file_full_name + '.' + file_type.extension)

    def crawler_photo(self, type_id, photo_count):
        """
        :param type_id: latest 1, hottest 2, girls 3, starry sky 4
        :param photo_count: number of wallpapers to download
        :return:
        """
        # Column IDs used by the API for each wallpaper category
        type_dict = {
            '1': '5c68ffb9463b7fbfe72b0db0',
            '2': '5c69251c9b1c011c41bb97be',
            '3': '5c81087e6aee28c541eefc26',
            '4': '5c81f64c96fad8fe211f5367'
        }

        url = 'https://service.paper.meiyuan.in/api/v2/columns/flow/{key}?page=1&per_page='.format(
            key=type_dict.get(str(type_id))) + str(photo_count)

        # Fetch the image list
        respond = get(url, headers=self.headers)
        photo_data = loads(respond.content)

        # Index of the image currently being downloaded
        now_photo_count = 1

        # Total number of images
        all_photo_count = len(photo_data)

        # Create a folder for this category to hold the downloaded images
        if not exists('./' + str(type_id)):
            makedirs('./' + str(type_id))

        # Download and save the 5K wallpapers
        for photo in photo_data:
            # URL of the image to download
            file_url = photo['urls']['raw']

            # Image file name without extension (last path segment of the URL)
            file_name_only = file_url.split('/')[-1]

            # Full local path to save to
            file_full_name = './' + str(type_id) + '/' + file_name_only

            # Download the image
            self.down_load(file_url, file_full_name, now_photo_count, all_photo_count)
            now_photo_count += 1


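# A minimal non-interactive usage sketch (illustrative values; see the
# crawler_photo docstring for the type_id meanings):
#     DownBg().crawler_photo(type_id=2, photo_count=5)  # five wallpapers from the "hottest" column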
if __name__ == '__main__':
    dg = DownBg()

    while True:
        wall_paper_id = input("\n\nWallpaper types: latest 1, hottest 2, girls 3, starry sky 4\nEnter a number to choose the 5K wallpaper type: ")
        wall_paper_count = input("Enter how many 5K wallpapers to download: ")

        if wall_paper_id not in ['1', '2', '3', '4'] or not wall_paper_count.isdigit():
            print('Invalid input')
            continue

        print("Downloading 5K wallpapers, please wait...")
        dg.crawler_photo(int(wall_paper_id), int(wall_paper_count))
        print('\nFinished downloading the 5K wallpapers!')