本文来源吾爱破解论坛
本帖最后由 编草鞋的蚂蚱 于 2018-11-27 16:03 编辑
学习python就要学以致用,写个下载器分享下:
这里将版本信息放在json文件中,首先要读取json文件。
JSON:
JSON(JavaScript Object Notation) 是一种轻量级的数据交换格式。它基于ECMAScript的一个子集。 JSON采用完全独立于语言的文本格式,但是也使用了类似于C语言家族的习惯(包括C、C++、Java、JavaScript、Perl、Python等)。这些特性使JSON成为理想的数据交换语言。易于人阅读和编写,同时也易于机器解析和生成(一般用于提升网络传输速率)。
JSON在python中分别由list和dict组成,用于字符串和python数据类型间进行转换。
Json模块提供了四个功能:dumps、dump、loads、load:
dumps把数据类型转换成字符串 将python中的 字典 转换为 字符串
dump把数据类型转换成字符串并存储在文件中 将数据写入json文件中
loads把字符串转换成数据类型 将 字符串 转换为 字典
load把文件打开从字符串转换成数据类型 把文件打开,并把字符串变换为数据类型
#参考文献:http://www.cnblogs.com/luotianshuai/p/5002110.html
# https://www.cnblogs.com/wangyayu ... utm_medium=referral
自动更新包括:检查版本号、对比版本号并判断是否需要更新、(不需要更新则终止程序,需要更新则下载压缩包)下载文件、删除老文件、解压新文件、删除压缩包、改写文件版本信息等功能
***这里的下载是服务器控制下载,否则下载将会失败。
# -*- coding:utf-8 -*-
# ! /usr/bin/env python
from __future__ import unicode_literals
import os
import shutil
import sys
import cPickle
import threading
from collections import namedtuple
from multiprocessing.pool import ThreadPool
from urlparse import urlsplit
import json
import urllib
import urllib2
import time
import argparse
import zipfile
def getlocver(versionpath):  # read the local version info
    """Return the "version" value from the JSON file at *versionpath*.

    The file must contain a JSON object with a "version" key.
    Fix: the original opened the file and never closed it; a context
    manager now guarantees the handle is released.
    """
    with open(versionpath, 'rb') as f:
        data = f.read()
    print(data)  # debug trace: raw file contents
    j = json.loads(data)
    version = j["version"]  # value stored under the "version" key
    print(version)
    return version
# 参考文献:https://www.cnblogs.com/laoniubile/p/6036919.html
def geturl1():  # first request: report the local version to the server
    """Send the version-check request and return the raw server reply.

    The local version number is carried in the URL itself.
    Fix: the original line was a syntax error ("http:"XXXX" + ...);
    "XXXX" below is the censored update-server endpoint — replace it
    with the real address.  Reads the module-global `versionpath`
    (set in __main__).
    """
    url = "http://XXXX" + getlocver(versionpath)  # append version to the URL
    req = urllib2.Request(url)
    res_data = urllib2.urlopen(req)
    res = res_data.read()
    return res  # raw JSON text returned by the server
# 参考文献:https://www.cnblogs.com/poerli/p/6429673.html
def getYN():
    """Return the server's YN flag from the version-check reply.

    YN == 1 means a newer version exists and should be downloaded;
    any other value aborts the update.
    """
    reply = json.loads(geturl1())
    return reply["YN"]
def getserversion():
    """Return the newest version number ("version") from the server reply."""
    payload = json.loads(geturl1())
    version = payload["version"]
    print (version)
    return version
# Global lock serializing the seek+write pairs on the shared output file
# (used by _worker and _monitor).
lock = threading.Lock()
# Default download parameters: worker-thread count, bytes per read inside a
# worker, and bytes per download block.
defaults = dict(
    thread_count=10,
    buffer_size=500 * 1024,
    block_size=1000 * 1024)
def progress(percent, width=50):
    """Redraw a single-line download progress bar.

    percent -- integer 0..100; width -- bar width in characters.
    Fixes: the original bar used '>>' (two chars) per completed unit, so at
    100% it was twice `width` wide; and it only flushed at 100%, so the
    carriage-return redraw was invisible on line-buffered terminals.
    """
    bar = '>' * int(width * percent // 100)
    sys.stdout.write('%-*s %d%%\r' % (width, bar, percent))
    if percent >= 100:
        sys.stdout.write('\n')
    # flush every update so the \r overwrite is rendered immediately
    sys.stdout.flush()
def write_data(filepath, data):
    """Serialize *data* to *filepath* with cPickle (binary mode)."""
    out = open(filepath, 'wb')
    try:
        cPickle.dump(data, out)
    finally:
        out.close()
def read_data(filepath):
    """Load and return the cPickle payload stored at *filepath*."""
    infile = open(filepath, 'rb')
    try:
        return cPickle.load(infile)
    finally:
        infile.close()
# Remote-file metadata: download URL, basename, size in bytes, and the
# Last-Modified header value (used to invalidate stale resume checkpoints).
FileInfo = namedtuple('FileInfo', 'url name size lastmodified')
def get_file_info(url):
    """Issue an HTTP HEAD request and return a FileInfo for *url*.

    Size comes from Content-Length (0 if missing); the filename comes from
    Content-Disposition when present, otherwise from the URL path.
    """
    class _HeadRequest(urllib2.Request):
        # urllib2 picks the HTTP verb via get_method()
        def get_method(self):
            return "HEAD"

    res = urllib2.urlopen(_HeadRequest(url))
    res.read()
    headers = dict(res.headers)
    size = int(headers.get('content-length', 0))
    lastmodified = headers.get('last-modified', '')
    if 'content-disposition' in headers:
        name = headers['content-disposition'].split('filename=')[1]
        if name[0] in ('"', "'"):
            name = name[1:-1]  # strip surrounding quotes
    else:
        name = os.path.basename(urlsplit(url)[2])
    return FileInfo(url, name, size, lastmodified)
def _worker(task):
    """Download one byte-range block into the shared file object.

    *task* is an (url, block, fobj, buffer_size) tuple, where block is the
    mutable [start, current, end] triple shared with the monitor thread.
    (Explicit unpacking replaces the Python-2-only tuple parameter.)
    """
    url, block, fobj, buffer_size = task
    req = urllib2.Request(url)
    # resume from block[1] (current offset) up to block[2] (inclusive end)
    req.headers['Range'] = 'bytes=%s-%s' % (block[1], block[2])
    res = urllib2.urlopen(req)
    while True:
        chunk = res.read(buffer_size)
        if not chunk:
            break
        with lock:
            # seek+write+advance must be atomic w.r.t. the other workers
            fobj.seek(block[1])
            fobj.write(chunk)
            block[1] += len(chunk)
def _monitor(infopath, file_info, blocks):
    """Progress / checkpoint loop run in its own thread.

    Every 2 seconds: recompute the overall percentage under the lock, redraw
    the progress bar, and persist (file_info, blocks) to *infopath* so an
    interrupted download can resume. Exits once 100% is reached.
    """
    while True:
        with lock:
            done = sum([blk[1] - blk[0] for blk in blocks])
            percent = done * 100 / file_info.size
        progress(percent)
        if percent >= 100:
            break
        write_data(infopath, (file_info, blocks))
        time.sleep(2)
def download(url, output,  # multi-threaded, resumable download
             thread_count=defaults['thread_count'],
             buffer_size=defaults['buffer_size'],
             block_size=defaults['block_size']):
    """Download *url* to *output* with range-request worker threads.

    url          -- source URL (server must honour Range requests)
    output       -- destination filename; None -> name reported by server
    thread_count -- maximum worker threads
    buffer_size  -- bytes per read inside a worker
    block_size   -- bytes per download block

    Progress state (FileInfo + block offsets) is checkpointed to
    '<output>.inf' so an interrupted download can be resumed.
    """
    file_info = get_file_info(url)
    if output is None:
        output = file_info.name
    workpath = '%s.ing' % output   # partial data file
    infopath = '%s.inf' % output   # resume checkpoint
    blocks = []
    if os.path.exists(infopath):
        # resume only if the remote file is unchanged since the checkpoint
        _x, blocks = read_data(infopath)
        if (_x.url != url or
                _x.name != file_info.name or
                _x.lastmodified != file_info.lastmodified):
            blocks = []
    if len(blocks) == 0:
        # fresh download: each block is [start, current, end] (end inclusive)
        if block_size > file_info.size:
            blocks = [[0, 0, file_info.size]]
        else:
            block_count, remain = divmod(file_info.size, block_size)
            blocks = [[i * block_size, i * block_size,
                       (i + 1) * block_size - 1] for i in range(block_count)]
            blocks[-1][-1] += remain
        with open(workpath, 'wb') as fobj:
            fobj.write('')
    print('Downloading %s' % url)
    threading.Thread(target=_monitor, args=(
        infopath, file_info, blocks)).start()
    with open(workpath, 'rb+') as fobj:
        # BUGFIX: the original comprehension tested `blocks[1] < blocks[2]`
        # (comparing whole rows of the blocks list) and handed every worker
        # the entire list instead of its own block; select per-block here.
        args = [(url, blocks[i], fobj, buffer_size)
                for i in range(len(blocks)) if blocks[i][1] <= blocks[i][2]]
        if thread_count > len(args):
            thread_count = len(args)
        pool = ThreadPool(thread_count)
        pool.map(_worker, args)
        pool.close()
        pool.join()
    if os.path.exists(output):
        os.remove(output)
    os.rename(workpath, output)
    if os.path.exists(infopath):
        os.remove(infopath)
    # every block must have advanced past its inclusive end offset
    assert all([block[1] >= block[2] for block in blocks]) is True
def reversion(jsonpath):  # write the server's new version number into the local info
    """Replace the "version" value in the local JSON file with the server's.

    Fix: the original opened the file with 'r+' and left that handle open
    (leaked) while reopening the same path for writing; both accesses now
    use context managers, and the read-only pass opens with 'r'.
    """
    with open(jsonpath, 'r') as f:
        data = f.read()
    print(data)  # debug trace: old version info
    j = json.loads(data)
    j["version"] = getserversion()
    with open(jsonpath, 'wb') as f:
        f.write(json.dumps(j))  # persist the updated JSON
def rename(pathT):
    """Rename the installed folder ('filename' + local version) to 'willdele'
    so it can be deleted after the update succeeds.

    NOTE(review): reads the module-global `versionpath` (set in __main__);
    the os.listdir() result is discarded -- presumably only an existence
    check on pathT. TODO confirm.
    """
    os.listdir(pathT)
    doomed = 'willdele'
    os.rename('filename' + getlocver(versionpath), doomed.encode("utf-8"))
# 参考文献:http://www.runoob.com/python/os-rename.html
def delold():  # delete the renamed old version
    # Removes the 'willdele' directory; the Chinese string below is a
    # placeholder the deployer must replace with the real path + 'willdele'.
    shutil.rmtree("将要删除的文件夹路径和文件夹名willdele")
# 参考文献:https://www.cnblogs.com/FengZiQ/p/8532141.html
def un_zip(file_name):  # extract the downloaded archive
    """Extract zip archive *file_name* into the '<file_name>_files' directory.

    Fixes: the ZipFile handle is now closed even if extraction raises
    (try/finally), and the per-member extract loop is replaced by
    ZipFile.extractall, which produces the same layout.
    """
    target = file_name + "_files"
    if not os.path.isdir(target):
        os.mkdir(target)
    zip_file = zipfile.ZipFile(file_name)
    try:
        zip_file.extractall(target)
    finally:
        zip_file.close()
# 参考文献:https://blog.csdn.net/abcwanglinyong/article/details/80840813
def delzip(pathT, zipfile_name):
    """Remove the downloaded zip archive *zipfile_name*.

    NOTE(review): the os.listdir() result is discarded -- presumably only an
    existence check on pathT.
    """
    os.listdir(pathT)
    os.remove(zipfile_name)
# 参考文献:https://www.cnblogs.com/yuanqiangfei/p/8110185.html
def newrename(pathT, j, zipfile_name):
    """Rename the extracted '<zip>_files' folder to 'filename' + new version.

    Returns the new folder name. NOTE(review): the os.listdir() result is
    discarded; the utf-8 encode of the target preserves the original
    Python 2 bytes-path behaviour.
    """
    os.listdir(pathT)
    fresh = 'filename' + j
    os.rename(zipfile_name + "_files", fresh.encode("utf-8"))
    return fresh
if __name__ == '__main__':
    # CLI entry point: multi-threaded downloader driving the auto-update flow.
    parser = argparse.ArgumentParser(description='多线程文件下载器.')
    parser.add_argument('url', type=str, help='下载连接')
    parser.add_argument('-o', type=str, default=None, dest="output", help='输出文件')
    parser.add_argument('-t', type=int, default=defaults['thread_count'], dest="thread_count", help='下载的线程数量')
    parser.add_argument('-b', type=int, default=defaults['buffer_size'], dest="buffer_size", help='缓存大小')
    parser.add_argument('-s', type=int, default=defaults['block_size'], dest="block_size", help='字区大小')
    argv = sys.argv[1:]
    if len(argv) == 0:
        # fall back to a hard-coded (censored) URL when run without arguments;
        # NOTE(review): the duplicated `argv = argv =` assignment is a typo
        # but harmless.
        argv = argv = ['http:XXXXX']
    args = parser.parse_args(argv)
    start_time = time.time()
    # Placeholders to be filled in by the deployer. Several helpers above
    # (geturl1, rename) read `versionpath` as a module global defined here.
    pathT = '文件夹path'
    versionpath = "jsonpath"
    yn = getYN()
    j = getserversion()  # NOTE(review): getserversion issues a fresh server request each call
    zipfile_name = "filename" + getserversion() + ".zip"
    if yn == 1:
        print("准备更新,请稍后》》")
        # Update sequence: rename the old install, download the archive,
        # bump the local version info, delete the old install, unzip,
        # delete the archive, rename the new folder into place.
        rename(pathT)
        download(args.url, args.output, args.thread_count,
                 args.buffer_size, args.block_size)
        reversion(versionpath)
        delold()
        un_zip(zipfile_name)
        delzip(pathT, zipfile_name)
        newrename(pathT, j, zipfile_name)
    else:
        print ("已经是最新版本《《")
        exit()
版权声明:
本站所有资源均为站长或网友整理自互联网或站长购买自互联网,站长无法分辨资源版权出自何处,所以不承担任何版权以及其他问题带来的法律责任,如有侵权或者其他问题请联系站长删除!站长QQ754403226 谢谢。
- 上一篇: 用Python实现微信公众号API素材库图文消息抓取
- 下一篇: 某图片网爬虫