2022年 11月 4日

8 个 Python 实用脚本,赶紧收藏备用

下面整理 8 个实用的 Python 脚本,需要的时候改改直接用,建议收藏!

1.解决 linux 下 unzip 乱码的问题。

  1. import os
  2. import sys
  3. import zipfile
  4. import argparse
  5. s = '\x1b[%d;%dm%s\x1b[0m'
  6. def unzip(path):
  7. file = zipfile.ZipFile(path,"r")
  8. if args.secret:
  9. file.setpassword(args.secret)
  10. for name in file.namelist():
  11. try:
  12. utf8name=name.decode('gbk')
  13. pathname = os.path.dirname(utf8name)
  14. except:
  15. utf8name=name
  16. pathname = os.path.dirname(utf8name)
  17. #print s % (1, 92, ' >> extracting:'), utf8name
  18. #pathname = os.path.dirname(utf8name)
  19. if not os.path.exists(pathname) and pathname != "":
  20. os.makedirs(pathname)
  21. data = file.read(name)
  22. if not os.path.exists(utf8name):
  23. try:
  24. fo = open(utf8name, "w")
  25. fo.write(data)
  26. fo.close
  27. except:
  28. pass
  29. file.close()
  30. def main(argv):
  31. ######################################################
  32. # for argparse
  33. p = argparse.ArgumentParser(description='解决unzip乱码')
  34. p.add_argument('xxx', type=str, nargs='*', \
  35. help='命令对象.')
  36. p.add_argument('-s', '--secret', action='store', \
  37. default=None, help='密码')
  38. global args
  39. args = p.parse_args(argv[1:])
  40. xxx = args.xxx
  41. for path in xxx:
  42. if path.endswith('.zip'):
  43. if os.path.exists(path):
  44. print s % (1, 97, ' ++ unzip:'), path
  45. unzip(path)
  46. else:
  47. print s % (1, 91, ' !! file doesn\'t exist.'), path
  48. else:
  49. print s % (1, 91, ' !! file isn\'t a zip file.'), path
  50. if __name__ == '__main__':
  51. argv = sys.argv
  52. main(argv)

2.统计当前根目录代码行数。

  1. # coding=utf-8
  2. import os
  3. import time
  4. # 设定根目录
  5. basedir = './'
  6. filelists = []
  7. # 指定想要统计的文件类型
  8. whitelist = ['cpp', 'h']
  9. #遍历文件, 递归遍历文件夹中的所有
  10. def getFile(basedir):
  11. global filelists
  12. for parent,dirnames,filenames in os.walk(basedir):
  13. for filename in filenames:
  14. ext = filename.split('.')[-1]
  15. #只统计指定的文件类型,略过一些log和cache文件
  16. if ext in whitelist:
  17. filelists.append(os.path.join(parent,filename))
  18. #统计一个的行数
  19. def countLine(fname):
  20. count = 0
  21. # 把文件做二进制看待,read.
  22. for file_line in open(fname, 'rb').readlines():
  23. if file_line != '' and file_line != '\n': #过滤掉空行
  24. count += 1
  25. print (fname + '----' , count)
  26. return count
  27. if __name__ == '__main__' :
  28. startTime = time.clock()
  29. getFile(basedir)
  30. totalline = 0
  31. for filelist in filelists:
  32. totalline = totalline + countLine(filelist)
  33. print ('total lines:',totalline)
  34. print ('Done! Cost Time: %0.2f second' % (time.clock() - startTime))

3.扫描当前目录和所有子目录并显示大小。

  1. import os
  2. import sys
  3. try:
  4. directory = sys.argv[1]
  5. except IndexError:
  6. sys.exit("Must provide an argument.")
  7. dir_size = 0
  8. fsizedicr = {'Bytes': 1,
  9. 'Kilobytes': float(1) / 1024,
  10. 'Megabytes': float(1) / (1024 * 1024),
  11. 'Gigabytes': float(1) / (1024 * 1024 * 1024)}
  12. for (path, dirs, files) in os.walk(directory):
  13. for file in files:
  14. filename = os.path.join(path, file)
  15. dir_size += os.path.getsize(filename)
  16. fsizeList = [str(round(fsizedicr[key] * dir_size, 2)) + " " + key for key in fsizedicr]
  17. if dir_size == 0: print ("File Empty")
  18. else:
  19. for units in sorted(fsizeList)[::-1]:
  20. print ("Folder Size: " + units)

4.将源目录240天以上的所有文件移动到目标目录。

  1. import shutil
  2. import sys
  3. import time
  4. import os
  5. import argparse
  6. usage = 'python move_files_over_x_days.py -src [SRC] -dst [DST] -days [DAYS]'
  7. description = 'Move files from src to dst if they are older than a certain number of days. Default is 240 days'
  8. args_parser = argparse.ArgumentParser(usage=usage, description=description)
  9. args_parser.add_argument('-src', '--src', type=str, nargs='?', default='.', help='(OPTIONAL) Directory where files will be moved from. Defaults to current directory')
  10. args_parser.add_argument('-dst', '--dst', type=str, nargs='?', required=True, help='(REQUIRED) Directory where files will be moved to.')
  11. args_parser.add_argument('-days', '--days', type=int, nargs='?', default=240, help='(OPTIONAL) Days value specifies the minimum age of files to be moved. Default is 240.')
  12. args = args_parser.parse_args()
  13. if args.days < 0:
  14. args.days = 0
  15. src = args.src # 设置源目录
  16. dst = args.dst # 设置目标目录
  17. days = args.days # 设置天数
  18. now = time.time() # 获得当前时间
  19. if not os.path.exists(dst):
  20. os.mkdir(dst)
  21. for f in os.listdir(src): # 遍历源目录所有文件
  22. if os.stat(f).st_mtime < now - days * 86400: # 判断是否超过240天
  23. if os.path.isfile(f): # 检查是否是文件
  24. shutil.move(f, dst) # 移动文件

5.压缩日志目录中的 .log 文件并移动到归档目录。

  1. import os
  2. import shutil
  3. from time import strftime
  4. logsdir="c:\logs\puttylogs"
  5. zipdir="c:\logs\puttylogs\zipped_logs"
  6. zip_program="zip.exe"
  7. for files in os.listdir(logsdir):
  8. if files.endswith(".log"):
  9. files1=files+"."+strftime("%Y-%m-%d")+".zip"
  10. os.chdir(logsdir)
  11. os.system(zip_program + " " + files1 +" "+ files)
  12. shutil.move(files1, zipdir)
  13. os.remove(files)

6.下载Leetcode的算法题。

  1. import sys
  2. import re
  3. import os
  4. import argparse
  5. import requests
  6. from lxml import html as lxml_html
  7. try:
  8. import html
  9. except ImportError:
  10. import HTMLParser
  11. html = HTMLParser.HTMLParser()
  12. try:
  13. import cPickle as pk
  14. except ImportError:
  15. import pickle as pk
class LeetcodeProblems(object):
    """Scrape LeetCode's algorithm problem set, cache it to a pickle,
    and render it as a sorted text file.

    NOTE(review): the scraping below depends on a historical HTML layout
    of leetcode.com (a <tbody> table with <td> index cells) — likely
    broken against the current site; verify before relying on it.
    Callers must assign an argparse Namespace to `self.args` before
    calling run()/to_text().
    """

    def get_problems_info(self):
        """Download index, level, tags and description for every problem;
        pickle the resulting list of dicts to 'leecode_problems.pk' and
        return it. Exits the process on any failed HTTP request."""
        leetcode_url = 'https://leetcode.com/problemset/algorithms'
        res = requests.get(leetcode_url)
        if not res.ok:
            print('request error')
            sys.exit()
        cm = res.text
        # second-to-last tbody chunk holds the problem table
        cmt = cm.split('tbody>')[-2]
        indexs = re.findall(r'<td>(\d+)</td>', cmt)
        problem_urls = ['https://leetcode.com' + url
                        for url in re.findall(
                            r'<a href="(/problems/.+?)"', cmt)]
        levels = re.findall(r"<td value='\d*'>(.+?)</td>", cmt)
        tinfos = zip(indexs, levels, problem_urls)
        # the three regexes must have matched the same rows
        assert (len(indexs) == len(problem_urls) == len(levels))
        infos = []
        for info in tinfos:
            # fetch each problem page for its description/tags
            res = requests.get(info[-1])
            if not res.ok:
                print('request error')
                sys.exit()
            tree = lxml_html.fromstring(res.text)
            title = tree.xpath('//meta[@property="og:title"]/@content')[0]
            description = tree.xpath('//meta[@property="description"]/@content')
            if not description:
                # fall back to the OpenGraph description meta tag
                description = tree.xpath('//meta[@property="og:description"]/@content')[0]
            else:
                description = description[0]
            # un-escape HTML entities (&amp; etc.)
            description = html.unescape(description.strip())
            tags = tree.xpath('//div[@id="tags"]/following::a[@class="btn btn-xs btn-primary"]/text()')
            infos.append(
                {
                    'title': title,
                    'level': info[1],
                    'index': int(info[0]),
                    'description': description,
                    'tags': tags
                }
            )
        # cache so later runs can skip the slow scrape
        with open('leecode_problems.pk', 'wb') as g:
            pk.dump(infos, g)
        return infos

    def to_text(self, pm_infos):
        """Write the problems to 'leecode problems.txt', sorted by the
        key selected via the --index/--title/--tag/--level flags
        (default: index). --rm_blank collapses blank runs in descriptions;
        --line controls the blank gap between problems."""
        if self.args.index:
            key = 'index'
        elif self.args.title:
            key = 'title'
        elif self.args.tag:
            key = 'tags'
        elif self.args.level:
            key = 'level'
        else:
            key = 'index'
        infos = sorted(pm_infos, key=lambda i: i[key])
        text_template = '## {index} - {title}\n' \
                        '~{level}~ {tags}\n' \
                        '{description}\n' + '\n' * self.args.line
        text = ''
        for info in infos:
            if self.args.rm_blank:
                # squeeze consecutive newlines into one
                info['description'] = re.sub(r'[\n\r]+', r'\n', info['description'])
            text += text_template.format(**info)
        with open('leecode problems.txt', 'w') as g:
            g.write(text)

    def run(self):
        """Load the cached pickle if present (unless --redownload was
        given), otherwise scrape; then render the text file."""
        if os.path.exists('leecode_problems.pk') and not self.args.redownload:
            with open('leecode_problems.pk', 'rb') as f:
                pm_infos = pk.load(f)
        else:
            pm_infos = self.get_problems_info()
        print('find %s problems.' % len(pm_infos))
        self.to_text(pm_infos)
  89. def handle_args(argv):
  90. p = argparse.ArgumentParser(description='extract all leecode problems to location')
  91. p.add_argument('--index', action='store_true', help='sort by index')
  92. p.add_argument('--level', action='store_true', help='sort by level')
  93. p.add_argument('--tag', action='store_true', help='sort by tag')
  94. p.add_argument('--title', action='store_true', help='sort by title')
  95. p.add_argument('--rm_blank', action='store_true', help='remove blank')
  96. p.add_argument('--line', action='store', type=int, default=10, help='blank of two problems')
  97. p.add_argument('-r', '--redownload', action='store_true', help='redownload data')
  98. args = p.parse_args(argv[1:])
  99. return args
  100. def main(argv):
  101. args = handle_args(argv)
  102. x = LeetcodeProblems()
  103. x.args = args
  104. x.run()
  105. if __name__ == '__main__':
  106. argv = sys.argv
  107. main(argv)

7.将 Markdown 转换为 HTML。

  1. import sys
  2. import os
  3. from bs4 import BeautifulSoup
  4. import markdown
  5. class MarkdownToHtml:
  6. headTag = '<head><meta charset="utf-8" /></head>'
  7. def __init__(self,cssFilePath = None):
  8. if cssFilePath != None:
  9. self.genStyle(cssFilePath)
  10. def genStyle(self,cssFilePath):
  11. with open(cssFilePath,'r') as f:
  12. cssString = f.read()
  13. self.headTag = self.headTag[:-7] + '<style type="text/css">{}</style>'.format(cssString) + self.headTag[-7:]
  14. def markdownToHtml(self, sourceFilePath, destinationDirectory = None, outputFileName = None):
  15. if not destinationDirectory:
  16. # 未定义输出目录则将源文件目录(注意要转换为绝对路径)作为输出目录
  17. destinationDirectory = os.path.dirname(os.path.abspath(sourceFilePath))
  18. if not outputFileName:
  19. # 未定义输出文件名则沿用输入文件名
  20. outputFileName = os.path.splitext(os.path.basename(sourceFilePath))[0] + '.html'
  21. if destinationDirectory[-1] != '/':
  22. destinationDirectory += '/'
  23. with open(sourceFilePath,'r', encoding='utf8') as f:
  24. markdownText = f.read()
  25. # 编译出原始 HTML 文本
  26. rawHtml = self.headTag + markdown.markdown(markdownText,output_format='html5')
  27. # 格式化 HTML 文本为可读性更强的格式
  28. beautifyHtml = BeautifulSoup(rawHtml,'html5lib').prettify()
  29. with open(destinationDirectory + outputFileName, 'w', encoding='utf8') as f:
  30. f.write(beautifyHtml)
if __name__ == "__main__":
    mth = MarkdownToHtml()
    # Shallow copy of the CLI arguments, without the script name.
    argv = sys.argv[1:]
    # At this point argv may still contain option flags besides source
    # paths; after the extraction below, only source file paths remain.
    outputDirectory = None
    if '-s' in argv:
        # '-s <css-file>': inline this style sheet into the output head
        cssArgIndex = argv.index('-s') +1
        cssFilePath = argv[cssArgIndex]
        # validate the style sheet path
        if not os.path.isfile(cssFilePath):
            print('Invalid Path: '+cssFilePath)
            sys.exit()
        mth.genStyle(cssFilePath)
        # pop order matters: remove the value first, then the flag,
        # so the second index is still valid
        argv.pop(cssArgIndex)
        argv.pop(cssArgIndex-1)
    if '-o' in argv:
        # '-o <dir>': write all generated HTML into this directory
        dirArgIndex = argv.index('-o') +1
        outputDirectory = argv[dirArgIndex]
        # validate the output directory
        if not os.path.isdir(outputDirectory):
            print('Invalid Directory: ' + outputDirectory)
            sys.exit()
        # pop order matters: value first, then flag (see above)
        argv.pop(dirArgIndex)
        argv.pop(dirArgIndex-1)
    # Every remaining element of argv is a source file path.
    for filePath in argv:
        if os.path.isfile(filePath):
            mth.markdownToHtml(filePath, outputDirectory)
        else:
            print('Invalid Path: ' + filePath)

8.文本文件编码检测与转换。

  1. import sys
  2. import os
  3. import argparse
  4. from chardet.universaldetector import UniversalDetector
  5. parser = argparse.ArgumentParser(description = '文本文件编码检测与转换')
  6. parser.add_argument('filePaths', nargs = '+',
  7. help = '检测或转换的文件路径')
  8. parser.add_argument('-e', '--encoding', nargs = '?', const = 'UTF-8',
  9. help = '''
  10. 目标编码。支持的编码有:
  11. ASCII, (Default) UTF-8 (with or without a BOM), UTF-16 (with a BOM),
  12. UTF-32 (with a BOM), Big5, GB2312/GB18030, EUC-TW, HZ-GB-2312, ISO-2022-CN, EUC-JP, SHIFT_JIS, ISO-2022-JP,
  13. ISO-2022-KR, KOI8-R, MacCyrillic, IBM855, IBM866, ISO-8859-5, windows-1251, ISO-8859-2, windows-1250, EUC-KR,
  14. ISO-8859-5, windows-1251, ISO-8859-1, windows-1252, ISO-8859-7, windows-1253, ISO-8859-8, windows-1255, TIS-620
  15. ''')
  16. parser.add_argument('-o', '--output',
  17. help = '输出目录')
  18. # 解析参数,得到一个 Namespace 对象
  19. args = parser.parse_args()
  20. # 输出目录不为空即视为开启转换, 若未指定转换编码,则默认为 UTF-8
  21. if args.output != None:
  22. if not args.encoding:
  23. # 默认使用编码 UTF-8
  24. args.encoding = 'UTF-8'
  25. # 检测用户提供的输出目录是否有效
  26. if not os.path.isdir(args.output):
  27. print('Invalid Directory: ' + args.output)
  28. sys.exit()
  29. else:
  30. if args.output[-1] != '/':
  31. args.output += '/'
  32. # 实例化一个通用检测器
  33. detector = UniversalDetector()
  34. print()
  35. print('Encoding (Confidence)',':','File path')
  36. for filePath in args.filePaths:
  37. # 检测文件路径是否有效,无效则跳过
  38. if not os.path.isfile(filePath):
  39. print('Invalid Path: ' + filePath)
  40. continue
  41. # 重置检测器
  42. detector.reset()
  43. # 以二进制模式读取文件
  44. for each in open(filePath, 'rb'):
  45. # 检测器读取数据
  46. detector.feed(each)
  47. # 若检测完成则跳出循环
  48. if detector.done:
  49. break
  50. # 关闭检测器
  51. detector.close()
  52. # 读取结果
  53. charEncoding = detector.result['encoding']
  54. confidence = detector.result['confidence']
  55. # 打印信息
  56. if charEncoding is None:
  57. charEncoding = 'Unknown'
  58. confidence = 0.99
  59. print('{} {:>12} : {}'.format(charEncoding.rjust(8),
  60. '('+str(confidence*100)+'%)', filePath))
  61. if args.encoding and charEncoding != 'Unknown' and confidence > 0.6:
  62. # 若未设置输出目录则覆盖源文件
  63. outputPath = args.output + os.path.basename(filePath) if args.output else filePath
  64. with open(filePath, 'r', encoding = charEncoding, errors = 'replace') as f:
  65. temp = f.read()
  66. with open(outputPath, 'w', encoding = args.encoding, errors = 'replace') as f:
  67. f.write(temp)

今日内容就分享到这了,希望这些脚本能帮助大家更好地学习和使用 Python!