first commit

This commit is contained in:
Hong_SZ
2025-10-03 17:36:19 +08:00
commit ae703e80fa
4 changed files with 421 additions and 0 deletions

0
README.md Normal file
View File

48
download_modpacket.py Normal file
View File

@@ -0,0 +1,48 @@
import os
import re
import requests
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
# Ensure the download target directory exists (no error if already present).
os.makedirs("packet", exist_ok=True)
# Read link.txt and pull out every https URL it contains.
with open("link.txt", "r", encoding="utf-8") as f:
    content = f.read()
# Match each https link up to the next whitespace or comma separator.
links = re.findall(r"https://[^\s,]+", content)
# 下载函数
def download_file(url):
    """Download *url* into the ``packet`` directory, streaming in 8 KiB chunks.

    Shows a transient tqdm progress bar sized from the Content-Length
    header (0 / unknown when the server omits it).

    Returns the local file path on success, or ``None`` on any failure
    (the error is printed and other downloads continue).
    """
    # Strip the query string so ".../mod.zip?token=x" saves as "mod.zip".
    name = url.split("/")[-1].split("?")[0]
    if not name:
        # Fix: a URL ending in "/" used to yield the bare directory path
        # "packet", making open() fail; fall back to a stable name.
        name = "download.bin"
    local_filename = os.path.join("packet", name)
    try:
        with requests.get(url, stream=True) as r:
            r.raise_for_status()
            total_size = int(r.headers.get("content-length", 0))
            with open(local_filename, "wb") as f, tqdm(
                desc=os.path.basename(local_filename),
                total=total_size,
                unit="B",
                unit_scale=True,
                unit_divisor=1024,
                leave=False
            ) as bar:
                for chunk in r.iter_content(chunk_size=8192):
                    if chunk:  # skip keep-alive chunks
                        f.write(chunk)
                        bar.update(len(chunk))
        return local_filename
    except Exception as e:
        # Best effort: report and let the remaining downloads proceed.
        print(f"下载失败: {url} -> {e}")
        return None
# Multi-threaded download: fan the links out over eight worker threads
# and report each file as soon as its future completes.
with ThreadPoolExecutor(max_workers=8) as pool:
    pending = {pool.submit(download_file, link): link for link in links}
    for finished in as_completed(pending):
        saved_path = finished.result()
        if saved_path:
            print(f"已下载: {saved_path}")

141
organize_zips.py Normal file
View File

@@ -0,0 +1,141 @@
import os
import re
import shutil
import sys
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor, as_completed
from collections import Counter
try:
from tqdm import tqdm # type: ignore
_USE_TQDM = True
except Exception:
_USE_TQDM = False
# Characters Windows forbids in file/folder names.
INVALID_WIN_CHARS = '<>:"/\\|?*'


def sanitize_folder_name(name: str) -> str:
    """Replace Windows-invalid filename characters with '-' and trim whitespace."""
    cleaned = []
    for ch in name:
        cleaned.append('-' if ch in INVALID_WIN_CHARS else ch)
    return ''.join(cleaned).strip()


def extract_category(zip_name: str) -> str:
    """Derive a category folder from a name shaped like 'cjsyun-xxx_aaa_xx.zip'.

    The middle underscore-separated token is the category; names that do
    not split into at least three tokens land in 'Uncategorized'.
    """
    prefix = 'cjsyun-'
    stem = zip_name
    if stem.startswith(prefix):
        stem = stem[len(prefix):]
    if stem.lower().endswith('.zip'):
        stem = stem[:-len('.zip')]
    pieces = stem.split('_')
    return sanitize_folder_name(pieces[1]) if len(pieces) >= 3 else 'Uncategorized'
def resolve_target_path(dest_dir: Path, file_name: str) -> Path:
    """Return a collision-free destination path for *file_name* in *dest_dir*.

    When the name is taken, '(1)', '(2)', ... is inserted before the
    extension until an unused name is found.
    """
    base = dest_dir / file_name
    if not base.exists():
        return base
    counter = 1
    while (dest_dir / f"{base.stem}({counter}){base.suffix}").exists():
        counter += 1
    return dest_dir / f"{base.stem}({counter}){base.suffix}"
def process_file(file_path: Path, base_dir: Path, dry_run: bool = False) -> tuple[str, Path, Path]:
    """Classify one zip and (unless *dry_run*) move it into its category folder.

    Returns (category, source path, destination path).  In dry-run mode
    nothing is created or moved; the destination is only computed.
    """
    category = extract_category(file_path.name)
    dest_dir = base_dir / category
    if dry_run:
        # Report-only path: where the file *would* land.
        return category, file_path, resolve_target_path(dest_dir, file_path.name)
    dest_dir.mkdir(parents=True, exist_ok=True)
    target = resolve_target_path(dest_dir, file_path.name)
    shutil.move(str(file_path), str(target))
    return category, file_path, target
def main():
    """CLI entry point: concurrently classify every *.zip in the target directory."""
    import argparse
    parser = argparse.ArgumentParser(description='并发分类并移动 ZIP 文件到类型文件夹。')
    parser.add_argument(
        '--dir', '--from-dir', dest='from_dir', default=None,
        help='要处理的目录(默认是脚本所在目录)',
    )
    parser.add_argument(
        '--workers', type=int, default=max(4, (os.cpu_count() or 4) + 4),
        help='线程数默认CPU核数+4至少4',
    )
    parser.add_argument(
        '--dry-run', action='store_true',
        help='试运行,仅显示计划移动,不实际移动',
    )
    args = parser.parse_args()
    # Default to the directory containing this script when --dir is absent.
    base_dir = Path(args.from_dir).resolve() if args.from_dir else Path(__file__).resolve().parent
    if not base_dir.exists() or not base_dir.is_dir():
        print(f'目录无效:{base_dir}')
        sys.exit(1)
    # Sorted for deterministic submission order.
    zip_files = sorted(base_dir.glob('*.zip'))
    if not zip_files:
        print(f'未在目录中找到 zip 文件:{base_dir}')
        sys.exit(0)
    print(f'发现 {len(zip_files)} 个 ZIP 文件,开始并发处理...')
    moved_counter: Counter[str] = Counter()
    # Prepare progress bar — only when tqdm imported successfully at module load.
    pbar = None
    if _USE_TQDM:
        pbar = tqdm(total=len(zip_files), ncols=80, desc='处理进度')
    futures = []
    with ThreadPoolExecutor(max_workers=args.workers) as executor:
        for f in zip_files:
            futures.append(executor.submit(process_file, f, base_dir, args.dry_run))
        # Consume results as they finish so progress updates are live.
        for fut in as_completed(futures):
            try:
                category, src, dst = fut.result()
                moved_counter[category] += 1
                if _USE_TQDM:
                    assert pbar is not None
                    pbar.update(1)
                else:
                    # Plain-text fallback progress line (integer percent).
                    processed = sum(moved_counter.values())
                    percent = processed * 100 // len(zip_files)
                    print(f'[{percent:3d}%] {src.name} -> {category}/')
            except Exception as e:
                # pbar.write keeps the error from corrupting the bar display.
                if _USE_TQDM:
                    assert pbar is not None
                    pbar.write(f'错误:{e!r}')
                else:
                    print(f'错误:{e!r}')
    if _USE_TQDM and pbar is not None:
        pbar.close()
    print('\n分类汇总:')
    for cat, cnt in sorted(moved_counter.items()):
        print(f'- {cat}: {cnt}')
    if args.dry_run:
        print('\n试运行完成(未实际移动文件)。取消 --dry-run 以执行移动。')
    else:
        print('\n完成:已将 ZIP 文件移动到对应类型的文件夹。')
# Run only under direct execution, not on import.
if __name__ == '__main__':
    main()

232
process_archives.py Normal file
View File

@@ -0,0 +1,232 @@
import os
import zipfile
import shutil
import tempfile
import glob
import subprocess
from pathlib import Path
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
import time
# 导入可能需要安装的库
import tqdm
import patoolib
# Global locks guarding console output and file-system mutation across threads.
print_lock = threading.Lock()
file_operation_lock = threading.Lock()


def safe_print(*args, **kwargs):
    """Print while holding a lock so concurrent threads don't interleave output."""
    print_lock.acquire()
    try:
        print(*args, **kwargs)
    finally:
        print_lock.release()


def safe_file_operation(operation, *args, **kwargs):
    """Invoke *operation(*args, **kwargs)* under the file lock; return its result."""
    file_operation_lock.acquire()
    try:
        return operation(*args, **kwargs)
    finally:
        file_operation_lock.release()
def extract_archive(archive_path, extract_dir):
    """Extract *archive_path* into *extract_dir*, flattening a lone top folder.

    ZIP files are extracted member-by-member so a progress bar can be
    shown; other formats (e.g. RAR) are delegated to patoolib, which
    shells out to external tools.
    """
    safe_print("正在解压文件...")
    if archive_path.lower().endswith('.zip'):
        with zipfile.ZipFile(archive_path, 'r') as zip_ref:
            # Hoisted: namelist() was previously computed twice per archive.
            members = zip_ref.namelist()
            for member in tqdm.tqdm(members, total=len(members), desc="解压进度"):
                zip_ref.extract(member, extract_dir)
    else:
        patoolib.extract_archive(archive_path, outdir=extract_dir)
    safe_print("解压完成")
    # If everything unpacked into exactly one folder, hoist its contents up
    # into extract_dir so later processing sees a flat layout.
    items = os.listdir(extract_dir)
    if len(items) == 1 and os.path.isdir(os.path.join(extract_dir, items[0])):
        subfolder = os.path.join(extract_dir, items[0])
        for item in os.listdir(subfolder):
            # NOTE(review): shutil.move can fail if the subfolder contains an
            # entry with the subfolder's own name — assumed absent in this data.
            shutil.move(os.path.join(subfolder, item), extract_dir)
        os.rmdir(subfolder)
def process_files(directory):
    """Post-process an extracted pack: delete .bat launchers, normalise jar names.

    Renaming rules (order-dependent, applied per directory):
    - fabric-server-launch.jar becomes server.jar; any pre-existing
      server.jar is preserved as ser.jar first;
    - otherwise the first other jar seen in a directory lacking a
      server.jar is renamed to server.jar.
    """
    safe_print("正在处理文件...")
    # Collect every .bat file under the tree for deletion.
    bat_files = []
    for root, _, files in os.walk(directory):
        for file in files:
            if file.lower().endswith('.bat'):
                bat_path = os.path.join(root, file)
                bat_files.append(bat_path)
    # Delete the .bat files on a small thread pool.
    if bat_files:
        def safe_remove_file(file_path):
            """Delete one file; report failure instead of raising."""
            try:
                safe_file_operation(os.remove, file_path)
                return True
            except Exception as e:
                safe_print(f"删除文件 {file_path} 时出错: {e}")
                return False
        with ThreadPoolExecutor(max_workers=4) as executor:
            futures = [executor.submit(safe_remove_file, bat_file) for bat_file in bat_files]
            successful_deletions = 0
            for future in tqdm.tqdm(as_completed(futures), total=len(futures), desc="删除.bat文件"):
                if future.result():
                    successful_deletions += 1
            safe_print(f"成功删除 {successful_deletions}/{len(bat_files)} 个.bat文件")
    else:
        safe_print("未找到需要删除的.bat文件")
    # Collect every .jar file under the tree.
    jar_files = []
    for root, _, files in os.walk(directory):
        for file in files:
            if file.lower().endswith('.jar'):
                jar_path = os.path.join(root, file)
                jar_files.append(jar_path)
    # Normalise jar names. NOTE: renames done for earlier jars change what
    # os.path.exists sees for later jars in the same directory.
    if jar_files:
        for jar_file in tqdm.tqdm(jar_files, desc="处理.jar文件"):
            dir_path = os.path.dirname(jar_file)
            # Fabric packs: promote fabric-server-launch.jar to server.jar.
            if os.path.basename(jar_file).lower() == 'fabric-server-launch.jar':
                server_jar = os.path.join(dir_path, 'server.jar')
                if os.path.exists(server_jar):
                    # Keep the vanilla server.jar around as ser.jar.
                    ser_jar = os.path.join(dir_path, 'ser.jar')
                    safe_file_operation(shutil.move, server_jar, ser_jar)
                # Rename fabric-server-launch.jar to server.jar.
                safe_file_operation(shutil.move, jar_file, os.path.join(dir_path, 'server.jar'))
            # Any other jar (not already server.jar / ser.jar) becomes
            # server.jar, but only if the directory doesn't have one yet.
            elif os.path.basename(jar_file).lower() != 'server.jar' and os.path.basename(jar_file).lower() != 'ser.jar':
                server_jar = os.path.join(dir_path, 'server.jar')
                if not os.path.exists(server_jar):
                    safe_file_operation(shutil.move, jar_file, server_jar)
    else:
        safe_print("未找到需要处理的.jar文件")
def compress_directory(source_dir, output_path):
    """Zip the contents of *source_dir* into *output_path*.

    Entries are stored relative to *source_dir*, so unpacking the result
    yields the files directly rather than a wrapping folder.
    """
    safe_print("正在压缩文件...")
    with zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
        # Gather everything first so the progress bar knows its total.
        all_files = []
        for root, _, names in os.walk(source_dir):
            all_files.extend(os.path.join(root, name) for name in names)
        for path in tqdm.tqdm(all_files, desc="压缩进度"):
            # Archive name relative to source_dir keeps sub-structure but
            # drops the temp directory's own path component.
            zipf.write(path, os.path.relpath(path, source_dir))
def process_single_archive(archive_path, current_dir, index, total):
    """Extract one archive, clean it up, and re-pack it into successzip/.

    Runs entirely inside a TemporaryDirectory so failures leave no
    residue.  Returns True on success, False on extraction or processing
    failure (errors are printed, not raised).
    """
    safe_print(f"\n[线程 {threading.current_thread().name}] 处理第 {index+1}/{total} 个压缩文件: {os.path.basename(archive_path)}")
    try:
        # Work in a throwaway directory that is removed automatically.
        with tempfile.TemporaryDirectory() as temp_dir:
            # Unpack; a failed extraction aborts just this archive.
            try:
                extract_archive(archive_path, temp_dir)
            except Exception as e:
                safe_print(f"解压失败: {e}")
                return False
            # Delete .bat files and normalise jar names in place.
            process_files(temp_dir)
            # Ensure the output folder exists (guarded for thread safety).
            success_dir = os.path.join(current_dir, 'successzip')
            if not os.path.exists(success_dir):
                safe_file_operation(os.makedirs, success_dir, exist_ok=True)
            # Output name: "cjsyun-" prefix, saved under successzip/.
            processed_archive = os.path.join(success_dir, f"cjsyun-{os.path.basename(archive_path)}")
            if processed_archive.lower().endswith('.rar'):
                processed_archive = processed_archive[:-4] + '.zip'  # RAR input is re-packed as ZIP
            # Re-compress the processed tree.
            compress_directory(temp_dir, processed_archive)
            safe_print(f"[线程 {threading.current_thread().name}] 处理完成: {os.path.basename(processed_archive)}")
            return True
    except Exception as e:
        safe_print(f"处理文件时出错: {e}")
        return False
def main():
    """Locate every .zip/.rar beside this script and process them concurrently."""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    archives = []
    for pattern in ('*.zip', '*.rar'):
        archives.extend(glob.glob(os.path.join(script_dir, pattern)))
    if not archives:
        safe_print("当前目录下没有找到压缩文件")
        return
    safe_print(f"找到 {len(archives)} 个压缩文件")
    # Cap the pool at four workers, never more than the number of archives.
    worker_count = min(4, len(archives))
    safe_print(f"使用 {worker_count} 个线程并行处理")
    started = time.time()
    ok = 0
    bad = 0
    with ThreadPoolExecutor(max_workers=worker_count, thread_name_prefix="Archive") as pool:
        jobs = [
            pool.submit(process_single_archive, path, script_dir, i, len(archives))
            for i, path in enumerate(archives)
        ]
        # Tally outcomes as each job completes.
        for job in as_completed(jobs):
            try:
                if job.result():
                    ok += 1
                else:
                    bad += 1
            except Exception as e:
                safe_print(f"任务执行出错: {e}")
                bad += 1
    elapsed = time.time() - started
    safe_print(f"\n处理完成!")
    safe_print(f"成功处理: {ok} 个文件")
    safe_print(f"处理失败: {bad} 个文件")
    safe_print(f"总耗时: {elapsed:.2f}")
    safe_print(f"平均每个文件: {elapsed/len(archives):.2f}")
# Run only under direct execution, not on import.
if __name__ == "__main__":
    main()