Asking the experts for help

Thanks for the organizer tool; sorting this stuff by hand is pure torture :innocent:

Awesome, thanks!

Since we've been given the 纯爱航线 Spine organizer tool, let me sort out the Spine assets of the game from the original thread.

Due to technical limitations, my own skel and atlas extraction isn't very efficient (though it does work), so I'm borrowing the 纯爱航线 Spine organizer tool. (Not sure why, but even after hex-editing the salt in that tool, it still couldn't extract the PNGs.)

First, use the 纯爱航线 Spine organizer tool to decrypt and unpack the ASTC textures and extract the skel and atlas files;

then use the script below to pick out the JSON files that carry atlas data, and remember to output them to a fresh folder (I was lazy: the PNG extraction script doesn't check whether a JSON actually contains an atlas structure); see the sample after this paragraph for what it matches on.
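
For reference, the three markers the script searches for are the page-header fields of a Spine (libGDX) atlas; the atlas text embedded in the JSON starts like this (values made up):

skeleton.png
size: 1024,1024
format: RGBA8888
filter: Linear,Linear
repeat: none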

json版atlas寻找.py
import os
import shutil
import json
import time

def remove_quotes(path):
    """Strip surrounding quotes from a path string"""
    return path.strip('"\'')

def filter_json_files(input_folder, output_folder, target_strings):
    """
    Walk the input folder, pick out JSON files containing the given strings,
    and copy them to the output folder.

    Args:
        input_folder (str): input folder path
        output_folder (str): output folder path
        target_strings (list): strings that must all appear
    """
    # Strip quotes
    input_folder = remove_quotes(input_folder)
    output_folder = remove_quotes(output_folder)

    # Make sure the output folder exists
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
        print(f"Created output folder: {output_folder}")
    
    # Counters
    total_json_files = 0
    matched_files = 0
    processed_files = 0
    
    # Start timing
    start_time = time.time()
    
    # First pass: count the JSON files
    print(f"\nScanning folder: {input_folder}")
    for root, _, files in os.walk(input_folder):
        for file in files:
            if file.lower().endswith('.json'):
                total_json_files += 1
    
    print(f"Found {total_json_files} JSON files; filtering for files containing: {target_strings}")
    
    # Second pass: process the files
    for root, _, files in os.walk(input_folder):
        for file in files:
            if file.lower().endswith('.json'):
                processed_files += 1
                file_path = os.path.join(root, file)
                
                # Show progress
                progress = (processed_files / total_json_files) * 100
                print(f"\rProgress: {processed_files}/{total_json_files} ({progress:.1f}%) - checking {file}", end="")
                
                # Read and parse the JSON file
                try:
                    with open(file_path, 'r', encoding='utf-8') as f:
                        data = json.load(f)
                    
                    # Check whether any string value contains all the targets
                    if contains_all_strings(data, target_strings):
                        # Copy the file to the output folder
                        relative_path = os.path.relpath(file_path, input_folder)
                        dest_path = os.path.join(output_folder, relative_path)
                        
                        # Make sure the destination folder exists
                        dest_dir = os.path.dirname(dest_path)
                        if not os.path.exists(dest_dir):
                            os.makedirs(dest_dir)
                            print(f"\nCreated subfolder: {dest_dir}")
                        
                        shutil.copy2(file_path, dest_path)
                        matched_files += 1
                        print(f"\nMatch: {relative_path}")
                
                except json.JSONDecodeError:
                    print(f"\nWarning: {file_path} is not valid JSON")
                except Exception as e:
                    print(f"\nError while processing {file_path}: {e}")
    
    # Stop timing
    elapsed_time = time.time() - start_time
    
    # Print statistics
    print("\n\nDone!")
    print(f"Scanned {total_json_files} JSON files")
    print(f"Matched and copied {matched_files} files to {output_folder}")
    print(f"Total time: {elapsed_time:.2f} s")

def contains_all_strings(obj, strings):
    """Recursively check whether any string value in the object contains all targets"""
    if isinstance(obj, str):
        # Does this string contain every target?
        return all(s in obj for s in strings)
    
    elif isinstance(obj, dict):
        # Recurse into all dict values
        for value in obj.values():
            if contains_all_strings(value, strings):
                return True
    
    elif isinstance(obj, list):
        # Recurse into all list elements
        for item in obj:
            if contains_all_strings(item, strings):
                return True
    
    return False

def main():
    """Interactive entry point"""
    target_strings = ['size:', 'format:', 'filter:']  # atlas page-header markers
    
    while True:
        print("\n" + "="*40)
        print("=== JSON filter & copy tool ===")
        print("="*40)
        print(f"Enter a folder path; the program will pick out JSON files containing: {target_strings}")
        
        # Get the input folder path
        input_folder = input("\nSource folder path ('q' to quit): ").strip()
        
        # Check for exit
        if input_folder.lower() == 'q':
            print("Exiting.")
            break
        
        input_folder = remove_quotes(input_folder)
        if not os.path.exists(input_folder):
            print(f"Error: folder {input_folder} does not exist")
            continue
        
        # Get the output folder path
        output_folder = input("Output folder path: ").strip()
        output_folder = remove_quotes(output_folder)
        
        # Confirm the output folder
        if os.path.exists(output_folder):
            confirm = input(f"Warning: output folder {output_folder} already exists and files may be overwritten. Continue? (y/n): ").strip().lower()
            if confirm != 'y':
                print("Cancelled")
                continue
        
        # Run the filter & copy
        filter_json_files(input_folder, output_folder, target_strings)
        
        # Ask whether to run again
        continue_choice = input("\nRun again? (y/n): ").strip().lower()
        if continue_choice != 'y':
            print("Exiting.")
            break

if __name__ == "__main__":
    main()
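
By the way, the filter can also be driven without the interactive prompts; a minimal sketch (both paths are placeholders):

filter_json_files(r"D:\dump\import", r"D:\dump\atlas_json", ['size:', 'format:', 'filter:'])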

Then use the script below to extract the PNGs;

提取png.py
import os
import re
import hashlib
import shutil
from pathlib import Path
import concurrent.futures
import multiprocessing

# Base64 charset and value lookup table
BASE64_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
BASE64_VALUES = [0] * 128
for idx, char in enumerate(BASE64_CHARS):
    BASE64_VALUES[ord(char)] = idx

HEX_CHARS = list('0123456789abcdef')
_t = ['', '', '', '']
UUID_TEMPLATE = _t + _t + ['-'] + _t + ['-'] + _t + ['-'] + _t + ['-'] + _t + _t + _t
INDICES = [i for i, x in enumerate(UUID_TEMPLATE) if x != '-']

def decode_uuid(base64_str):
    """Restore a Base64-compressed string to UUID form (the first two chars are kept verbatim)"""
    if len(base64_str) != 22:
        return base64_str
    result = UUID_TEMPLATE.copy()

    result[0] = base64_str[0]
    result[1] = base64_str[1]

    j = 2
    for i in range(2, 22, 2):
        lhs = BASE64_VALUES[ord(base64_str[i])]
        rhs = BASE64_VALUES[ord(base64_str[i + 1])]

        result[INDICES[j]] = HEX_CHARS[lhs >> 2]
        j += 1
        result[INDICES[j]] = HEX_CHARS[((lhs & 3) << 2) | (rhs >> 4)]
        j += 1
        result[INDICES[j]] = HEX_CHARS[rhs & 0xF]
        j += 1

    print(f"Decoding UUID: base64_str={base64_str} -> uuid={''.join(result)}")
    return ''.join(result)

def calculate_formatted_md5(uuid_str, suffix="120e8962b50b34d26e49730d884a32e4"):
    """Compute the MD5 and format it as a dashed filename"""
    combined_str = f"{uuid_str}.astc{suffix}"
    print(f"MD5 input: {combined_str}")
    md5_hash = hashlib.md5(combined_str.encode()).hexdigest()
    print(f"Raw MD5 hash: {md5_hash} (length: {len(md5_hash)})")

    # Format as 8-5-5-5-9
    if len(md5_hash) >= 32:
        formatted = f"{md5_hash[:8]}-{md5_hash[8:13]}-{md5_hash[13:18]}-{md5_hash[18:23]}-{md5_hash[23:]}.png"
        print(f"Formatted filename: {formatted}")
        return formatted
    else:
        print(f"Warning: MD5 hash shorter than expected, using it as-is: {md5_hash}.png")
        return f"{md5_hash}.png"

def find_and_copy_file(filename, source_dir, dest_dir, json_filename):
    """Find a file in the source dir and copy it into a subfolder of the destination dir"""
    if not os.path.exists(source_dir):
        print(f"Error: source directory {source_dir} does not exist")
        return False

    # JSON filename without extension
    json_base_name = os.path.splitext(json_filename)[0]

    # Look for a subfolder whose name contains the JSON filename
    found_subfolder = None
    for subfolder in os.listdir(dest_dir):
        subfolder_path = os.path.join(dest_dir, subfolder)
        if os.path.isdir(subfolder_path) and json_base_name in subfolder:
            found_subfolder = subfolder_path
            print(f"Note: found subfolder matching the JSON name, using {found_subfolder}")
            break

    # Fall back to an "output" subfolder
    if found_subfolder is None:
        output_subfolder = os.path.join(dest_dir, "output")
        if not os.path.exists(output_subfolder):
            os.makedirs(output_subfolder)
            print(f"Created subfolder: {output_subfolder}")
        found_subfolder = output_subfolder

    print(f"Searching for file {filename} in directory {source_dir}")
    found = False
    for root, _, files in os.walk(source_dir):
        if filename in files:
            source_path = os.path.join(root, filename)
            dest_path = os.path.join(found_subfolder, filename)
            print(f"Found {source_path}, copying to {dest_path}")

            try:
                # Overwrite an existing file
                if os.path.exists(dest_path):
                    os.remove(dest_path)
                shutil.copy2(source_path, dest_path)
                print(f"Copied: {source_path} -> {dest_path}")
                found = True
                break
            except Exception as e:
                print(f"Error copying file: {e}")
                return False

    if not found:
        print(f"Warning: file {filename} not found under {source_dir}")
    return found

def rename_png_using_atlas(png_dir, atlas_filename, png_filename):
    """Rename the PNG using the page name from the atlas file (overwrites existing files)"""
    atlas_path = os.path.join(png_dir, atlas_filename)
    print(f"Reading atlas file: {atlas_path}")

    if not os.path.exists(atlas_path):
        print(f"Warning: atlas file {atlas_path} not found")
        return False

    try:
        with open(atlas_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()
            print(f"Atlas file has {len(lines)} lines")
            for i, line in enumerate(lines):
                line = line.strip()
                print(f"Line {i+1}: '{line}'")
                if '.png' in line:
                    parts = line.split()
                    print(f"Split line: {parts}")
                    if parts:
                        new_name = parts[0]
                        print(f"New filename: {new_name}")
                        old_png_path = os.path.join(png_dir, png_filename)
                        new_png_path = os.path.join(png_dir, new_name)

                        print(f"Old PNG path: {old_png_path}")
                        print(f"New PNG path: {new_png_path}")
                        if os.path.exists(old_png_path):
                            # Overwrite an existing file that already has the new name
                            if os.path.exists(new_png_path):
                                os.remove(new_png_path)
                            os.rename(old_png_path, new_png_path)
                            print(f"Renamed: {png_filename} -> {new_name}")
                            return True
                        else:
                            print(f"Warning: PNG file {old_png_path} not found")
                            return False
            print("Warning: no line containing '.png' found in the atlas file")
            return False
    except Exception as e:
        print(f"Error reading atlas file: {e}")
        return False

def process_json_file(json_path, png_dir, output_dir):
    """Process a single JSON file"""
    try:
        json_filename = os.path.basename(json_path)
        print(f"\n===== Processing JSON file: {json_filename} =====")

        # Read the first 256 characters (enough to cover the uuids header)
        with open(json_path, 'r', encoding='utf-8', errors='ignore') as f:
            first_256 = f.read(256)
        print(f"First 256 characters: {first_256[:100]}... (length: {len(first_256)})")

        # Extract strings matching the pattern (22 chars + @ + 5 chars)
        pattern = r'([A-Za-z0-9+/]{22})@[A-Za-z0-9]{5}'
        matches = re.findall(pattern, first_256)
        print(f"Regex matches: {matches} ({len(matches)} total)")

        if not matches:
            print(f"Warning: no matching string found in {json_filename}")
            return

        for i, match in enumerate(matches):
            print(f"\n--- Match {i+1} ---")
            base64_str = match
            print(f"Base64 string: {base64_str}")

            # Restore the UUID
            uuid = decode_uuid(base64_str)
            print(f"Restored UUID: {uuid}")

            # Compute the formatted MD5 filename
            formatted_md5 = calculate_formatted_md5(uuid)
            print(f"Derived filename: {formatted_md5}")

            # Find and copy the file
            found_subfolder = None  # initialize
            if find_and_copy_file(formatted_md5, png_dir, output_dir, json_filename):
                # Recover the subfolder that find_and_copy_file actually used
                json_base_name = os.path.splitext(json_filename)[0]
                for subfolder in os.listdir(output_dir):
                    subfolder_path = os.path.join(output_dir, subfolder)
                    if os.path.isdir(subfolder_path) and json_base_name in subfolder:
                        found_subfolder = subfolder_path
                        break

                # Fall back to the "output" subfolder
                if found_subfolder is None:
                    found_subfolder = os.path.join(output_dir, "output")

                # Look for an atlas file and rename the PNG
                print(f"Atlas search directory: {found_subfolder}")

                # Check the directory exists before listing it
                if os.path.exists(found_subfolder):
                    atlas_files = [f for f in os.listdir(found_subfolder) if f.endswith('.atlas')]
                    print(f"Found {len(atlas_files)} atlas files: {atlas_files}")
                    if atlas_files:
                        atlas_file = atlas_files[0]
                        print(f"Using atlas file: {atlas_file}")
                        try:
                            rename_png_using_atlas(found_subfolder, atlas_file, formatted_md5)
                        except Exception as e:
                            print(f"Error renaming PNG: {e}")
                    else:
                        print(f"Warning: no atlas file found in {found_subfolder}")
                else:
                    print(f"Warning: directory {found_subfolder} does not exist")

    except IndexError as e:
        print(f"\n===== Index error while processing {json_path} =====")
        print(f"Details: {e}")
        print(f"Current JSON filename: {json_filename}")
        # Dump some context to help debugging
        try:
            with open(json_path, 'r', encoding='utf-8', errors='ignore') as f:
                print(f"First 500 characters of the JSON file: {f.read(500)}...")
        except Exception:
            print("Could not read the JSON file for debugging")
    except Exception as e:
        print(f"Error processing {json_path}: {e}")

def main():
    """Interactive entry point"""
    print("=== UUID restore & resource tool (debug mode) ===")
    print("Every step of the string processing is printed in detail")
    print("Enter 'q' or 'exit' to quit")

    while True:
        # Get user input
        json_input = input("\nJSON file or folder path: ").strip()
        if json_input.lower() in ['q', 'exit']:
            print("Exiting.")
            break

        png_dir = input("PNG folder path: ").strip()
        if png_dir.lower() in ['q', 'exit']:
            print("Exiting.")
            break

        output_dir = input("Output folder path: ").strip()
        if output_dir.lower() in ['q', 'exit']:
            print("Exiting.")
            break

        # Make sure the output directory exists
        os.makedirs(output_dir, exist_ok=True)

        try:
            if os.path.isfile(json_input) and json_input.endswith('.json'):
                # Process a single JSON file
                process_json_file(json_input, png_dir, output_dir)
            elif os.path.isdir(json_input):
                # Process every JSON file in the folder
                json_files = [os.path.join(json_input, f) for f in os.listdir(json_input)
                              if f.endswith('.json')]

                if not json_files:
                    print(f"Warning: no JSON files found in {json_input}")
                else:
                    print(f"Found {len(json_files)} JSON files, processing...")
                    # Use one worker thread per CPU core
                    num_threads = multiprocessing.cpu_count()
                    with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
                        futures = [executor.submit(process_json_file, json_file, png_dir, output_dir) for json_file in json_files]
                        for future in concurrent.futures.as_completed(futures):
                            try:
                                future.result()
                            except Exception as e:
                                print(f"Error during processing: {e}")
            else:
                print(f"Error: {json_input} is not a valid JSON file or folder")
        except Exception as e:
            print(f"Error during processing: {e}")

        # Ask whether to continue
        continue_choice = input("\nProcess more files? (y/n): ").strip().lower()
        if continue_choice != 'y':
            print("Exiting.")
            break

if __name__ == "__main__":
    main()
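
For a single asset the two helpers can also be driven by hand; a small sketch (the 22-character compressed UUID here is made up):

compressed = "a0b1C2d3E4f5G6h7I8j9K0"  # hypothetical compressed UUID from an import JSON
uuid = decode_uuid(compressed)
print(calculate_formatted_md5(uuid))   # prints the derived xxxxxxxx-xxxxx-... .png name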

The organizer tool was updated to v2 a couple of days ago and the output directory layout changed a bit. I'd actually finished this long ago and just forgot to post the update here.


Digging up this old thread.

Following up on an email from a friend abroad:
Nutaku has launched this game, and it seems somewhat different from the h365 version, with some updates.
I did a quick first pass on the encryption: a 26-byte ASCII signature, then 4 bytes that get appended to the fixed key (the next 2 bytes are skipped), then a payload in which every third byte is XORed against that rolling key.

import os
from typing import Union

Asign = b'4ccda27cc7c6450015f8d51faf'
Akey = b'*android_nutaku*'


def decrypt(data: Union[str, bytes]) -> bytes:
    if isinstance(data, str):
        data = data.encode()

    sig_len = len(Asign)
    header_len = sig_len + 6
    rkey = Akey + data[sig_len:sig_len + 4]
    encrypted_data = data[header_len:]

    decrypted_data = bytearray(encrypted_data)
    key_index = 0

    for i in range(0, len(encrypted_data), 3):
        decrypted_data[i] = encrypted_data[i] ^ rkey[key_index]
        key_index = (key_index + 1) % len(rkey)

    return bytes(decrypted_data)


def decrypt_file_if_encrypted(filepath: str):
    try:
        with open(filepath, "rb") as f:
            data = f.read()
        if not data.startswith(Asign):
            return False 

        decrypted = decrypt(data)

        with open(filepath, "wb") as f_out:
            f_out.write(decrypted)

        print(f"已解密: {filepath}")
        return True

    except Exception as e:
        print(f"解密失败: {filepath}\n原因: {e}")
        return False


def process_directory(root_path: str):
    count_total = 0
    count_decrypted = 0

    for dirpath, _, filenames in os.walk(root_path):
        for filename in filenames:
            filepath = os.path.join(dirpath, filename)
            count_total += 1
            if decrypt_file_if_encrypted(filepath):
                count_decrypted += 1

    print(f"总数: {count_total},解密: {count_decrypted}")


if __name__ == "__main__":
    assets_path = r"C:\Users\User\Downloads\sail-girl_1\assets"
    process_directory(assets_path)
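
Since the cipher is just XOR over every third byte, the same transform both encrypts and decrypts; a quick self-test of the layout above (26-byte signature, 4 key-tail bytes, then the 2 bytes the code skips):

payload = b"round-trip test payload"
key_tail = b"ab12"  # made-up stand-in for the 4 bytes after the signature
blob = bytearray(payload)
rkey = Akey + key_tail
for i in range(0, len(blob), 3):
    blob[i] ^= rkey[(i // 3) % len(rkey)]
packed = Asign + key_tail + b"\x00\x00" + bytes(blob)
assert bytes(decrypt(packed)) == payload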

That's the decryption code for the Android client.
Does this version have a salt? If there is one, the JS build is probably heavily obfuscated too.

The salt is d28172660f5fd6c7325904bcc305e7f8
Its Spine files are a bit interesting: under
assets\assets\68c2a6a4f8beb26c98d36d2c874b2f9d\import there is only one file,
03\03a102888.json,
which seems to bundle several Spine JSONs and atlases that need to be split apart.

Roughly 166 CGs were added compared with h365.
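
For reference, this is how the salt enters the remote filename derivation — a minimal restatement of the md5_uuid_* helpers in the downloader below:

from hashlib import md5

SALT = "d28172660f5fd6c7325904bcc305e7f8"

def remote_name(decoded_uuid, version, ext):
    # md5 of "<uuid>.<version>.<ext><salt>", re-dashed into 8-5-5-5-9 groups
    h = md5(f"{decoded_uuid}.{version}.{ext}{SALT}".encode()).hexdigest()
    return f"{h[:8]}-{h[8:13]}-{h[13:18]}-{h[18:23]}-{h[23:]}.{ext}"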

import json
import requests
from hashlib import md5
import os
import subprocess
import threading
import concurrent.futures

def SailGirldownload(overwrite):
    ASTCENC_PATH = r"astcenc-avx2.exe"  # grab astcenc-avx2.exe from the astc-encoder releases on GitHub
    DECODE_MODE = "-ds"
    Asign = b'4ccda27cc7c6450015f8d51faf'
    Akey = b'*android_nutaku*'

    def convert_astc_to_png(astc_path):
        png_path = os.path.splitext(astc_path)[0] + ".png"
        cmd = [ASTCENC_PATH, DECODE_MODE, astc_path, png_path]

        try:
            result = subprocess.run(cmd, capture_output=True, text=True)
            if result.returncode == 0:
                print(f"Converted: {astc_path} -> {png_path}")
                # Delete the original ASTC file
                try:
                    os.remove(astc_path)
                    print(f"Deleted original: {astc_path}")
                except Exception as e:
                    print(f"Delete failed: {astc_path}\nReason: {e}")
            else:
                print(f"Conversion failed: {astc_path}\n{result.stderr}")
        except Exception as e:
            print(f"Error running astcenc: {astc_path}\nReason: {e}")

    def batch_convert(root_dir):
        count_total = 0
        count_success = 0

        for dirpath, dirnames, filenames in os.walk(root_dir):
            if "_native_cache" in dirpath:
                continue

            for filename in filenames:
                if filename.lower().endswith(".astc"):
                    count_total += 1
                    full_path = os.path.join(dirpath, filename)
                    convert_astc_to_png(full_path)
                    count_success += 1

        print(f"\nDone: found {count_total} ASTC files and attempted to convert them all.")

    def decryptfile(data):
        if isinstance(data, str):
            data = data.encode()
        sig_len = len(Asign)
        header_len = sig_len + 6
        rkey = Akey + data[sig_len:sig_len + 4]
        encrypted_data = data[header_len:]
        decrypted_data = bytearray(encrypted_data)
        key_index = 0
        for i in range(0, len(encrypted_data), 3):
            decrypted_data[i] = encrypted_data[i] ^ rkey[key_index]
            key_index = (key_index + 1) % len(rkey)
        return bytes(decrypted_data)

    def decrypt_if_needed(data):
        if data.startswith(Asign):
            return decryptfile(data)
        return data

    BASE64_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    BASE64_VALUES = [0] * 128
    for idx, char in enumerate(BASE64_CHARS):
        BASE64_VALUES[ord(char)] = idx

    SALT = b'd28172660f5fd6c7325904bcc305e7f8'
    HEX_CHARS = list('0123456789abcdef')
    _t = ['', '', '', '']
    UUID_TEMPLATE = _t + _t + ['-'] + _t + ['-'] + _t + ['-'] + _t + ['-'] + _t + _t + _t
    INDICES = [i for i, x in enumerate(UUID_TEMPLATE) if x != '-']

    def insert_chars(s, positions, char):
        for i in sorted(positions, reverse=True):
            s = s[:i] + char + s[i:]
        return s

    def decode_uuid(base64_str):
        """Keep the original UUID decode logic exactly"""
        if len(base64_str) != 22:
            return base64_str
        result = UUID_TEMPLATE.copy()
        result[0] = base64_str[0]
        result[1] = base64_str[1]
        j = 2
        for i in range(2, 22, 2):
            lhs = BASE64_VALUES[ord(base64_str[i])]
            rhs = BASE64_VALUES[ord(base64_str[i + 1])]
            result[INDICES[j]] = HEX_CHARS[lhs >> 2]
            j += 1
            result[INDICES[j]] = HEX_CHARS[((lhs & 3) << 2) | (rhs >> 4)]
            j += 1
            result[INDICES[j]] = HEX_CHARS[rhs & 0xF]
            j += 1
        return ''.join(result)

    def md5_uuid_filename(decoded_uuid, version):
        raw = f"{decoded_uuid}.{version}.json{SALT.decode()}"
        hashed = md5(raw.encode()).hexdigest()
        formatted = insert_chars(hashed, [8, 13, 18, 23], '-')
        return f"{formatted}.json"

    def md5_uuid_images_for_remote(decoded_uuid, version):
        raw = f"{decoded_uuid}.{version}.astc{SALT.decode()}"
        hashed = md5(raw.encode()).hexdigest()
        formatted = insert_chars(hashed, [8, 13, 18, 23], '-')
        return f"{formatted}.astc"

    def md5_uuid_images_local_name(decoded_uuid, version):
        raw = f"{decoded_uuid}.{version}.astc{SALT.decode()}"
        hashed = md5(raw.encode()).hexdigest()
        formatted = insert_chars(hashed, [8, 13, 18, 23], '-')
        return f"{formatted}.astc"

    def download_file(url, session=None, timeout=20):
        print(f"Downloading: {url}")
        s = session or requests
        r = s.get(url, timeout=timeout)
        if r.status_code != 200:
            raise Exception(f"Download failed: {r.status_code}")
        return decrypt_if_needed(r.content)

    def get_version_for_index(versions_flat, idx):
        try:
            pos = versions_flat.index(idx)
            return versions_flat[pos + 1]
        except ValueError:
            raise KeyError(f"Index {idx} not found in versions array")

    def process_config(json_path, output_dir, base_url):
        if json_path.startswith("http://") or json_path.startswith("https://"):
            print(f"Downloading remote config JSON: {json_path}")
            try:
                r = requests.get(json_path, timeout=20)
                r.raise_for_status()
                data_bytes = decrypt_if_needed(r.content)
            except Exception as e:
                raise Exception(f"Remote config download or decryption failed: {e}")
        else:
            print(f"Reading local config JSON: {json_path}")
            with open(json_path, "rb") as f:
                data_bytes = decrypt_if_needed(f.read())

        try:
            data = json.loads(data_bytes.decode("utf-8", errors="ignore"))
        except Exception as e:
            raise Exception(f"Failed to parse config JSON: {e}")

        uuids = data.get("uuids", [])
        versions_import = data.get("versions", {}).get("import", [])
        versions_native = data.get("versions", {}).get("native", [])
        name = data.get("name", "unknown")

        download_cache = {}
        session = requests.Session()
        cache_lock = threading.Lock()

        def handle_import(i):
            try:
                idx = versions_import[i]
                version = versions_import[i + 1]
                if idx >= len(uuids):
                    return
                uuid_base64 = uuids[idx].split("@")[0]
                if len(uuid_base64) == 22:
                    decoded = decode_uuid(uuid_base64)
                    filename = md5_uuid_filename(decoded, version)
                else:
                    filename = f"{uuid_base64}.{version}.json"
                    decoded = 'None'

                subdir = filename[:2]
                url = f"{base_url}/{name}/import/{subdir}/{filename}"

                # Download the import JSON file
                try:
                    content = download_file(url, session=session)
                    json_text = content.decode("utf-8", errors="ignore")
                    json_data = json.loads(json_text)
                except Exception as e:
                    print(f"JSON download or parse failed: {url} ({e})")
                    return

                try:
                    is_sp = False
                    values_block = None
                    fields = None
                    skeleton_block_idx = None

                    if len(json_data) > 3 and isinstance(json_data[3], list):
                        for candidate in (0, 2):
                            try:
                                if json_data[3][candidate][0] == "sp.SkeletonData":
                                    is_sp = True
                                    skeleton_block_idx = candidate
                                    fields = [x for x in json_data[3][candidate][1]]
                                    values_block = json_data[5][0][0][0]
                                    break
                            except Exception:
                                continue

                        if not is_sp:
                            try:
                                if len(json_data[3]) > 6 and json_data[3][6][0] == "sp.SkeletonData":
                                    is_sp = True
                                    skeleton_block_idx = 6
                                    fields = [x for x in json_data[3][6][1]]
                                    values_block = json_data[5][1][0][0]
                            except Exception:
                                pass

                    if not is_sp:
                        return

                    print(f"检测到 SkeletonData: {uuid_base64}")

                    deps = json_data[1] if len(json_data) > 1 else []
                    derived_path = subdir

                    computed_dep_filenames = []
                    for dep_idx, dep_uuid in enumerate(deps):
                        dep_text = dep_uuid if isinstance(dep_uuid, str) else str(dep_uuid)
                        dep_clean = dep_text.split("@")[0]
                        is_astc = "@" in dep_text
                        if dep_clean in uuids:
                            global_idx = uuids.index(dep_clean)
                        else:
                            global_idx = None
                        if global_idx is not None:
                            try:
                                dep_version = get_version_for_index(versions_native, global_idx)
                            except Exception:
                                dep_version = 0
                        else:
                            dep_version = 0
                        if len(dep_clean) == 22:
                            dep_decoded = decode_uuid(dep_clean)
                        else:
                            dep_decoded = dep_clean

                        if not is_astc:
                            fname = f"{dep_decoded}.{dep_version}.bin"
                            computed_dep_filenames.append((fname, fname, False))
                        else:
                            remote_name = md5_uuid_images_for_remote(dep_decoded, dep_version)
                            local_name = md5_uuid_images_local_name(dep_decoded, dep_version)
                            computed_dep_filenames.append((remote_name, local_name, True))

                    for remote_name, local_name, is_image in computed_dep_filenames:
                        subdir2 = remote_name[:2]
                        dep_url = f"{base_url}/{name}/native/{subdir2}/{remote_name}"
                        with cache_lock:
                            if local_name in download_cache:
                                continue
                        try:
                            b = download_file(dep_url, session=session)
                            with cache_lock:
                                download_cache[local_name] = b
                            native_cache_dir = os.path.join(output_dir, "_native_cache", subdir2)
                            os.makedirs(native_cache_dir, exist_ok=True)
                            path_on_disk = os.path.join(native_cache_dir, local_name)
                            with open(path_on_disk, "wb") as wf:
                                wf.write(b)
                            print(f"已下载依赖资源并缓存: {local_name}")
                        except Exception as e:
                            print(f"依赖下载失败: {remote_name} ({e})")

                    values_seq = values_block if isinstance(values_block, list) else []
                    # Some assets prepend one extra element before the field values
                    offset = 1 if len(values_seq) == len(fields) + 1 else 0

                    skeleton_name = None
                    skeleton_bin_localname = None
                    skeleton_json_text = None
                    atlas_text = None
                    texture_names = []
                    textures_indices = []

                    for j, fname in enumerate(fields):
                        val_index = j + offset
                        val = None
                        try:
                            val = values_seq[val_index]
                        except Exception:
                            val = None

                        if fname == "_name":
                            skeleton_name = str(val) if val is not None else "unknown_skel"
                            out_dir_for_this = os.path.join(output_dir, name, derived_path)
                            os.makedirs(out_dir_for_this, exist_ok=True)
                        elif fname == "_native":
                            if isinstance(val, str) and val == ".bin":
                                for remote_name, local_name, is_image in computed_dep_filenames:
                                    if not is_image and local_name.endswith(".bin"):
                                        if local_name in download_cache:
                                            out_dir_for_this = os.path.join(output_dir, name, derived_path)
                                            os.makedirs(out_dir_for_this, exist_ok=True)
                                            dest_skel_path = os.path.join(out_dir_for_this, f"{skeleton_name}.skel")
                                            with open(dest_skel_path, "wb") as fsk:
                                                fsk.write(download_cache[local_name])
                                            print(f"导出文件:{dest_skel_path}")
                                            skeleton_bin_localname = local_name
                                            break
                                        else:
                                            native_cache_dir = os.path.join(output_dir, "_native_cache",
                                                                            remote_name[:2])
                                            candidate = os.path.join(native_cache_dir, local_name)
                                            if os.path.exists(candidate):
                                                out_dir_for_this = os.path.join(output_dir, name, derived_path)
                                                os.makedirs(out_dir_for_this, exist_ok=True)
                                                dest_skel_path = os.path.join(out_dir_for_this, f"{skeleton_name}.skel")
                                                with open(candidate, "rb") as rf, open(dest_skel_path, "wb") as wf:
                                                    wf.write(rf.read())
                                                print(f"导出文件(来自缓存磁盘):{dest_skel_path}")
                                                skeleton_bin_localname = local_name
                                                break
                        elif fname == "_skeletonJson":
                            try:
                                skeleton_json_text = val if isinstance(val, str) else json.dumps(val,
                                                                                                 ensure_ascii=False)
                            except Exception:
                                skeleton_json_text = str(val)
                            if skeleton_name:
                                out_dir_for_this = os.path.join(output_dir, name, derived_path)
                                os.makedirs(out_dir_for_this, exist_ok=True)
                                dest_json = os.path.join(out_dir_for_this, f"{skeleton_name}.json")
                                with open(dest_json, "w", encoding="utf-8") as jf:
                                    jf.write(skeleton_json_text)
                                print(f"导出文件:{dest_json}")
                        elif fname == "_atlasText":
                            try:
                                atlas_text = val if isinstance(val, str) else json.dumps(val, ensure_ascii=False)
                            except Exception:
                                atlas_text = str(val)
                            if skeleton_name:
                                atlas_lines = atlas_text.splitlines()
                                while atlas_lines and atlas_lines[0].strip() == "":
                                    atlas_lines.pop(0)
                                atlas_text_clean = "\n".join(atlas_lines)
                                out_dir_for_this = os.path.join(output_dir, name, derived_path)
                                os.makedirs(out_dir_for_this, exist_ok=True)
                                dest_atlas = os.path.join(out_dir_for_this, f"{skeleton_name}.atlas")
                                with open(dest_atlas, "w", encoding="utf-8") as af:
                                    af.write(atlas_text_clean)
                                print(f"导出文件:{dest_atlas}(已清除首行空行)")
                        elif fname == "textureNames":
                            try:
                                texture_names = [str(x) for x in val] if isinstance(val, (list, tuple)) else []
                            except Exception:
                                texture_names = []
                        elif fname == "textures":
                            try:
                                textures_indices = [int(x) for x in val] if isinstance(val, (list, tuple)) else []
                            except Exception:
                                textures_indices = []

                    if texture_names and textures_indices:
                        for i_t, tex_name in enumerate(texture_names):
                            try:
                                dep_ref_index = textures_indices[i_t]
                            except Exception:
                                continue
                            if dep_ref_index < 0 or dep_ref_index >= len(computed_dep_filenames):
                                continue
                            # Write out the dependency this texture index points at
                            remote_name, local_name, is_image = computed_dep_filenames[dep_ref_index]
                            if not is_image:
                                continue
                            out_dir_for_this = os.path.join(output_dir, name, derived_path)
                            os.makedirs(out_dir_for_this, exist_ok=True)
                            tex_astc_name = os.path.splitext(tex_name)[0] + ".astc"
                            dest_tex_path = os.path.join(out_dir_for_this, tex_astc_name)
                            if local_name in download_cache:
                                with open(dest_tex_path, "wb") as wf:
                                    wf.write(download_cache[local_name])
                                print(f"Exported: {dest_tex_path}")
                            else:
                                native_cache_dir = os.path.join(output_dir, "_native_cache", remote_name[:2])
                                candidate = os.path.join(native_cache_dir, local_name)
                                if os.path.exists(candidate):
                                    with open(candidate, "rb") as rf, open(dest_tex_path, "wb") as wf:
                                        wf.write(rf.read())
                                    print(f"Exported (from disk cache): {dest_tex_path}")
                                else:
                                    print(f"Texture data not found: {local_name}, skipping {tex_name}")
                except Exception as e:
                    print(f"Error while handling this import JSON (continuing with the next): {e}")
            except Exception as e:
                print(f"Import task error: {e}")

        print(f"开始并发处理 {len(versions_import) // 2} 个 import JSON ...")
        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
            futures = [executor.submit(handle_import, i) for i in range(0, len(versions_import), 2)]
            concurrent.futures.wait(futures)

    def getmjurl(group):
        settings_url = "https://b2v2n-jp-prd-cdn.playrr.me/game2.0/ennutaku/androidr/remote_main_assets/src/settings.json"
        base_url = "https://b2v2n-jp-prd-cdn.playrr.me/game2.0/ennutaku/androidr/remote"

        print("Downloading settings.json ...")
        r = requests.get(settings_url, timeout=20)
        r.raise_for_status()
        data_bytes = decrypt_if_needed(r.content)

        try:
            settings_data = json.loads(data_bytes.decode("utf-8", errors="ignore"))
        except Exception as e:
            raise Exception(f"Failed to parse settings.json: {e}")

        bundleVers = settings_data.get("assets", {}).get("bundleVers", {})
        if not bundleVers:
            raise Exception("bundleVers not found in settings.json")

        if group not in bundleVers:
            raise Exception(f"Group '{group}' not found; some available groups: {list(bundleVers.keys())[:5]} ...")

        version = bundleVers[group]
        mjurl = f"{base_url}/{group}/cc.config.{version}.json"
        print(f"Config URL: {mjurl}")
        return mjurl

    def get_all_mjurl():
        settings_url = "https://b2v2n-jp-prd-cdn.playrr.me/game2.0/ennutaku/androidr/remote_main_assets/src/settings.json"
        base_url = "https://b2v2n-jp-prd-cdn.playrr.me/game2.0/ennutaku/androidr/remote"

        print("Downloading settings.json ...")
        r = requests.get(settings_url, timeout=20)
        r.raise_for_status()

        try:
            data_bytes = decrypt_if_needed(r.content)
        except NameError:
            data_bytes = r.content

        try:
            settings_data = json.loads(data_bytes.decode("utf-8", errors="ignore"))
        except Exception as e:
            raise Exception(f"Failed to parse settings.json: {e}")

        bundleVers = settings_data.get("assets", {}).get("bundleVers", {})
        if not bundleVers:
            raise Exception("bundleVers not found in settings.json")

        print(f"Found {len(bundleVers)} groups:\n")

        urls = {}
        for group, version in bundleVers.items():
            mjurl = f"{base_url}/{group}/cc.config.{version}.json"
            urls[group] = mjurl
            process_config(mjurl, output_dir, base_url)

        return urls

    output_dir = r"resdownload/Sail-Girl"
    base_url = "https://b2v2n-jp-prd-cdn.playrr.me/game2.0/ennutaku/androidr/remote"
    os.makedirs(output_dir, exist_ok=True)
    # process_config(getmjurl("229656a2139ffde2cda70897ad505542"), output_dir, base_url)
    get_all_mjurl()
    batch_convert(output_dir)
SailGirldownload(0)

Adjust the thread count (max_workers in process_config) to suit your own network conditions.
