幻能边境解包求助

嗯…能跑可以下载bundle_sp/special下的spine并且自动还原文件名

import os
import requests


BASE64_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
# Reverse lookup: ASCII code of a base64 symbol -> its 6-bit value.
BASE64_VALUES = [0] * 128
for value, symbol in enumerate(BASE64_CHARS):
    BASE64_VALUES[ord(symbol)] = value

HEX_CHARS = list('0123456789abcdef')
# 36-slot UUID skeleton: empty slots for hex digits, '-' at the group breaks
# (8-4-4-4-12 layout).
UUID_TEMPLATE = []
for segment_length in (8, 4, 4, 4, 12):
    if UUID_TEMPLATE:
        UUID_TEMPLATE.append('-')
    UUID_TEMPLATE.extend([''] * segment_length)
# Positions inside the template that receive hex digits (skips the dashes).
INDICES = [pos for pos, slot in enumerate(UUID_TEMPLATE) if slot != '-']

def decode_uuid(base64_str):
    """Expand a 22-char base64-compressed asset id into a dashed UUID string.

    Strings of any other length are returned unchanged. The first two
    characters are copied verbatim; each following pair of base64 symbols
    contributes three hex digits.
    """
    if len(base64_str) != 22:
        return base64_str
    out = UUID_TEMPLATE.copy()
    out[0], out[1] = base64_str[0], base64_str[1]

    slot = 2
    for pair_start in range(2, 22, 2):
        hi = BASE64_VALUES[ord(base64_str[pair_start])]
        lo = BASE64_VALUES[ord(base64_str[pair_start + 1])]
        for digit in (hi >> 2, ((hi & 3) << 2) | (lo >> 4), lo & 0xF):
            out[INDICES[slot]] = HEX_CHARS[digit]
            slot += 1

    return ''.join(out)

def getresjson():
    """Fetch the 'special' bundle config from the game CDN.

    First reads proj.confg.json to learn the current version hash of the
    ``special`` bundle, then downloads that bundle's config JSON.

    Returns:
        dict: the parsed bundle config (uuids / versions / paths / ...).

    Raises:
        requests.HTTPError: if either download returns an error status.
        requests.Timeout: if a request exceeds the timeout.
        ValueError: if Config -> special is missing from proj.confg.json.
    """
    url1 = "https://eowgame.jcbgame.com/eow-jp-game/proj.confg.json"

    # Fix: add a timeout so a stalled CDN cannot hang the script forever.
    response1 = requests.get(url1, timeout=30)
    response1.raise_for_status()

    data1 = response1.json()

    special_value = data1.get("Config", {}).get("special")
    if not special_value:
        raise ValueError("无法获取 Config -> special 的值")

    print(f"special = {special_value}")

    url2 = f"https://eowgame.jcbgame.com/eow-jp-game/bundle_sp/special/config.{special_value}.json"

    response2 = requests.get(url2, timeout=30)
    response2.raise_for_status()

    return response2.json()

# Pull the bundle manifest once at startup.
data = getresjson()

uuids = data["uuids"]
vers_imp = data["versions"]["import"]
vers_nat = data["versions"]["native"]
paths = data["paths"]

# The version arrays are flat [index, hash, index, hash, ...] pair lists;
# fold each into an index -> hash mapping.
vimp = {}
for k in range(0, len(vers_imp), 2):
    vimp[vers_imp[k]] = vers_imp[k + 1]
vnat = {}
for k in range(0, len(vers_nat), 2):
    vnat[vers_nat[k]] = vers_nat[k + 1]

BASE = "https://eowgame.jcbgame.com/eow-jp-game/bundle_sp/special"

output_dir = "eowout"
os.makedirs(output_dir, exist_ok=True)

def has_sp_skeleton_data(obj):
    """Depth-first search for a list tagged "sp.SkeletonData".

    Returns the first list found anywhere inside ``obj`` whose first
    element is "sp.SkeletonData" (lists and dict values are searched
    recursively), or None when no such list exists.
    """
    if isinstance(obj, list):
        if obj and obj[0] == "sp.SkeletonData":
            return obj
        children = obj
    elif isinstance(obj, dict):
        children = obj.values()
    else:
        return None
    for child in children:
        hit = has_sp_skeleton_data(child)
        if hit:
            return hit
    return None

# Walk every asset id in the bundle: download each import JSON, keep only
# spine skeletons (sp.SkeletonData) and rebuild .atlas/.png/.skel on disk.
for idx, sid in enumerate(uuids):
    # Entries are either "<22-char base64 uuid>" or "<base64 uuid>@<suffix>".
    if '@' in sid:
        uuid_base64, ext = sid.split('@', 1)
    else:
        uuid_base64, ext = sid, None

    uuid = decode_uuid(uuid_base64)
    full_uuid = f"{uuid}@{ext}" if ext else uuid

    # Skip assets lacking either an import or a native version hash.
    if idx not in vimp or idx not in vnat:
        continue
    imp_ver = vimp[idx]
    nat_ver = vnat[idx]

    # Import JSON URL is sharded by the first two uuid characters.
    json_url = f"{BASE}/import/{uuid[:2]}/{full_uuid}.{imp_ver}.json"
    try:
        response = requests.get(json_url)
        response.raise_for_status()
        json_data = response.json()
    except Exception as e:
        print(f"[!] 下载失败: {json_url} ({e})")
        continue

    # Only spine assets are exported.
    if not has_sp_skeleton_data(json_data):
        continue

    # paths maps the stringified asset index to [resource_path, ...].
    path_info = paths.get(str(idx))
    if not path_info:
        print(f"[!] 未找到 paths 映射: idx={idx}")
        continue

    res_path = path_info[0]
    folder = os.path.join(output_dir, *res_path.split("/"))
    os.makedirs(folder, exist_ok=True)
    base_name = os.path.basename(res_path)

    try:
        # Atlas text is embedded at [5][0][3] of the import JSON
        # (packed-asset layout — TODO confirm it holds for all versions).
        atlas_raw = json_data[5][0][3]
        with open(os.path.join(folder, base_name + ".atlas"), "w", encoding="utf-8") as f:
            f.write(atlas_raw)
        print(f"[+] 写入 atlas 成功:{base_name}.atlas")
        # json_data[1] appears to hold dependency ids (textures) — verify.
        pngdata = json_data[1]
        for i in range(len(pngdata)):
            pnguuid = (decode_uuid((pngdata[i])[:22]))
            # NOTE(review): uuids.index(...) raises ValueError (and vnat[...]
            # KeyError) OUTSIDE the inner try if the texture id is absent,
            # which aborts the whole script — consider guarding.
            idxpng = uuids.index((pngdata[i])[:22])
            nat_verpng = vnat[idxpng]
            pngurl = f"{BASE}/native/{pnguuid[:2]}/{pnguuid}.{nat_verpng}.png"
            try:
                png_data = requests.get(pngurl)
                png_data.raise_for_status()
                # First texture keeps the base name; extras get _2, _3, ...
                png_tempname = f"{base_name}_{i+1}" if i >= 1 else f"{base_name}"
                png_path = os.path.join(folder, png_tempname + ".png")
                with open(png_path, "wb") as f:
                    f.write(png_data.content)
                    print(f"[+] 下载 png 成功:{png_tempname}.png")
            except Exception as e:
                print(f"[!] 下载 .png 失败: {pngurl} ({e})")
                continue



    except Exception as e:
        print(f"[!] 写入 .atlas 失败: {e}")
        continue

    # Skeleton binary is served with a .bin suffix; saved locally as .skel.
    skel_url = f"{BASE}/native/{uuid[:2]}/{uuid}.{nat_ver}.bin"
    try:
        skel_data = requests.get(skel_url)
        skel_data.raise_for_status()
        skel_path = os.path.join(folder, base_name + ".skel")
        with open(skel_path, "wb") as f:
            f.write(skel_data.content)
            print(f"[+] 下载 skel 成功:{base_name}.skel")
    except Exception as e:
        print(f"[!] 下载 .skel 失败: {skel_url} ({e})")
        continue

    print(f"[√] 导出成功: {res_path}")


参考获取到的资源列表

D:.
└─spine
    ├─chapter
    │  ├─chapters01
    │  │  └─chapters01
    │  │      └─images
    │  ├─chapters02
    │  │  └─chapters02
    │  │      └─images
    │  ├─chapters03
    │  │  └─chapters03
    │  │      └─images
    │  ├─chapters04
    │  │  └─chapters04
    │  │      └─images
    │  ├─chapters05
    │  │  └─chapters05
    │  │      └─images
    │  ├─chapters06
    │  │  └─chapters06
    │  │      └─images
    │  ├─chapters07
    │  │  └─chapters07
    │  │      └─images
    │  ├─chapters08
    │  │  └─chapters08
    │  │      └─images
    │  ├─chapters09
    │  │  └─chapters09
    │  │      └─images
    │  └─chapters10
    │      └─chapters10
    │          └─images
    └─date
        ├─100101s
        │  └─100101s
        │      └─images
        ├─100102s
        │  └─100102s
        │      └─images
        ├─100103s
        │  └─100103s
        │      └─images
        ├─100201s
        │  └─100201s
        │      └─images
        ├─100202s
        │  └─100202s
        │      └─images
        ├─100203s
        │  └─100203s
        │      └─images
        ├─100301s
        │  └─100301s
        │      └─images
        ├─100302s
        │  └─100302s
        │      └─images
        ├─100303s
        │  └─100303s
        │      └─images
        ├─100401s
        │  └─100401s
        │      └─images
        ├─100402s
        │  └─100402s
        │      └─images
        │          └─man
        ├─100403s
        │  └─100403s
        │      └─images
        │          └─man
        ├─100601s
        │  └─100601s
        │      └─images
        ├─100602s
        │  └─100602s
        │      └─images
        ├─100603s
        │  └─100603s
        │      └─images
        ├─100701s
        │  └─100701s
        │      └─images
        ├─100702s
        │  └─100702s
        │      └─images
        ├─100703s
        │  └─100703s
        │      └─images
        ├─101201s
        │  └─101201s
        │      └─images
        ├─101202s
        │  └─101202s
        │      └─images
        ├─101203s
        │  └─101203s
        │      └─images
        ├─101401s
        │  └─101401s
        │      └─images
        ├─101402s
        │  └─101402s
        │      └─images
        ├─101403s
        │  └─101403s
        │      └─images
        ├─101501s
        │  └─101501s
        │      └─images
        ├─101502s
        │  └─101502s
        │      └─images
        ├─101503s
        │  └─101503s
        │      └─images
        ├─101601s
        │  └─101601s
        │      └─images
        ├─101602s
        │  └─101602s
        │      └─images
        ├─101603s
        │  └─101603s
        │      └─images
        ├─101901s
        │  └─101901s
        │      └─images
        ├─101902s
        │  └─101902s
        │      └─images
        ├─101903s
        │  └─101903s
        │      └─images
        ├─101911s
        │  └─101911s
        │      └─images
        ├─101912s
        │  └─101912s
        │      └─images
        ├─101913s
        │  └─101913s
        │      └─images
        ├─102201s
        │  └─102201s
        │      └─images
        ├─102202s
        │  └─102202s
        │      └─images
        ├─102203s
        │  └─102203s
        │      └─images
        ├─102301s
        │  └─102301s
        │      └─images
        ├─102302s
        │  └─102302s
        │      └─images
        ├─102303s
        │  └─102303s
        │      └─images
        ├─102401s
        │  └─102401s
        │      └─images
        ├─102402s
        │  └─102402s
        │      └─images
        ├─102403s
        │  └─102403s
        │      └─images
        ├─102501s
        │  └─102501s
        │      └─images
        ├─102502s
        │  └─102502s
        │      └─images
        ├─102503s
        │  └─102503s
        │      └─images
        ├─102601s
        │  └─102601s
        │      └─images
        ├─102602s
        │  └─102602s
        │      └─images
        ├─102603s
        │  └─102603s
        │      └─images
        ├─103301s
        │  └─103301s
        │      └─images
        ├─103302s
        │  └─103302s
        │      └─images
        ├─103303s
        │  └─103303s
        │      └─images
        ├─104401s
        │  └─104401s
        │      └─images
        ├─104402s
        │  └─104402s
        │      └─images
        ├─104403s
        │  └─104403s
        │      └─images
        ├─105801s
        │  └─105801s
        │      └─images
        ├─105802s
        │  └─105802s
        │      └─images
        └─105803s
            └─105803s
                └─images

anima的

import os
import requests


BASE64_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
# ASCII code of a base64 symbol -> its 6-bit value (all others stay 0).
BASE64_VALUES = [0] * 128
for rank, letter in enumerate(BASE64_CHARS):
    BASE64_VALUES[ord(letter)] = rank

HEX_CHARS = list('0123456789abcdef')
_blank4 = [''] * 4
# 8-4-4-4-12 hex slots separated by dashes (36 characters total).
UUID_TEMPLATE = (_blank4 * 2 + ['-'] + _blank4 + ['-'] + _blank4
                 + ['-'] + _blank4 + ['-'] + _blank4 * 3)
INDICES = [i for i, slot in enumerate(UUID_TEMPLATE) if slot != '-']

def decode_uuid(base64_str):
    """Expand a 22-char base64-compressed asset id to a dashed UUID string.

    Anything that is not exactly 22 characters is returned untouched.
    The first two characters are copied verbatim; each later symbol pair
    yields three hex digits.
    """
    if len(base64_str) != 22:
        return base64_str
    chars = UUID_TEMPLATE.copy()
    chars[0] = base64_str[0]
    chars[1] = base64_str[1]

    write_at = 2
    for left, right in zip(base64_str[2::2], base64_str[3::2]):
        a = BASE64_VALUES[ord(left)]
        b = BASE64_VALUES[ord(right)]
        for nibble in (a >> 2, ((a & 3) << 2) | (b >> 4), b & 0xF):
            chars[INDICES[write_at]] = HEX_CHARS[nibble]
            write_at += 1

    return ''.join(chars)


def getresjson():
    """Fetch the 'anima' bundle config from the game CDN.

    Reads bundle/version.json to find the version hash of the entry with
    abName 'anima', then downloads that bundle's cc.config JSON.

    Returns:
        dict: the parsed bundle config (uuids / versions / paths / ...).

    Raises:
        requests.HTTPError: if either download returns an error status.
        requests.Timeout: if a request exceeds the timeout.
        ValueError: if the anima entry or its version field is missing.
    """
    url1 = "https://eowgame.jcbgame.com/eow-jp-game/bundle/version.json"
    # Fix: add a timeout so a stalled connection cannot hang forever.
    response1 = requests.get(url1, timeout=30)
    response1.raise_for_status()

    data1 = response1.json()

    anima_entry = next((item for item in data1 if item.get("abName") == "anima"), None)
    if not anima_entry:
        raise ValueError("未找到 abName 为 'anima' 的数据")

    anima_version = anima_entry.get("version")
    if not anima_version:
        raise ValueError("anima 的 version 字段为空")

    print(f"anima version = {anima_version}")

    url2 = f"https://eowgame.jcbgame.com/eow-jp-game/bundle/anima/cc.config.{anima_version}.json"
    response2 = requests.get(url2, timeout=30)
    response2.raise_for_status()

    return response2.json()

# Pull the anima bundle manifest once at startup.
data = getresjson()

uuids = data["uuids"]
vers_imp = data["versions"]["import"]
vers_nat = data["versions"]["native"]
paths = data["paths"]

# Flat [index, hash, index, hash, ...] pair arrays -> index -> hash dicts.
vimp = {}
for k in range(0, len(vers_imp), 2):
    vimp[vers_imp[k]] = vers_imp[k + 1]
vnat = {}
for k in range(0, len(vers_nat), 2):
    vnat[vers_nat[k]] = vers_nat[k + 1]

BASE = "https://eowgame.jcbgame.com/eow-jp-game/bundle/anima"

output_dir = "eowoutan"
os.makedirs(output_dir, exist_ok=True)

def has_sp_skeleton_data(obj):
    """Recursively look for a list whose first element is "sp.SkeletonData".

    Searches lists and dict values depth-first and returns the first
    matching list, or None if ``obj`` contains no such list.
    """
    if isinstance(obj, list):
        if obj and obj[0] == "sp.SkeletonData":
            return obj
        candidates = obj
    elif isinstance(obj, dict):
        candidates = obj.values()
    else:
        return None
    for candidate in candidates:
        match = has_sp_skeleton_data(candidate)
        if match:
            return match
    return None

# Walk every asset id in the anima bundle: download each import JSON, keep
# only spine skeletons and rebuild .atlas/.png/.skel files on disk.
for idx, sid in enumerate(uuids):
    # Entries are either "<22-char base64 uuid>" or "<base64 uuid>@<suffix>".
    if '@' in sid:
        uuid_base64, ext = sid.split('@', 1)
    else:
        uuid_base64, ext = sid, None

    uuid = decode_uuid(uuid_base64)
    full_uuid = f"{uuid}@{ext}" if ext else uuid

    # Skip assets lacking either an import or a native version hash.
    if idx not in vimp or idx not in vnat:
        continue
    imp_ver = vimp[idx]
    nat_ver = vnat[idx]

    # Import JSON URL is sharded by the first two uuid characters.
    json_url = f"{BASE}/import/{uuid[:2]}/{full_uuid}.{imp_ver}.json"
    try:
        response = requests.get(json_url)
        response.raise_for_status()
        json_data = response.json()
    except Exception as e:
        print(f"[!] 下载失败: {json_url} ({e})")
        continue

    # Only spine assets are exported.
    if not has_sp_skeleton_data(json_data):
        continue

    # paths maps the stringified asset index to [resource_path, ...].
    path_info = paths.get(str(idx))
    if not path_info:
        print(f"[!] 未找到 paths 映射: idx={idx}")
        continue

    res_path = path_info[0]
    folder = os.path.join(output_dir, *res_path.split("/"))
    os.makedirs(folder, exist_ok=True)
    base_name = os.path.basename(res_path)

    try:
        # Atlas text is embedded at [5][0][3] of the import JSON
        # (packed-asset layout — TODO confirm it holds for all versions).
        atlas_raw = json_data[5][0][3]
        with open(os.path.join(folder, base_name + ".atlas"), "w", encoding="utf-8") as f:
            f.write(atlas_raw)
        print(f"[+] 写入 atlas 成功:{base_name}.atlas")
        # json_data[1] appears to hold dependency ids (textures) — verify.
        pngdata = json_data[1]
        for i in range(len(pngdata)):
            pnguuid = (decode_uuid((pngdata[i])[:22]))
            # NOTE(review): uuids.index(...) raises ValueError (and vnat[...]
            # KeyError) OUTSIDE the inner try if the texture id is absent,
            # which aborts the whole script — consider guarding.
            idxpng = uuids.index((pngdata[i])[:22])
            nat_verpng = vnat[idxpng]
            pngurl = f"{BASE}/native/{pnguuid[:2]}/{pnguuid}.{nat_verpng}.png"
            try:
                png_data = requests.get(pngurl)
                png_data.raise_for_status()
                # First texture keeps the base name; extras get _2, _3, ...
                png_tempname = f"{base_name}_{i+1}" if i >= 1 else f"{base_name}"
                png_path = os.path.join(folder, png_tempname + ".png")
                with open(png_path, "wb") as f:
                    f.write(png_data.content)
                    print(f"[+] 下载 png 成功:{png_tempname}.png")
            except Exception as e:
                print(f"[!] 下载 .png 失败: {pngurl} ({e})")
                continue



    except Exception as e:
        print(f"[!] 写入 .atlas 失败: {e}")
        continue

    # Skeleton binary is served with a .bin suffix; saved locally as .skel.
    skel_url = f"{BASE}/native/{uuid[:2]}/{uuid}.{nat_ver}.bin"
    try:
        skel_data = requests.get(skel_url)
        skel_data.raise_for_status()
        skel_path = os.path.join(folder, base_name + ".skel")
        with open(skel_path, "wb") as f:
            f.write(skel_data.content)
            print(f"[+] 下载 skel 成功:{base_name}.skel")
    except Exception as e:
        print(f"[!] 下载 .skel 失败: {skel_url} ({e})")
        continue

    print(f"[√] 导出成功: {res_path}")

速度比较慢我也莫得办法。。。不会写互斥锁多线程

大佬们发力了 :heart_eyes:

出了点问题,回退修改了。修改成了分步。可以下载全部含config的资源(json,mp4,png,jpg,spine)。

下载资源清单.py
import os
import requests
import urllib3
import json
from concurrent.futures import ThreadPoolExecutor
import time

# Suppress InsecureRequestWarning (downloads intentionally use verify=False).
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Download a single file to download_dir, keeping its original file name.
def download_file(link, download_dir):
    """Fetch ``link`` and save it under ``download_dir``.

    Text-ish responses (JSON/text content type) are saved re-encoded as
    UTF-8; everything else is written as raw bytes. Errors are printed,
    not raised.
    """
    # Append a timestamp query parameter as a cache-buster.
    timestamp = int(time.time())
    if '?' in link:
        link = f"{link}&t={timestamp}"
    else:
        link = f"{link}?t={timestamp}"

    # Derive the file name from the URL path (before the query string).
    path_part = link.split("?")[0].split("/")
    file_name = path_part[-1] if path_part[-1] else f"file_{hash(link) % 1000000}.bin"
    
    file_path = os.path.join(download_dir, file_name)

    # Browser-like headers, including the session's manifest token Referer.
    headers = {
        "Connection": "keep-alive",
        "sec-ch-ua-platform": "Windows",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36",
        "sec-ch-ua": '"Chromium";v="136", "Google Chrome";v="136", "Not.A/Brand";v="99"',
        "sec-ch-ua-mobile": "?0",
        "Accept": "*/*",
        "Sec-Fetch-Site": "same-origin",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Storage-Access": "active",
        "Referer": "https://eowgame.jcbgame.com/eow-jp-game/game/index_zs.html?1749644799621&manifestToken=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJyMThVcmwiOiIiLCJyMThTdGF0dXMiOjEsImRldmljZSI6IiIsImRldmljZTIiOiJwYyIsImlzbmV3IjoxLCJzZGtMb2dFdmVudFVybCI6Imh0dHBzOlwvXC9qY2JnYW1lLmNvbTo0NDNcL2FwaVwvTG9nZXZlbnRcL3Nka0xvZ0V2ZW50Iiwic3dpdGNoX2FjY291bnRfYnRuX3N0YXR1cyI6MCwic2VydmljZV9zdGF0dXMiOjAsImVlcmZlY3RfaW5mb19zdGF0dXMiOjAsInF1ZXN0X3N0YXR1cyI6MCwidXNlcmlkIjoiMTAwODg4N182NyIsImdhbWVpZCI6ImRtbVNMRzQxMDMyMjAwODMiLCJzZXJ2ZXJpZCI6Im5vc2VydmVyIiwiYmluZCI6MSwiaXNzIjoiaW5nYW1lIiwiaXNhZHVsdCI6MSwicGxhdGZvcm0iOjY3LCJtb25ldGFyeXVuaXQiOiJkbW1cdTMwZGRcdTMwYTRcdTMwZjNcdTMwYzgiLCJ0ZXN0VXNlciI6MCwiZ3JhZGUiOjIsImRtbV9pZCI6IjU0NjY1ODkzIiwicGxhdGZvcm10eXBlIjoic3BlY2lhbCIsImh1b2JpX2lkIjoiMTAyIiwiZ2FtZWNvZGUiOiJkbW1TTEc0MTAzMjIwMDgzIiwidGltZSI6MTc0OTY0NDc5OH0.Txr-1yDOxyLgM34Zb0p5IVjY5FAY5FB6wUkSYy2hVPUdQiVLviZrKhijd2Tc0ZwTYEnppXEg-xwOU8LvsHoM4wa6piBLoDI7kx7TtSCW3zZtT6kAKEujrnELTL4nJrWWWxK7zimquMEdJxPshEdbZM30knHPPn6BuIXHDISkUHvg5uELMn3Xmp5LIbZfgKLv3baG9j2ND6GoWAtR7YC2cAAuehbG8KkSMtZXVlg8u777QiWkL2_3EHoE_zINVJi_okRKdDWmfgByXbAxBI6KkfP0IcrRqP-FnK0hOuU3RpEOKg3y9oGsao9K2pcId5YZkQPvDqwINGwPcDHzEc1p_A&userAgreeStatus=1&lang=jp&r18Status=special",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": "zh-CN,zh;q=0.9"
    }

    try:
        # SSL certificate verification disabled on purpose.
        response = requests.get(link, headers=headers, verify=False)
        response.raise_for_status()

        # Decide write mode from the response content type.
        content_type = response.headers.get('Content-Type', '')
        
        # NOTE(review): text mode re-encodes as UTF-8 and may normalize
        # newlines; byte-exact copies would need binary mode throughout.
        if 'json' in content_type or 'text' in content_type:
            # Text content.
            with open(file_path, 'w', encoding='utf-8') as file:
                file.write(response.text)
            print(f"下载完成: {file_name} (文本模式)")
        else:
            # Binary content.
            with open(file_path, 'wb') as file:
                file.write(response.content)
            print(f"下载完成: {file_name} (二进制模式)")
            
        # Report what was written.
        file_size = os.path.getsize(file_path)
        print(f"文件大小: {file_size} 字节")
        print(f"保存路径: {os.path.abspath(file_path)}")

    except requests.RequestException as e:
        print(f"请求过程中出现错误: {e}")
    except Exception as e:
        print(f"下载过程中出现未知错误: {e}")

# Main driver: fetch root manifests, then every per-bundle config JSON.
def main_download():
    """Download the two root manifests, parse them, then download all
    bundle config JSONs they reference into the current directory."""
    # Stage 1: the two root manifest links.
    download_links = ["https://eowgame.jcbgame.com/eow-jp-game/bundle/version.json", "https://eowgame.jcbgame.com/eow-jp-game/proj.confg.json"]
    
    all_download_links = []
    all_download_links.extend(download_links)

    # Ensure the download directory exists (cwd, so this is a no-op).
    download_dir = os.getcwd()
    os.makedirs(download_dir, exist_ok=True)
    
    print(f"下载目录: {os.path.abspath(download_dir)}")
    
    # Download stage 1 with a small thread pool.
    with ThreadPoolExecutor(max_workers=2) as executor:
        print(f"开始下载: {download_links}")
        executor.map(lambda link: download_file(link, download_dir), download_links)

    # Parse version.json: one cc.config URL per bundle entry.
    try:
        with open('version.json', 'r', encoding='utf-8') as f:
            data = json.load(f)
        base_url = "https://eowgame.jcbgame.com/eow-jp-game/bundle/"
        for item in data:
            ab_name = item.get('abName')
            version = item.get('version')
            if ab_name and version:
                url = f"{base_url}{ab_name}/cc.config.{version}.json"
                all_download_links.append(url)
    except FileNotFoundError:
        print("未找到 version.json 文件,请确保该文件存在于当前目录下。")
    except json.JSONDecodeError:
        print("无法解析 version.json 文件,请确保文件内容为有效的 JSON 格式。")

    # Parse proj.confg.json: one config URL per special-bundle key.
    try:
        with open('proj.confg.json', 'r', encoding='utf-8') as file:
            config_data = json.load(file)
        config_block = config_data.get('Config', {})
        keys = ['normal', 'spdata', 'special', 'special2']
        for key in keys:
            value = config_block.get(key)
            if value:
                url = f"https://eowgame.jcbgame.com/eow-jp-game/bundle_sp/{key}/config.{value}.json"
                all_download_links.append(url)
    except FileNotFoundError:
        print("未找到 proj.confg.json 文件,请确保该文件存在于当前目录下。")
    except json.JSONDecodeError:
        print("proj.confg.json 文件格式错误,请检查文件内容。")

    if not all_download_links:
        print("没有可用的下载链接。")
        return

    # Stage 2: download the bundle configs discovered above.
    with ThreadPoolExecutor(max_workers=2) as executor:
        print(f"开始下载后续文件: {all_download_links[len(download_links):]}")
        executor.map(lambda link: download_file(link, download_dir), all_download_links[len(download_links):])

    print("\n🎉 文件下载完成!")

if __name__ == "__main__":
    # Run the download workflow.
    main_download()
    
    # Pause so the console window stays open for the user to read.
    input("按 Enter 键退出...")
解析提取下载.py
import re
import os
import requests
import time
import json
import concurrent.futures
import urllib3
from datetime import datetime

# Suppress InsecureRequestWarning (requests run with verify=False).
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Asset type -> file suffix(es); the first suffix is fetched from the
# import bundle, the optional second one from the native bundle.
replace_rules = {
    "cc.ImageAsset": ".json,.png",
    "cc.SpriteFrame": ".json",
    "sp.SkeletonData": ".atlas,.skel",
    "cc.TextAsset": ".json",
    "cc.VideoClip": ".json,.mp4"
}

BASE64_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
# ASCII code of a base64 symbol -> its 6-bit value (others stay 0).
BASE64_VALUES = [0] * 128
for value, symbol in enumerate(BASE64_CHARS):
    BASE64_VALUES[ord(symbol)] = value

HEX_CHARS = list('0123456789abcdef')
# 36-slot UUID skeleton: empty hex slots in an 8-4-4-4-12 layout.
UUID_TEMPLATE = []
for segment_length in (8, 4, 4, 4, 12):
    if UUID_TEMPLATE:
        UUID_TEMPLATE.append('-')
    UUID_TEMPLATE.extend([''] * segment_length)
# Template positions that receive hex digits (dashes excluded).
INDICES = [pos for pos, slot in enumerate(UUID_TEMPLATE) if slot != '-']


def decode_uuid(base64_str):
    """Restore a base64-compressed asset id to UUID form, keeping any '@suffix'.

    The part before an optional '@' is expanded to a dashed UUID when it is
    exactly 22 characters long; otherwise it is returned unchanged. A
    non-empty suffix is re-attached with '@'.
    """
    main_part, _, suffix = base64_str.partition('@')

    if len(main_part) != 22:
        decoded = main_part
    else:
        slots = UUID_TEMPLATE.copy()
        slots[0], slots[1] = main_part[0], main_part[1]

        write_at = 2
        for i in range(2, 22, 2):
            hi = BASE64_VALUES[ord(main_part[i])]
            lo = BASE64_VALUES[ord(main_part[i + 1])]
            for nibble in (hi >> 2, ((hi & 3) << 2) | (lo >> 4), lo & 0xF):
                slots[INDICES[write_at]] = HEX_CHARS[nibble]
                write_at += 1

        decoded = ''.join(slots)

    # Re-attach a non-empty suffix, matching the original behaviour.
    if suffix:
        decoded = f"{decoded}@{suffix}"

    return decoded


def extract_arrays_from_json(json_path):
    """Parse a bundle config JSON with regexes and pull out its tables.

    Args:
        json_path: path of the downloaded cc.config / config JSON file.

    Returns:
        Tuple ``(uuids, imports, natives, paths, types, name)``:
        decoded uuid list, serial->import-hash map, serial->native-hash map,
        serial->{'path', 'num'} map, per-entry suffix strings, bundle name.
        On a read failure every element is None.
    """
    # 1. Read the raw JSON text.
    try:
        with open(json_path, "r", encoding="utf-8") as f:
            content = f.read()
        print(f"✅ 读取 JSON 文件成功,长度: {len(content)} 字符")
    except Exception as e:
        print(f"❌ 读取文件失败: {str(e)}")
        # Bug fix: the success path returns 6 values, so the failure path
        # must as well (it previously returned 7 Nones, which broke
        # 6-way unpacking at the call site).
        return None, None, None, None, None, None

    # 2. Extract the uuids array (contents between the brackets).
    try:
        uuids_match = re.search(r'"uuids":\s*\[([^\]]+)\]', content)
        if not uuids_match:
            print("警告: 未找到 uuids 数组")
            uuids = []
        else:
            uuids_str = uuids_match.group(1)
            # Clean each element (strip quotes, spaces, empty entries).
            uuids = [item.strip().strip('"') for item in uuids_str.split(',') if item.strip()]
            uuids = [decode_uuid(uuid) for uuid in uuids]
        print(f"✅ 提取到 uuids: {len(uuids)} 条")

    except Exception as e:
        print(f"❌ 处理 uuids 失败: {str(e)}")
        uuids = []

    # 3. Extract the import array (inside versions.import).
    try:
        # Match versions.import: [...]
        import_match = re.search(r'"versions":\s*\{\s*"import":\s*\[([^\]]+)\]', content)
        if not import_match:
            print("警告: 未找到 versions.import 数组")
            imports = {}
        else:
            import_str = import_match.group(1)
            # Group elements in pairs: serial number + hash string.
            import_items = [item.strip() for item in import_str.split(',') if item.strip()]
            imports = {}
            for i in range(0, len(import_items), 2):
                if i + 1 < len(import_items):
                    # Keep the number as-is; strip quotes from the string.
                    num = import_items[i]
                    string_val = import_items[i + 1].strip('"')
                    imports[num] = string_val
        print(f"✅ 提取到 import: {len(imports)} 项")

    except Exception as e:
        print(f"❌ 处理 import 失败: {str(e)}")
        imports = {}

    # 4. Extract the native array.
    try:
        native_match = re.search(r'"native":\s*\[([^\]]+)\]', content)
        if not native_match:
            print("警告: 未找到 native 数组")
            natives = {}
        else:
            native_str = native_match.group(1)
            # Group elements in pairs: serial number + hash string.
            native_items = [item.strip() for item in native_str.split(',') if item.strip()]
            natives = {}
            for i in range(0, len(native_items), 2):
                if i + 1 < len(native_items):
                    # Keep the number as-is; strip quotes from the string.
                    num = native_items[i]
                    string_val = native_items[i + 1].strip('"')
                    natives[num] = string_val
        print(f"✅ 提取到 native: {len(natives)} 项")

    except Exception as e:
        print(f"❌ 处理 native 失败: {str(e)}")
        natives = {}

    # 5. Extract the paths block.
    try:
        paths_match = re.search(r'"paths":\s*\{([^}]+)\}', content)
        if not paths_match:
            print("警告: 未找到 paths 块")
            paths = {}
        else:
            paths_str = paths_match.group(1)
            # Match each path entry: "serial": ["path", num1, num2].
            path_items = re.findall(r'"(\d+)":\s*\[\s*"([^"]+)"\s*,\s*(\d+)\s*,\s*(\d+)\s*\]', paths_str)

            if not path_items:
                print("警告: 在 paths 块中未找到有效路径项")
                paths = {}
            else:
                paths = {}
                for key, path, num1, num2 in path_items:
                    # Stored off-by-one; extract_urls adds 1 back when
                    # indexing types — TODO confirm this round-trip is intended.
                    paths[key] = {
                        'path': path,
                        'num': int(num1) - 1
                    }
        print(f"✅ 提取到 paths: {len(paths)} 项")

    except Exception as e:
        print(f"❌ 处理 paths 失败: {str(e)}")
        paths = {}

    # 6. Extract the types array and map each type to its suffix string.
    try:
        types_match = re.search(r'"types":\s*\[([^\]]+)\]', content)
        if not types_match:
            print("警告: 未找到 types 数组")
            types = []
        else:
            types_str = types_match.group(1)
            # Clean each element (strip quotes, spaces, empty entries).
            types = [item.strip().strip('"') for item in types_str.split(',') if item.strip()]
            # Replace known asset type names with their file suffixes;
            # unknown types become an empty string (nothing to download).
            new_types = []
            for line in types:
                replaced = False
                for old_text, new_text in replace_rules.items():
                    if old_text in line:
                        new_types.append(new_text)
                        replaced = True
                        break
                if not replaced:
                    new_types.append('')
            types = new_types
        print(f"✅ 提取到 types: {len(types)} 项")

    except Exception as e:
        print(f"❌ 处理 types 失败: {str(e)}")
        types = []

    # 7. Extract the bundle name field.
    try:
        # Matches the first "name": "..." occurrence in the file.
        name_match = re.search(r'"name"\s*:\s*"([^"]+)"', content)
        if not name_match:
            print("警告: 未找到 name 字段")
            name = ''
        else:
            name = name_match.group(1)
        print(f"✅ 提取到 name: {name}")

    except Exception as e:
        print(f"❌ 处理 name 失败: {str(e)}")
        name = ''

    return uuids, imports, natives, paths, types, name


def determine_file_name(path):
    """Derive a local file name from a resource path.

    When the path contains a 'spriteFrame' or 'texture' segment, the
    nearest preceding segment that is neither of those is used; otherwise
    the last segment (stripped of everything after the first dot) is used.
    """
    segments = path.split('/')
    tail = segments[-1]

    if 'spriteFrame' in segments or 'texture' in segments:
        # Walk backwards from the second-to-last segment.
        for segment in reversed(segments[:-1]):
            if segment not in ('spriteFrame', 'texture'):
                return segment
        return tail
    # partition('.')[0] yields the whole string when there is no dot.
    return tail.partition('.')[0]


def get_final_path(path, file_name, suffix):
    """Build the save path: drop a trailing spriteFrame/texture segment,
    then append ``file_name`` with its original ``suffix``."""
    segments = path.split('/')
    if segments[-1] in ('spriteFrame', 'texture'):
        segments.pop()
    segments.append(f"{file_name}{suffix}")
    return "/".join(segments)


def get_unique_path(path, written_paths):
    """Return ``path`` unchanged if unused, else the first free
    ``stem(N)ext`` variant not present in ``written_paths``."""
    if path not in written_paths:
        return path
    stem, ext = os.path.splitext(path)
    n = 1
    while f"{stem}({n}){ext}" in written_paths:
        n += 1
    return f"{stem}({n}){ext}"


def extract_urls(uuids, imports, natives, paths, types, name, json_file_name):
    """Build CDN download URLs plus matching local save paths for every paths entry.

    Args:
        uuids: decoded uuid list, indexed by serial number.
        imports: serial -> import-bundle version hash.
        natives: serial -> native-bundle version hash.
        paths: serial -> {'path': str, 'num': int} from extract_arrays_from_json.
        types: per-entry suffix strings such as ".json,.png" (may be '').
        name: bundle name, used as a URL path segment.
        json_file_name: config file name; selects bundle vs bundle_sp base URL.

    Returns:
        (download_links, target_names): parallel lists of URLs and local paths.
    """
    download_links = []
    target_names = []
    written_paths = set()
    # Choose the URL prefix from the config file's name.
    if "cc.config" in json_file_name:
        base_url = "https://eowgame.jcbgame.com/eow-jp-game/bundle/"
    else:
        base_url = "https://eowgame.jcbgame.com/eow-jp-game/bundle_sp/"

    for serial, data in paths.items():
        path = data['path']
        num = data['num']

        print(f"\n[处理路径] 序列号: {serial}")

        # 'num' was stored minus one, so +1 restores the raw config value
        # used as the types index — TODO confirm this round-trip is intended.
        type_idx = num + 1
        print(f"[INFO] 计算type_idx: {type_idx}")

        # Bounds check against the types table.
        if type_idx >= len(types):
            print(f"[ERROR] type索引{type_idx}超出types范围(共{len(types)}项),跳过处理")
            continue

        # Suffix string for this entry, e.g. ".json,.png" or "".
        processed_line = types[type_idx].strip()
        print(f"[INFO] types第{type_idx}项 - 处理后: '{processed_line}'")

        # Empty / whitespace-only entries mean there is nothing to fetch.
        if not processed_line:
            houzhui_list = []
        else:
            houzhui_list = [h.strip() for h in processed_line.split(',')]

        houzhui1 = houzhui_list[0] if len(houzhui_list) > 0 else ''
        houzhui2 = houzhui_list[1] if len(houzhui_list) > 1 else ''
        print(f"[INFO] houzhui1: {houzhui1}, houzhui2: {houzhui2}")

        # Local file name derived from the resource path.
        file_name = determine_file_name(path)
        print(f"[INFO] 文件名: {file_name}")

        # uuid for this serial; 'uu' is the 2-char URL shard prefix.
        uuid_idx = int(serial)
        if uuid_idx >= len(uuids):
            print(f"[ERROR] uuid索引{uuid_idx}超出uuids范围(共{len(uuids)}个),跳过处理")
            continue
        uuid_val = uuids[uuid_idx]
        uu = uuid_val[:2] if uuid_val else ''
        print(f"[INFO] uuid: {uuid_val}, uu: {uu}")

        # First suffix (.json/.atlas): served from import/, hash from imports.
        if houzhui1:
            hash1 = imports.get(serial, '')
            print(f"[INFO] imports中序列号{serial}的hash: {hash1}")
            if not hash1:
                print(f"[WARN] 未找到序列号{serial}对应的imports hash,跳过.houzhui1 URL")
            else:
                # .atlas is not served directly; the atlas text is inside .json.
                processed_houzhui1 = houzhui1
                if houzhui1 == '.atlas':
                    processed_houzhui1 = '.json'
                    print(f"[INFO] 检测到.atlas后缀,替换为.json")
                url = f"{base_url}{name}/import/{uu}/{uuid_val}.{hash1}{processed_houzhui1}"
                download_links.append(url)
                print(f"[SUCCESS] 生成import URL: {url.strip()}")
                # Record the local target path, keeping the original suffix.
                final_path = get_final_path(path, file_name, houzhui1)
                final_path = get_unique_path(final_path, written_paths)
                written_paths.add(final_path)
                target_names.append(final_path)

        # Second suffix (.png/.skel/.mp4): served from native/, hash from natives.
        if houzhui2:
            hash2 = natives.get(serial, '')
            print(f"[INFO] natives中序列号{serial}的hash: {hash2}")
            if not hash2:
                print(f"[WARN] 未找到序列号{serial}对应的natives hash,跳过.houzhui2 URL")
            else:
                # Skeleton binaries are stored on the CDN as .bin.
                processed_houzhui2 = houzhui2
                if houzhui2 == '.skel':
                    processed_houzhui2 = '.bin'
                    print(f"[INFO] 检测到.skel后缀,替换为.bin")
                url = f"{base_url}{name}/native/{uu}/{uuid_val}.{hash2}{processed_houzhui2}"
                download_links.append(url)
                print(f"[SUCCESS] 生成native URL: {url.strip()}")
                # Record the local target path, keeping the original suffix.
                final_path = get_final_path(path, file_name, houzhui2)
                final_path = get_unique_path(final_path, written_paths)
                written_paths.add(final_path)
                target_names.append(final_path)

    return download_links, target_names


def get_simplified_timestamp():
    """Return the current local time as a compact 12-digit string (YYMMDDHHMMSS)."""
    return datetime.now().strftime("%y%m%d%H%M%S")


def generate_atlas_file(json_file_path, atlas_save_path):
    """Extract the embedded atlas text from a Cocos import JSON and save it.

    json_file_path  -- downloaded import JSON; the atlas text sits at [5][0][3].
    atlas_save_path -- destination path for the .atlas file (may equal the source;
                       the JSON is fully read before the write starts).
    Returns True on success, False on any failure (errors are printed, not raised).
    """
    try:
        with open(json_file_path, 'r', encoding='utf-8') as file:
            json_data = json.load(file)

        # Atlas text location in the import format (fixed index per observed files).
        atlas_raw = json_data[5][0][3]

        # Ensure the target directory exists.  dirname('') would make
        # os.makedirs raise FileNotFoundError for bare filenames, so guard it.
        parent_dir = os.path.dirname(atlas_save_path)
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)

        with open(atlas_save_path, "w", encoding="utf-8") as f:
            f.write(atlas_raw)

        print(f"[ATLAS] 成功生成: {atlas_save_path}")
        return True
    except json.JSONDecodeError:
        print(f"[错误] JSON解析失败: {json_file_path}")
    except IndexError:
        print(f"[错误] JSON结构不符合预期,索引越界: {json_file_path}")
    except Exception as e:
        print(f"[错误] 生成atlas时出错: {str(e)}")
    return False


def download_file(link, save_path, total_files, index, digit_count, success_files, failed_files, target_names, name):
    """Download one file to downloader/<name>/<save_path> with retries.

    Skips the download when a local copy matching the server's Content-Length
    already exists; on a 404 of a .png URL retries the same URL as .jpg; when
    the target file name ends in .atlas, converts the downloaded JSON payload
    to atlas text in place.  Results are appended to the shared lists
    success_files / failed_files (this function runs on worker threads;
    list.append is used for cross-thread accumulation).
    """
    max_retries = 3
    retry_delay = 5
    # Build the final save location under ./downloader/<name>/.
    download_dir = os.path.join(os.getcwd(), "downloader", name)
    final_save_path = os.path.join(download_dir, save_path)
    os.makedirs(os.path.dirname(final_save_path), exist_ok=True)

    for attempt in range(max_retries):
        try:
            # Skip when the file already exists with the same non-zero size as
            # the server copy (HEAD request; TLS verification deliberately off).
            if os.path.exists(final_save_path):
                local_size = os.path.getsize(final_save_path)
                headers = requests.head(link, allow_redirects=True, timeout=10, verify=False).headers
                remote_size = int(headers.get('Content-Length', 0))
                if local_size == remote_size and remote_size != 0:
                    print(f"[跳过] {total_files}/{index:0{digit_count}d} 已存在: {final_save_path}")
                    success_files.append(final_save_path)
                    return

            # Perform the actual download, streamed to disk in 8 KiB chunks.
            response = requests.get(link, stream=True, timeout=10, verify=False)
            response.raise_for_status()

            with open(final_save_path, 'wb') as file:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        file.write(chunk)

            # Validate the result: a zero-byte file counts as a failed attempt.
            if os.path.exists(final_save_path):
                final_size = os.path.getsize(final_save_path)
                if final_size == 0:
                    raise Exception("下载文件大小为0")

                print(f"[成功] {total_files}/{index:0{digit_count}d} 下载完成: {final_save_path}")
                success_files.append(final_save_path)

                # Target name decides post-processing (index is 1-based).
                target_name = target_names[index - 1]

                # Only .atlas targets need conversion: the downloaded payload is
                # the import JSON, whose atlas text is extracted in place.
                if target_name.lower().endswith('.atlas'):
                    # Source and destination are the same path on purpose.
                    if generate_atlas_file(final_save_path, final_save_path):
                        # Nothing to delete here: save_path already is the atlas path.
                        print(f"[转换] 已将JSON转换为atlas: {final_save_path}")
                return

        except requests.HTTPError as e:
            # raise_for_status() fired, so `response` is bound here.
            if response.status_code == 404:
                # Some textures only exist as .jpg — retry a 404 .png as .jpg.
                if final_save_path.lower().endswith('.png'):
                    jpg_save_path = final_save_path[:-4] + '.jpg'
                    jpg_link = link[:-4] + '.jpg'
                    print(f"[INFO] {final_save_path} 404 错误,尝试下载 {jpg_save_path}")
                    try:
                        jpg_response = requests.get(jpg_link, stream=True, timeout=10, verify=False)
                        jpg_response.raise_for_status()
                        with open(jpg_save_path, 'wb') as file:
                            for chunk in jpg_response.iter_content(chunk_size=8192):
                                if chunk:
                                    file.write(chunk)
                        if os.path.exists(jpg_save_path):
                            final_size = os.path.getsize(jpg_save_path)
                            if final_size == 0:
                                raise Exception("下载文件大小为0")
                            print(f"[成功] {total_files}/{index:0{digit_count}d} 下载完成: {jpg_save_path}")
                            success_files.append(jpg_save_path)
                            return
                    except requests.RequestException as jpg_e:
                        print(f"[失败] {total_files}/{index:0{digit_count}d} 下载失败: {str(jpg_e)}")
                        failed_files.append(jpg_link)
                else:
                    print(f"[失败] {total_files}/{index:0{digit_count}d} 404错误: {link}")
                    failed_files.append(link)
                break
            elif attempt < max_retries - 1:
                print(f"[重试] {attempt + 1}/{max_retries} 下载失败: {str(e)}")
                time.sleep(retry_delay)
            else:
                print(f"[失败] {total_files}/{index:0{digit_count}d} 下载失败: {str(e)}")
                failed_files.append(link)
                break
        except requests.RequestException as e:
            # Timeouts / connection errors: back off and retry, then give up.
            if attempt < max_retries - 1:
                print(f"[重试] {attempt + 1}/{max_retries} 下载失败: {str(e)}")
                time.sleep(retry_delay)
            else:
                print(f"[失败] {total_files}/{index:0{digit_count}d} 下载失败: {str(e)}")
                failed_files.append(link)
                break


def count_files_in_directory(directory):
    """Return the total number of files under *directory*, recursively."""
    return sum(len(filenames) for _root, _dirs, filenames in os.walk(directory))


def main():
    """Drive the whole download.

    For every config*.json in the working directory: parse it, build the
    download URL list, fetch everything with a thread pool, then print
    per-config and overall statistics and verify on-disk file counts.
    """
    # Locate candidate config JSON files in the current directory.
    json_files = [f for f in os.listdir('.') if f.endswith('.json') and 'config' in f]
    if not json_files:
        print("❌ 未找到包含config的json文件")
        return

    overall_success_files = []
    overall_failed_files = []
    overall_total_files = 0

    for json_path in json_files:
        json_file_name = os.path.basename(json_path)
        print(f"\n开始处理文件: {json_file_name}")

        # Parse the config JSON into its component arrays.
        uuids, imports, natives, paths, types, name = extract_arrays_from_json(json_path)
        if uuids is None:
            continue

        # Build the download URLs and their target file names.
        download_links, target_names = extract_urls(uuids, imports, natives, paths, types, name, json_file_name)

        total_files = len(download_links)
        digit_count = len(str(total_files))
        # Root folder all downloads go under; download_file() derives the
        # per-file location itself from (name, target path).
        download_dir = os.path.join(os.getcwd(), "downloader")
        os.makedirs(download_dir, exist_ok=True)

        success_files = []
        failed_files = []
        print(f"✅ 开始下载 {total_files} 个文件到 {download_dir}")
        print("-" * 60)

        # Fan the downloads out over a thread pool (downloading is I/O-bound).
        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
            futures = []
            for idx, (link, path) in enumerate(zip(download_links, target_names), 1):
                futures.append(executor.submit(
                    download_file, link, path, total_files, idx, digit_count,
                    success_files, failed_files, target_names, name
                ))
            # Block until every task finishes (re-raises any worker exception).
            for future in futures:
                future.result()

        # Per-config statistics.
        print("\n" + "=" * 60)
        print(f"📊 本次下载统计 | 成功: {len(success_files)} | 失败: {len(failed_files)}")

        if failed_files:
            print("\n❌ 本次下载失败的链接:")
            for link in failed_files:
                print(f" - {link}")
        else:
            print("\n🎉 本次所有文件下载完成!")

        # Sanity check: compare on-disk file count with the expected total.
        actual_count = count_files_in_directory(os.path.join(download_dir, name))
        print(f"\n📁 本次实际文件数: {actual_count} | 本次目标文件数: {total_files}")
        if actual_count == total_files:
            print("✅ 本次数量一致,下载完成")
        else:
            print(f"❌ 本次数量不一致,可能存在文件生成或删除异常")

        overall_success_files.extend(success_files)
        overall_failed_files.extend(failed_files)
        overall_total_files += total_files

    # Overall statistics across all configs.
    print("\n" + "=" * 60)
    print(f"📊 整体下载统计 | 成功: {len(overall_success_files)} | 失败: {len(overall_failed_files)}")

    if overall_failed_files:
        print("\n❌ 整体下载失败的链接:")
        for link in overall_failed_files:
            print(f" - {link}")
    else:
        print("\n🎉 所有文件下载完成!")

    # Overall on-disk sanity check.
    overall_actual_count = count_files_in_directory(os.path.join(os.getcwd(), "downloader"))
    print(f"\n📁 整体实际文件数: {overall_actual_count} | 整体目标文件数: {overall_total_files}")
    if overall_actual_count == overall_total_files:
        print("✅ 整体数量一致,下载完成")
    else:
        print(f"❌ 整体数量不一致,可能存在文件生成或删除异常")

    input("\n按回车键退出...")


if __name__ == "__main__":
    try:
        import requests
    except ImportError:
        print("❌ 请先安装依赖: pip install requests")
        exit(1)
    main()

尽量一次下完或者分开config下载,因为下载不会覆盖。修改了含@的解码,合并时ai莫名其妙吞了我这一段。虽然我也不清楚下那么多没用的json干嘛。

测试了一下,可以一次性全下下来,差不多1.7g。

经测试能下载所有的spine文件包括无码的在special2文件夹中

import os
import requests
import json

# Lookup tables used by decode_uuid(): base64 digit values indexed by ASCII
# code, the hex alphabet, and a 36-slot UUID layout with '-' separators fixed.
BASE64_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
BASE64_VALUES = [0] * 128
for _rank, _symbol in enumerate(BASE64_CHARS):
    BASE64_VALUES[ord(_symbol)] = _rank

HEX_CHARS = list("0123456789abcdef")
_t = ["", "", "", ""]
UUID_TEMPLATE = _t * 2 + ["-"] + _t + ["-"] + _t + ["-"] + _t + ["-"] + _t * 3
# Template positions that receive hex digits (everything except the dashes).
INDICES = [pos for pos, slot in enumerate(UUID_TEMPLATE) if slot != "-"]

def decode_uuid(base64_str):
    """Expand a 22-character compressed Cocos uuid to canonical 8-4-4-4-12 form.

    Inputs that are not exactly 22 characters are returned unchanged.  The
    first two characters are kept verbatim; every following pair of base64
    digits (12 bits) expands to three hex digits.
    """
    if len(base64_str) != 22:
        return base64_str

    # Local copies of the decode tables keep this function self-contained.
    chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    values = [0] * 128
    for rank, ch in enumerate(chars):
        values[ord(ch)] = rank
    hex_digits = "0123456789abcdef"

    out = [base64_str[0], base64_str[1]]
    for i in range(2, 22, 2):
        hi = values[ord(base64_str[i])]
        lo = values[ord(base64_str[i + 1])]
        out.append(hex_digits[hi >> 2])
        out.append(hex_digits[((hi & 3) << 2) | (lo >> 4)])
        out.append(hex_digits[lo & 0xF])

    hexed = "".join(out)
    return "-".join((hexed[:8], hexed[8:12], hexed[12:16], hexed[16:20], hexed[20:]))

def has_sp_skeleton_data(obj):
    """Depth-first search for a list tagged "sp.SkeletonData".

    Returns the first matching list found anywhere inside nested
    lists/dicts, or None when nothing matches.
    """
    if isinstance(obj, list):
        if obj and obj[0] == "sp.SkeletonData":
            return obj
        children = obj
    elif isinstance(obj, dict):
        children = obj.values()
    else:
        return None
    for child in children:
        hit = has_sp_skeleton_data(child)
        if hit:
            return hit
    return None

def getspresjson(bundle):
    """Fetch the bundle_sp config JSON for *bundle* from the JP server.

    Reads proj.confg.json to look up the bundle's hash, then downloads
    bundle_sp/<bundle>/config.<hash>.json and returns the parsed JSON.
    Raises ValueError when the hash is missing from the project config.
    """
    root = "https://eowgame.jcbgame.com/eow-jp-game"

    resp = requests.get(f"{root}/proj.confg.json")
    resp.raise_for_status()
    bundle_hash = resp.json().get("Config", {}).get(bundle)
    if not bundle_hash:
        raise ValueError(f"无法获取 Config -> {bundle} 的值")
    print(f"{bundle} = {bundle_hash}")

    resp = requests.get(f"{root}/bundle_sp/{bundle}/config.{bundle_hash}.json")
    resp.raise_for_status()
    return resp.json()

def getresjson(bundle):
    """Fetch the regular bundle config JSON for *bundle* from the JP server.

    Looks the bundle up in bundle/version.json by its abName, then downloads
    bundle/<bundle>/cc.config.<version>.json and returns the parsed JSON.
    Raises ValueError when the bundle entry or its version is missing.
    """
    root = "https://eowgame.jcbgame.com/eow-jp-game"

    resp = requests.get(f"{root}/bundle/version.json")
    resp.raise_for_status()
    entries = resp.json()

    entry = next((item for item in entries if item.get("abName") == bundle), None)
    if not entry:
        raise ValueError(f"未找到 abName 为 '{bundle}' 的数据")
    version = entry.get("version")
    if not version:
        raise ValueError(f"{bundle} 的 version 字段为空")
    print(f"anima version = {version}")

    resp = requests.get(f"{root}/bundle/{bundle}/cc.config.{version}.json")
    resp.raise_for_status()
    return resp.json()


def resdownloader(BASE, data,output_dir):
    """Extract Spine (sp.SkeletonData) assets referenced by a bundle config.

    BASE       -- CDN root URL of the bundle (no trailing slash).
    data       -- parsed config JSON with "uuids", "versions", "paths", "types".
    output_dir -- local root folder for the extracted files.

    Writes each skeleton's .atlas (and .json when the skeleton is embedded as
    JSON) directly; binary .skel and .png downloads are appended to
    eowurl.txt in aria2c input-file format for a later batch download.
    """
    uuids = data["uuids"]
    vers_imp = data["versions"]["import"]
    vers_nat = data["versions"]["native"]
    paths = data["paths"]
    types = data["types"]

    if "sp.SkeletonData" not in types:
        print(f"[!] 未找到 sp.SkeletonData 数据")
        return

    # Type index of sp.SkeletonData; paths entries are [resource_path, type_index].
    # NOTE(review): the name suggests SpriteFrame, but it holds the SkeletonData index.
    SpriteFrameint = types.index("sp.SkeletonData")

    # "versions" arrays are flat [uuid_index, hash, uuid_index, hash, ...] pairs.
    vimp = {vers_imp[i]: vers_imp[i + 1] for i in range(0, len(vers_imp), 2)}
    vnat = {vers_nat[i]: vers_nat[i + 1] for i in range(0, len(vers_nat), 2)}
    os.makedirs(output_dir, exist_ok=True)

    for idx, sid in enumerate(uuids):
        # Only process assets whose declared type is sp.SkeletonData.
        if paths.get(str(idx)) and paths.get(str(idx))[1] == SpriteFrameint:
            # A compressed uuid may carry an "@suffix" sub-asset marker; decode
            # only the 22-character base64 part before it.
            if '@' in sid:
                uuid_base64, ext = sid.split('@', 1)
            else:
                uuid_base64, ext = sid, None
            uuid = decode_uuid(uuid_base64)
            full_uuid = f"{uuid}@{ext}" if ext else uuid
            # NOTE(review): a missing version entry yields a bare "." and hence a
            # "..json"/"..bin" URL — presumably every listed asset has a version.
            imp_ver = f".{vimp.get(idx, '')}"
            nat_ver = f".{vnat.get(idx, '')}"

            json_url = f"{BASE}/import/{uuid[:2]}/{full_uuid}{imp_ver}.json"
            skel_url = f"{BASE}/native/{uuid[:2]}/{full_uuid}{nat_ver}.bin"

            base_name = os.path.basename(paths[str(idx)][0])
            print(f'json - {base_name}.json -> {json_url} | skel - {base_name}.skel -> {skel_url}')

            try:
                response = requests.get(json_url)
                response.raise_for_status()
                json_data = response.json()
            except Exception as e:
                print(f"[!] 下载失败: {json_url} ({e})")
                continue

            # Skip import files that carry no sp.SkeletonData payload at all.
            if not has_sp_skeleton_data(json_data):
                continue

            # Mirror the resource path inside output_dir.
            res_path = paths[str(idx)][0]
            folder = os.path.join(output_dir, *res_path.split("/"))
            os.makedirs(folder, exist_ok=True)

            try:
                # json_data[3] lists per-type blocks: [type_name, [field, ...], ...].
                sp_type_block = next(item for item in json_data[3] if item[0] == "sp.SkeletonData")
                field_names = sp_type_block[1]
            except Exception as e:
                print(f"[!] 无法解析字段名: {e}")
                continue

            try:
                # json_data[5][0] is the first instance; its values start at element 1.
                obj_data = json_data[5][0]
                field_values = obj_data[1:]
                sp_dict = dict(zip(field_names, field_values))

                _name = sp_dict.get("_name", base_name)
                _atlasText = sp_dict.get("_atlasText", "")
                _skeletonJson = sp_dict.get("_skeletonJson", None)

                atlas_path = os.path.join(folder, f"{_name}.atlas")
                with open(atlas_path, "w", encoding="utf-8") as f:
                    f.write(_atlasText)
                print(f"[+] 写入 atlas 成功:{_name}.atlas")

                if _skeletonJson:
                    # JSON-format skeleton: dump it directly.
                    json_path = os.path.join(folder, f"{_name}.json")
                    with open(json_path, "w", encoding="utf-8") as f:
                        json.dump(_skeletonJson, f, ensure_ascii=False, indent=2)
                    print(f"[+] 写入 skeletonJson 成功:{_name}.json")
                else:
                    # Binary skeleton: queue the .bin URL for aria2c (URL + "out=" line).
                    skel_path = os.path.join(folder, f"{_name}.skel")
                    with open("eowurl.txt", "a") as f:
                        f.write(f"{skel_url}\n  out={skel_path}\n")

                # json_data[1] lists dependency uuids; queue each texture page.
                pngdata = json_data[1]
                for i, pngsid in enumerate(pngdata):
                    pnguuid = decode_uuid(pngsid[:22])
                    try:
                        idxpng = uuids.index(pngsid[:22])
                        nat_verpng = vnat[idxpng]
                        pngurl = f"{BASE}/native/{pnguuid[:2]}/{pnguuid}.{nat_verpng}.png"
                        # First page keeps the skeleton name; later pages get _2, _3, ...
                        png_tempname = f"{_name}_{i + 1}" if i >= 1 else f"{_name}"
                        png_path = os.path.join(folder, png_tempname + ".png")
                        print(f'{png_tempname}.png -> {pngurl}')
                        with open("eowurl.txt", "a") as f:
                            f.write(f"{pngurl}\n  out={png_path}\n")
                    except Exception as e:
                        print(f"[!] PNG处理失败: {e}")
            except Exception as e:
                print(f"[!] 解包 sp.SkeletonData 数据失败: {e}")

resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle_sp/normal",getspresjson('normal'),"eow\\bundle_sp\\normal")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle_sp/spdata",getspresjson('spdata'),"eow\\bundle_sp\\spdata")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle_sp/special",getspresjson('special'),"eow\\bundle_sp\\special")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle_sp/special2",getspresjson('special2'),"eow\\bundle_sp\\special2")

resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/anima",getresjson('anima'),"eow\\bundle\\anima")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/activity",getresjson('activity'),"eow\\bundle\\activity")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/barner",getresjson('barner'),"eow\\bundle\\barner")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/battle",getresjson('battle'),"eow\\bundle\\battle")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/bgs",getresjson('bgs'),"eow\\bundle\\bgs")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/career",getresjson('career'),"eow\\bundle\\career")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/cat",getresjson('cat'),"eow\\bundle\\cat")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/dictionary",getresjson('dictionary'),"eow\\bundle\\dictionary")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/mp4",getresjson('mp4'),"eow\\bundle\\mp4")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/ornament",getresjson('ornament'),"eow\\bundle\\ornament")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/sd",getresjson('sd'),"eow\\bundle\\sd")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/sound",getresjson('sound'),"eow\\bundle\\sound")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/texture",getresjson('texture'),"eow\\bundle\\texture")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/vd",getresjson('vd'),"eow\\bundle\\vd")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/icon",getresjson('icon'),"eow\\bundle\\icon")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/maps",getresjson('maps'),"eow\\bundle\\maps")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/normal",getresjson('normal'),"eow\\bundle\\normal")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/special",getresjson('special'),"eow\\bundle\\special")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/spdata",getresjson('spdata'),"eow\\bundle\\spdata")
resdownloader("https://eowgame.jcbgame.com/eow-jp-game/bundle/special2",getresjson('special2'),"eow\\bundle\\special2")
os.system("aria2c -i eowurl.txt -j 32 -s 16 -x 16 --check-certificate=false")

e服上线了

冲冲冲,我馋立绘的那个奶盖好久了

E服就是换个url…
json的hash都是一样的…

import os
import requests
import json

BASE64_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
# Map an ASCII code point to its base64 digit value (unused slots stay 0).
BASE64_VALUES = [0] * 128
for _value, _digit in enumerate(BASE64_CHARS):
    BASE64_VALUES[ord(_digit)] = _value

HEX_CHARS = list('0123456789abcdef')
# 36-slot UUID skeleton: empty slots receive hex digits, '-' slots are fixed.
_t = ['', '', '', '']
UUID_TEMPLATE = _t + _t + ['-'] + _t + ['-'] + _t + ['-'] + _t + ['-'] + _t + _t + _t
INDICES = [slot for slot, mark in enumerate(UUID_TEMPLATE) if mark != '-']

def decode_uuid(base64_str):
    """Expand a 22-char compressed Cocos uuid into canonical 8-4-4-4-12 form.

    Anything that is not exactly 22 characters comes back unchanged.  The
    first two characters are copied verbatim; each remaining pair of base64
    digits (12 bits) becomes three hex digits.
    """
    if len(base64_str) != 22:
        return base64_str

    # Self-contained decode tables (mirror the module-level constants).
    alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    b64val = [0] * 128
    for rank, symbol in enumerate(alphabet):
        b64val[ord(symbol)] = rank
    hexdig = "0123456789abcdef"

    pieces = [base64_str[0], base64_str[1]]
    for pos in range(2, 22, 2):
        a = b64val[ord(base64_str[pos])]
        b = b64val[ord(base64_str[pos + 1])]
        pieces.append(hexdig[a >> 2])
        pieces.append(hexdig[((a & 3) << 2) | (b >> 4)])
        pieces.append(hexdig[b & 0xF])

    flat = "".join(pieces)
    return f"{flat[:8]}-{flat[8:12]}-{flat[12:16]}-{flat[16:20]}-{flat[20:]}"

def has_sp_skeleton_data(obj):
    """Return the first nested list tagged "sp.SkeletonData", or None."""
    if isinstance(obj, list):
        if len(obj) > 0 and obj[0] == "sp.SkeletonData":
            return obj
        for element in obj:
            hit = has_sp_skeleton_data(element)
            if hit:
                return hit
        return None
    if isinstance(obj, dict):
        for element in obj.values():
            hit = has_sp_skeleton_data(element)
            if hit:
                return hit
    return None

def getspresjson(bundle):
    """Fetch the bundle_sp config JSON for *bundle* from the E server.

    Reads proj.confg.json for the bundle's hash, then downloads
    bundle_sp/<bundle>/config.<hash>.json and returns the parsed JSON.
    Raises ValueError when the hash is missing from the project config.
    """
    root = "https://cpev.fhaqlbp.com/eow-jp-game"

    resp = requests.get(f"{root}/proj.confg.json")
    resp.raise_for_status()
    bundle_hash = resp.json().get("Config", {}).get(bundle)
    if not bundle_hash:
        raise ValueError(f"无法获取 Config -> {bundle} 的值")
    print(f"{bundle} = {bundle_hash}")

    resp = requests.get(f"{root}/bundle_sp/{bundle}/config.{bundle_hash}.json")
    resp.raise_for_status()
    return resp.json()

def getresjson(bundle):
    """Fetch the regular bundle config JSON for *bundle* from the E server.

    Looks the bundle up in bundle/version.json by its abName, then downloads
    bundle/<bundle>/cc.config.<version>.json and returns the parsed JSON.
    Raises ValueError when the bundle entry or its version is missing.
    """
    root = "https://cpev.fhaqlbp.com/eow-jp-game"

    resp = requests.get(f"{root}/bundle/version.json")
    resp.raise_for_status()
    entries = resp.json()

    entry = next((item for item in entries if item.get("abName") == bundle), None)
    if not entry:
        raise ValueError(f"未找到 abName 为 '{bundle}' 的数据")
    version = entry.get("version")
    if not version:
        raise ValueError(f"{bundle} 的 version 字段为空")
    print(f"anima version = {version}")

    resp = requests.get(f"{root}/bundle/{bundle}/cc.config.{version}.json")
    resp.raise_for_status()
    return resp.json()


def resdownloader(BASE, data,output_dir):
    """Extract Spine (sp.SkeletonData) assets referenced by a bundle config.

    Same flow as the JP-server variant, plus: an existing .atlas makes the
    asset be skipped entirely, and the aria2c list file is eroeowurl.txt
    (written with explicit UTF-8 for the non-ASCII output paths).
    """
    uuids = data["uuids"]
    vers_imp = data["versions"]["import"]
    vers_nat = data["versions"]["native"]
    paths = data["paths"]
    types = data["types"]

    if "sp.SkeletonData" not in types:
        print(f"[!] 未找到 sp.SkeletonData 数据")
        return

    # Type index of sp.SkeletonData; paths entries are [resource_path, type_index].
    # NOTE(review): the name suggests SpriteFrame, but it holds the SkeletonData index.
    SpriteFrameint = types.index("sp.SkeletonData")

    # "versions" arrays are flat [uuid_index, hash, uuid_index, hash, ...] pairs.
    vimp = {vers_imp[i]: vers_imp[i + 1] for i in range(0, len(vers_imp), 2)}
    vnat = {vers_nat[i]: vers_nat[i + 1] for i in range(0, len(vers_nat), 2)}
    os.makedirs(output_dir, exist_ok=True)

    for idx, sid in enumerate(uuids):
        # Only process assets whose declared type is sp.SkeletonData.
        if paths.get(str(idx)) and paths.get(str(idx))[1] == SpriteFrameint:
            # A compressed uuid may carry an "@suffix" sub-asset marker; decode
            # only the 22-character base64 part before it.
            if '@' in sid:
                uuid_base64, ext = sid.split('@', 1)
            else:
                uuid_base64, ext = sid, None
            uuid = decode_uuid(uuid_base64)
            full_uuid = f"{uuid}@{ext}" if ext else uuid
            # NOTE(review): a missing version entry yields a bare "." and hence a
            # "..json"/"..bin" URL — presumably every listed asset has a version.
            imp_ver = f".{vimp.get(idx, '')}"
            nat_ver = f".{vnat.get(idx, '')}"

            json_url = f"{BASE}/import/{uuid[:2]}/{full_uuid}{imp_ver}.json"
            skel_url = f"{BASE}/native/{uuid[:2]}/{full_uuid}{nat_ver}.bin"

            base_name = os.path.basename(paths[str(idx)][0])
            print(f'json - {base_name}.json -> {json_url} | skel - {base_name}.skel -> {skel_url}')

            try:
                response = requests.get(json_url)
                response.raise_for_status()
                json_data = response.json()
            except Exception as e:
                print(f"[!] 下载失败: {json_url} ({e})")
                continue

            # Skip import files that carry no sp.SkeletonData payload at all.
            if not has_sp_skeleton_data(json_data):
                continue

            # Mirror the resource path inside output_dir.
            res_path = paths[str(idx)][0]
            folder = os.path.join(output_dir, *res_path.split("/"))
            os.makedirs(folder, exist_ok=True)

            try:
                # json_data[3] lists per-type blocks: [type_name, [field, ...], ...].
                sp_type_block = next(item for item in json_data[3] if item[0] == "sp.SkeletonData")
                field_names = sp_type_block[1]
            except Exception as e:
                print(f"[!] 无法解析字段名: {e}")
                continue

            try:
                # json_data[5][0] is the first instance; its values start at element 1.
                obj_data = json_data[5][0]
                field_values = obj_data[1:]
                sp_dict = dict(zip(field_names, field_values))

                _name = sp_dict.get("_name", base_name)
                _atlasText = sp_dict.get("_atlasText", "")
                _skeletonJson = sp_dict.get("_skeletonJson", None)

                atlas_path = os.path.join(folder, f"{_name}.atlas")
                # NOTE(review): this `continue` also skips re-queuing the skel/png
                # URLs for an already-extracted atlas; a future overwrite flag
                # could go here.
                if os.path.exists(atlas_path): # TODO: add an overwrite flag
                    print(f"[!] 已存在 atlas,跳过:{_name}.atlas")
                    continue
                with open(atlas_path, "w", encoding="utf-8") as f:
                    f.write(_atlasText)
                print(f"[+] 写入 atlas 成功:{_name}.atlas")

                if _skeletonJson:
                    # JSON-format skeleton: dump it directly.
                    json_path = os.path.join(folder, f"{_name}.json")
                    with open(json_path, "w", encoding="utf-8") as f:
                        json.dump(_skeletonJson, f, ensure_ascii=False, indent=2)
                    print(f"[+] 写入 skeletonJson 成功:{_name}.json")
                else:
                    # Binary skeleton: queue the .bin URL for aria2c (URL + "out=" line).
                    skel_path = os.path.join(folder, f"{_name}.skel")
                    with open("eroeowurl.txt", "a", encoding="utf-8") as f:
                        f.write(f"{skel_url}\n  out={skel_path}\n")

                # json_data[1] lists dependency uuids; queue each texture page.
                pngdata = json_data[1]
                for i, pngsid in enumerate(pngdata):
                    pnguuid = decode_uuid(pngsid[:22])
                    try:
                        idxpng = uuids.index(pngsid[:22])
                        nat_verpng = vnat[idxpng]
                        pngurl = f"{BASE}/native/{pnguuid[:2]}/{pnguuid}.{nat_verpng}.png"
                        # First page keeps the skeleton name; later pages get _2, _3, ...
                        png_tempname = f"{_name}_{i + 1}" if i >= 1 else f"{_name}"
                        png_path = os.path.join(folder, png_tempname + ".png")
                        print(f'{png_tempname}.png -> {pngurl}')
                        with open("eroeowurl.txt", "a", encoding="utf-8") as f:
                            f.write(f"{pngurl}\n  out={png_path}\n")
                    except Exception as e:
                        print(f"[!] PNG处理失败: {e}")
            except Exception as e:
                print(f"[!] 解包 sp.SkeletonData 数据失败: {e}")

if os.path.exists("eroeowurl.txt"):
    os.remove("eroeowurl.txt")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle_sp/normal",getspresjson('normal'),"resdownload\\EroLab幻能边境\\bundle_sp\\normal")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle_sp/spdata",getspresjson('spdata'),"resdownload\\EroLab幻能边境\\bundle_sp\\spdata")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle_sp/special",getspresjson('special'),"resdownload\\EroLab幻能边境\\bundle_sp\\special")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle_sp/special2",getspresjson('special2'),"resdownload\\EroLab幻能边境\\bundle_sp\\special2")

resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/anima",getresjson('anima'),"resdownload\\EroLab幻能边境\\bundle\\anima")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/activity",getresjson('activity'),"resdownload\\EroLab幻能边境\\bundle\\activity")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/barner",getresjson('barner'),"resdownload\\EroLab幻能边境\\bundle\\barner")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/battle",getresjson('battle'),"resdownload\\EroLab幻能边境\\bundle\\battle")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/bgs",getresjson('bgs'),"resdownload\\EroLab幻能边境\\bundle\\bgs")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/career",getresjson('career'),"resdownload\\EroLab幻能边境\\bundle\\career")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/cat",getresjson('cat'),"resdownload\\EroLab幻能边境\\bundle\\cat")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/dictionary",getresjson('dictionary'),"resdownload\\EroLab幻能边境\\bundle\\dictionary")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/mp4",getresjson('mp4'),"resdownload\\EroLab幻能边境\\bundle\\mp4")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/ornament",getresjson('ornament'),"resdownload\\EroLab幻能边境\\bundle\\ornament")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/sd",getresjson('sd'),"resdownload\\EroLab幻能边境\\bundle\\sd")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/sound",getresjson('sound'),"resdownload\\EroLab幻能边境\\bundle\\sound")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/texture",getresjson('texture'),"resdownload\\EroLab幻能边境\\bundle\\texture")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/vd",getresjson('vd'),"resdownload\\EroLab幻能边境\\bundle\\vd")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/icon",getresjson('icon'),"resdownload\\EroLab幻能边境\\bundle\\icon")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/maps",getresjson('maps'),"resdownload\\EroLab幻能边境\\bundle\\maps")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/normal",getresjson('normal'),"resdownload\\EroLab幻能边境\\bundle\\normal")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/special",getresjson('special'),"resdownload\\EroLab幻能边境\\bundle\\special")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/spdata",getresjson('spdata'),"resdownload\\EroLab幻能边境\\bundle\\spdata")
resdownloader("https://cpev.fhaqlbp.com/eow-jp-game/bundle/special2",getresjson('special2'),"resdownload\\EroLab幻能边境\\bundle\\special2")
os.system("aria2c -i eroeowurl.txt -j 32 -s 16 -x 16 --check-certificate=false")

好吧我搞错了。。。
E服似乎重做了cg的部分细节但是较为违和(指挖孔)

1 个赞