From cb556b47c083aab877a829ae39facb7ab2592b00 Mon Sep 17 00:00:00 2001 From: ygidtu Date: Thu, 26 Feb 2026 14:02:42 +0800 Subject: [PATCH] =?UTF-8?q?=E6=8F=90=E4=BA=A4=E9=A5=B1=E5=92=8C=E7=BC=96?= =?UTF-8?q?=E8=BE=91=E7=9A=84=E7=9B=B8=E5=85=B3=E8=AE=BE=E8=AE=A1=EF=BC=8C?= =?UTF-8?q?=E5=8F=8A=E6=A3=80=E9=AA=8C=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 301 ++++ better_input_seq.py | 89 + combine_pridict2_primedesign.py | 222 +++ design/main.py | 450 +++++ design/pyproject.toml | 16 + design/src/editseq.py | 233 +++ design/src/liftover.py | 53 + design/src/mutation.py | 216 +++ design/src/reader.py | 236 +++ design/src/safe_target.py | 139 ++ design/src/snp.py | 114 ++ design/src/snv-N-2N-3N.py | 263 +++ filter.py | 87 + filter_freq.py | 210 +++ interactive/README.md | 0 interactive/db.py | 222 +++ interactive/frontend/README.md | 5 + interactive/frontend/auto-imports.d.ts | 77 + interactive/frontend/components.d.ts | 30 + interactive/frontend/index.html | 13 + interactive/frontend/package.json | 26 + interactive/frontend/pnpm-lock.yaml | 1554 +++++++++++++++++ interactive/frontend/public/vite.svg | 1 + interactive/frontend/src/App.vue | 26 + interactive/frontend/src/assets/vue.svg | 1 + .../frontend/src/components/HelloWorld.vue | 375 ++++ interactive/frontend/src/main.ts | 4 + interactive/frontend/src/style.css | 79 + interactive/frontend/tsconfig.app.json | 16 + interactive/frontend/tsconfig.json | 7 + interactive/frontend/tsconfig.node.json | 26 + interactive/frontend/vite.config.ts | 38 + interactive/main.py | 133 ++ interactive/pyproject.toml | 9 + merge_results.py | 53 + select_primedesign.py | 113 ++ 36 files changed, 5437 insertions(+) create mode 100644 .gitignore create mode 100644 better_input_seq.py create mode 100644 combine_pridict2_primedesign.py create mode 100644 design/main.py create mode 100644 design/pyproject.toml create mode 100644 design/src/editseq.py create 
mode 100644 design/src/liftover.py create mode 100644 design/src/mutation.py create mode 100644 design/src/reader.py create mode 100644 design/src/safe_target.py create mode 100644 design/src/snp.py create mode 100644 design/src/snv-N-2N-3N.py create mode 100644 filter.py create mode 100644 filter_freq.py create mode 100644 interactive/README.md create mode 100644 interactive/db.py create mode 100644 interactive/frontend/README.md create mode 100644 interactive/frontend/auto-imports.d.ts create mode 100644 interactive/frontend/components.d.ts create mode 100644 interactive/frontend/index.html create mode 100644 interactive/frontend/package.json create mode 100644 interactive/frontend/pnpm-lock.yaml create mode 100644 interactive/frontend/public/vite.svg create mode 100644 interactive/frontend/src/App.vue create mode 100644 interactive/frontend/src/assets/vue.svg create mode 100644 interactive/frontend/src/components/HelloWorld.vue create mode 100644 interactive/frontend/src/main.ts create mode 100644 interactive/frontend/src/style.css create mode 100644 interactive/frontend/tsconfig.app.json create mode 100644 interactive/frontend/tsconfig.json create mode 100644 interactive/frontend/tsconfig.node.json create mode 100644 interactive/frontend/vite.config.ts create mode 100644 interactive/main.py create mode 100644 interactive/pyproject.toml create mode 100644 merge_results.py create mode 100644 select_primedesign.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..f83609e --- /dev/null +++ b/.gitignore @@ -0,0 +1,301 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before 
PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + + +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +lerna-debug.log* +.pnpm-debug.log* + +# Diagnostic reports (https://nodejs.org/api/report.html) +report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json + +# Runtime data +pids +*.pid +*.seed +*.pid.lock + +# Directory for instrumented libs generated by jscoverage/JSCover +lib-cov + +# Coverage directory used by tools like istanbul +coverage +*.lcov + +# nyc test coverage +.nyc_output + +# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) +.grunt + +# Bower dependency directory (https://bower.io/) +bower_components + +# node-waf configuration +.lock-wscript + +# Compiled binary addons (https://nodejs.org/api/addons.html) +build/Release + +# Dependency directories +node_modules/ +jspm_packages/ + +# Snowpack dependency directory (https://snowpack.dev/) +web_modules/ + +# TypeScript cache +*.tsbuildinfo + +# Optional npm cache directory +.npm + +# Optional eslint cache +.eslintcache + +# Optional stylelint cache +.stylelintcache + +# Microbundle cache +.rpt2_cache/ +.rts2_cache_cjs/ +.rts2_cache_es/ +.rts2_cache_umd/ + +# Optional REPL history +.node_repl_history + +# Output of 'npm pack' +*.tgz + +# Yarn Integrity file +.yarn-integrity + +# dotenv environment variable files +.env +.env.development.local +.env.test.local +.env.production.local +.env.local + +# parcel-bundler cache (https://parceljs.org/) +.cache +.parcel-cache + +# Next.js build output +.next +out + +# Nuxt.js build / generate output +.nuxt +dist + +# Gatsby files +.cache/ +# Comment in the 
public line in if your project uses Gatsby and not Next.js +# https://nextjs.org/blog/next-9-1#public-directory-support +# public + +# vuepress build output +.vuepress/dist + +# vuepress v2.x temp and cache directory +.temp +.cache + +# Docusaurus cache and generated files +.docusaurus + +# Serverless directories +.serverless/ + +# FuseBox cache +.fusebox/ + +# DynamoDB Local files +.dynamodb/ + +# TernJS port file +.tern-port + +# Stores VSCode versions used for testing VSCode extensions +.vscode-test + +# yarn v2 +.yarn/cache +.yarn/unplugged +.yarn/build-state.yml +.yarn/install-state.gz +.pnp.* +Footer +© 2022 GitHub, Inc. +Footer navigation +Terms + + +.idea/ +*.png +*.pdf + +Mus_musculus.GRCm38.101.gtf.gz* +hg38.chr19.gtf* +Homo_sapiens.GRCh38.101.sorted.gtf.gz* +SRX8994511.corrected_reads.bed.gz* +SRX8994511_sample.bed.gz* +tmp +new_run.sh +example.sorted.sorted.gtf +example.sorted.sorted.gtf.gz +example.sorted.sorted.gtf.gz.tbi + +.vscode/ + +docs/_* +plots/ +conda_build.py +run.sh +param.py +.DS_Store + +ui/ +*.rds +*.zip +example/ +recipes/ +AppDir/ +appimage-build/ +*_issue +.ruff_cache +*.csv \ No newline at end of file diff --git a/better_input_seq.py b/better_input_seq.py new file mode 100644 index 0000000..83baf28 --- /dev/null +++ b/better_input_seq.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +import csv +import os +import gzip +from glob import glob +from tqdm import tqdm + +import pandas as pd + + +def load_left_aa(ref): + df = pd.read_csv(ref) + + aa = set() + for _, row in df.iterrows(): + gene, pos = row["gene"], row["aa_pos"] + aa.add(f"{gene}_AA{pos}") + + return aa + + +def reader(path): + with gzip.open(path, "rt") as r: + dict_reader = csv.DictReader(r) + + for row in tqdm(dict_reader): + yield row + + + +def process_seq(sequence: str): + + before, codon = sequence.split("(") + codon, after = codon.split(")") + + src, dst = codon.split("/") + + mismatch_codon = sum([x != y for x, y in zip(src, dst)]) + if 
mismatch_codon == 1: + if src[:2] == dst[:2]: + before += src[:2] + return f"{before}({src[-1]}/{dst[-1]}){after}" + if src[1:] == dst[1:]: + after = src[1:] + after + return f"{before}({src[0]}/{dst[0]}){after}" + + before += src[0] + after = src[-1] + after + return f"{before}({src[1]}/{dst[1]}){after}" + elif mismatch_codon == 2: + if src[0] == dst[0]: + before = before + src[0] + return f"{before}({src[1:]}/{dst[1:]}){after}" + if src[-1] == dst[-1]: + after = src[-1] + after + return f"{before}({src[:2]}/{dst[:2]}){after}" + return None + # return sequence + + +def main(ref, infile, outfile): + ref = load_left_aa(ref) + + data = [] + for file in glob(infile): + for row in reader(file): + seq_name = row["sequence_name"].split("_")[:2] + seq_name = "_".join(seq_name) + + if seq_name in ref: + row["editseq"] = process_seq(row["editseq"]) + + if row["editseq"]: + row.pop("strategy") + row.pop("mutation_type") + data.append(row) + + with gzip.open(outfile, "wt+") as w: + dict_writer = csv.DictWriter(w, fieldnames=data[0].keys()) + dict_writer.writeheader() + + # 写入数据行 + dict_writer.writerows(data) + + +if __name__ == '__main__': + from fire import Fire + Fire(main) diff --git a/combine_pridict2_primedesign.py b/combine_pridict2_primedesign.py new file mode 100644 index 0000000..9f8f2e4 --- /dev/null +++ b/combine_pridict2_primedesign.py @@ -0,0 +1,222 @@ +import csv +import gzip +import logging +from typing import Set, Dict, Iterator + +# 配置日志记录 +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') +logger = logging.getLogger(__name__) + + +def read_excluded_sequences(file_path: str, sequence_column: str = 'sequence_name') -> Set[str]: + """ + 从CSV文件中读取需要排除的序列名称 + + Args: + file_path: CSV文件路径 + sequence_column: 序列名称所在的列名 + + Returns: + 排除序列名称的集合 + """ + excluded = set() + try: + with gzip.open(file_path, 'rt', newline='', encoding='utf-8') as f: + reader = csv.DictReader(f) + + # 检查必要的列是否存在 + if sequence_column not in 
reader.fieldnames: + raise ValueError(f"CSV文件中缺少'{sequence_column}'列") + + # 逐行读取,收集序列名称 + for row in reader: + sequence_name = row.get(sequence_column) + if sequence_name: + excluded.add(sequence_name.strip()) + + logger.info(f"从 {file_path} 读取了 {len(excluded)} 个排除序列") + return excluded + + except FileNotFoundError: + logger.error(f"文件不存在: {file_path}") + raise + except Exception as e: + logger.error(f"读取文件 {file_path} 时出错: {e}") + raise + + +def validate_csv_headers(file_path: str, expected_headers: Set[str], gzipped: bool = True) -> bool: + """ + 验证CSV文件是否包含必需的列头 + + Args: + file_path: 文件路径 + expected_headers: 必需的列头集合 + gzipped: 是否为gzip压缩文件 + + Returns: + 验证是否通过 + """ + try: + if gzipped: + opener = gzip.open + mode = 'rt' + else: + opener = open + mode = 'r' + + with opener(file_path, mode, newline='', encoding='utf-8') as f: + # 读取第一行作为表头 + reader = csv.reader(f) + headers = next(reader, None) + + if not headers: + logger.error(f"文件 {file_path} 没有表头或为空") + return False + + # 检查必需列是否存在 + headers_set = set(headers) + missing_headers = expected_headers - headers_set + + if missing_headers: + logger.error(f"文件 {file_path} 缺少必需列: {missing_headers}") + return False + + logger.info(f"文件 {file_path} 表头验证通过") + return True + + except Exception as e: + logger.error(f"验证文件 {file_path} 表头时出错: {e}") + return False + + +def process_prime_design(primedesign_path: str, excluded_sequences: Set[str], + output_path: str, batch_size: int = 10000) -> int: + """ + 处理PrimeDesign文件,过滤排除序列 + + Args: + primedesign_path: PrimeDesign文件路径(gzip压缩) + excluded_sequences: 需要排除的序列集合 + output_path: 输出文件路径 + batch_size: 批量写入大小 + + Returns: + 处理的行数 + """ + processed_count = 0 + written_count = 0 + + try: + with gzip.open(primedesign_path, 'rt', newline='', encoding='utf-8') as input_file, \ + gzip.open(output_path, 'wt', newline='', encoding='utf-8') as output_file: + + # 创建CSV读写器 + reader = csv.DictReader(input_file) + writer = csv.DictWriter(output_file, fieldnames=reader.fieldnames) + + # 写入表头 
+ writer.writeheader() + + # 逐行处理数据 + for row in reader: + processed_count += 1 + + try: + gRNA_type = row.get('gRNA_type', '').strip() + target_name = row.get('Target_name', '').strip() + + # 只处理pegRNA,且不在排除列表中 + if gRNA_type == "pegRNA" and target_name not in excluded_sequences: + writer.writerow(row) + written_count += 1 + + # 定期刷新缓冲区 + if written_count % batch_size == 0: + output_file.flush() + + except KeyError as e: + logger.warning(f"第 {processed_count} 行缺少字段 {e},跳过该行") + continue + except Exception as e: + logger.warning(f"处理第 {processed_count} 行时出错: {e},跳过该行") + continue + + # 最终刷新缓冲区 + output_file.flush() + + logger.info(f"处理完成: 处理了 {processed_count} 行,写入了 {written_count} 行") + return written_count + + except Exception as e: + logger.error(f"处理PrimeDesign文件时出错: {e}") + raise + + +def main(pegrna: str, primedesign: str, output: str) -> None: + """ + 主函数:处理PrimeDesign输出文件,过滤排除序列 + + Args: + pegrna: 包含需要排除的序列的CSV文件 + primedesign: PrimeDesign输出文件(gzip压缩) + output: 输出文件前缀 + """ + logger.info("开始处理PrimeDesign文件") + + # 步骤1: 验证输入文件格式 + logger.info("验证输入文件格式...") + + # 验证pegrna文件格式 + if not validate_csv_headers(pegrna, {'sequence_name'}, gzipped=True): + raise ValueError("pegrna文件格式验证失败") + + # 验证primedesign文件格式 + if not validate_csv_headers(primedesign, {'gRNA_type', 'Target_name'}, gzipped=True): + raise ValueError("primedesign文件格式验证失败") + + # 步骤2: 读取排除序列 + logger.info("读取需要排除的序列...") + excluded_sequences = read_excluded_sequences(pegrna) + + # 步骤3: 处理PrimeDesign文件 + logger.info("开始处理PrimeDesign文件...") + output_path = f"{output}_PrimeDesign_pegRNA.csv.gz" + + written_count = process_prime_design( + primedesign_path=primedesign, + excluded_sequences=excluded_sequences, + output_path=output_path, + batch_size=10000 + ) + + logger.info(f"输出文件已保存: {output_path}") + logger.info(f"总共写入了 {written_count} 条pegRNA记录") + + +def safe_main(pegrna: str, primedesign: str, output: str) -> None: + """ + 带错误处理的主函数包装器 + + Args: + pegrna: 包含需要排除的序列的CSV文件 + primedesign: 
PrimeDesign输出文件(gzip压缩) + output: 输出文件前缀 + """ + try: + main(pegrna, primedesign, output) + logger.info("程序执行成功!") + except Exception as e: + logger.error(f"程序执行失败: {e}") + raise + + +# 如果直接运行此脚本 +if __name__ == "__main__": + import sys + + pegrna_file = sys.argv[1] + primedesign_file = sys.argv[2] + output_prefix = sys.argv[3] + + safe_main(pegrna_file, primedesign_file, output_prefix) \ No newline at end of file diff --git a/design/main.py b/design/main.py new file mode 100644 index 0000000..efac559 --- /dev/null +++ b/design/main.py @@ -0,0 +1,450 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +import json +import math +import os +import re +import sys +from glob import glob +import requests as rq +import pandas as pd +from loguru import logger +import numpy as np +from src.mutation import design_mutations_for_orf +from src.reader import (extract_orf_sequence, get_cds_for_gene, + load_uniprot_region, read_gtf, Region) +from src.liftover import convert_interval +from src.snp import decode_snp, generate_sequences_with_combinations +import itertools +from src.editseq import run_analysis + + +# 清除默认的 handler +logger.remove() + +# 添加一个只输出 INFO 及以上级别日志的 sink(如控制台) +# logger.add(level="INFO") +logger.add( + sys.stderr, + colorize=True, + format="{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {message}", + # {name}: {function}: {line} + level="INFO" +) + + +def split_regions(cds): + u""" + 切分原本的cds为3bp的氨基酸reigon + 测试用例 + 14:103698801-103699017 + 14:103699133-103699179 + 14:103699364-103699576 + 14:103703173-103703327 + 14:103707003-103707215 + 14:103708522-103708659 + 14:103711033-103711087 + """ + regions = [] + cds = sorted(cds, key=lambda x: (x.chrom, x.start, x.end)) + aa_codon_len = 3 + start = 0 + + for x in cds: + # 如果start为0,则直接从目前的区域开始 + if start == 0: + start = x.start + elif start < 0: + # 如果start为负值,说明上一个cds并不能完整划分为不同的aa, + # 因此,需要单独将起始的小区域单独写出来 + regions.append(Region(x.chrom, x.start, x.start - start, kind="start")) + regions[-1].addition = x + start = 
x.start - start + + while start + aa_codon_len <= x.end: + # 记录下是否跨边界,以及跨的是哪一个边界 + code = "regular" + if start == x.start: + code = "start" + elif start + aa_codon_len == x.end: + code = "end" + + regions.append(Region(x.chrom, start, start + aa_codon_len, kind=code)) + regions[-1].addition = x + start += aa_codon_len + + if start < x.end: + # 如果是跨到end边界上了,那么就记录跨的边界 + regions.append(Region(x.chrom, start, x.end, kind="end")) + regions[-1].addition = x + start = start - x.end + 1 + else: + # 如果没有,则把start的指针归零 + start = 0 + + return regions + + +def download_uniprot_region(protein, output): + resp = output.replace(".tsv", ".json") + url = f"https://www.ebi.ac.uk/proteins/api/coordinates?accession={protein}" + + if os.path.exists(resp): + with open(resp, "r") as r: + resp = json.load(r) + else: + resp = rq.get(url, headers={"Accept": "application/json"}) + resp = resp.json() + + with open(output.replace(".tsv", ".json"), "w+") as w: + json.dump(resp, w, indent=4) + + if not resp[0]["name"].endswith("HUMAN"): + raise ValueError(f"protein is not human") + + __chroms__ = [str(x) for x in range(1, 23)] + ["chr" + str(x) for x in range(1, 23)] + ["X", "Y", "chrX", "chrY"] + + with open(output, "w+") as w: + w.write(f"#{url}\n") + for coord in resp[0]["gnCoordinate"]: + chromosome = coord["genomicLocation"]["chromosome"] + + if chromosome not in __chroms__: + continue + + for row in coord["genomicLocation"]["exon"]: + genome = row["genomeLocation"] + genome = str(genome["begin"]["position"]) + "-" + str(genome["end"]["position"]) + + protein = row["proteinLocation"] + + if "end" not in protein and "position" in protein: + protein = [str(protein["position"]["position"]), "-", str(protein["position"]["position"])] + else: + protein = [str(protein["begin"]["position"]), "-", str(protein["end"]["position"])] + + row = f"{chromosome}:{genome}\t{'\t'.join(protein)}" + w.write(row + "\n") + + break + + +def get_aa_coords(genes, output): + os.makedirs(output, exist_ok=True) + df = 
pd.read_excel(genes) + # df = df.loc[df["Batch"] == 1, :] + + for _, row in df.iterrows(): + gene_name = row[1] + + url = f"https://rest.uniprot.org/uniprotkb/search?query=gene:{gene_name}+AND+organism_id:9606+AND+reviewed:true&format=json" + resp = rq.get(url) + + for row in resp.json().get("results", []): + if "HUMAN" in row["uniProtkbId"]: + priority = row["primaryAccession"] + download_uniprot_region(priority, os.path.join(output, f"{gene_name}_{priority}.tsv")) + break + + + +def adjust_cross_border_region(row): + start = row.start + end = row.end + if len(row) < 3 and "cross" in row.kind: + if row.kind == "cross_start": + start = end - 3 + else: + end = start + 3 + return f"{row.chrom}:{start}-{end}" + return str(row) + + +def design_by_aa(genes, fasta, output, stop_codon = False): + u""" 根据氨基酸设计错配的位点和错配规则 """ + df = [] + for gene in glob(os.path.join(genes, "*.tsv")): + logger.info(f"开始设计突变 {gene}...") + key = os.path.basename(gene).split(".")[0] + + # 读取已有的区域 + cds = load_uniprot_region(gene) + + # 按照氨基酸位置划分 + cds = split_regions(cds) + + if not cds: + continue + + # 提取序列 + cds = extract_orf_sequence(fasta, cds, half_open=True) + + for idx, x in enumerate(cds): + for strategy in ["3N"]: + results = design_mutations_for_orf(x.sequence, strategy=strategy) + for res in results: + for var in res["variants"]: + if var == res["original_codon"]: + continue + + # 如果1个bp在起点,则说明2bp在前边end,该位点的前2bp必须与记录的内含子相同 + if "cross_start" == x.kind and len(x) == 1 and var[:2] != x.sequence[:2]: + continue + + # 如果2bp在起点,则说明1bp在前边end,则该位点的第一个碱基必须为内含子相同位点 + elif "cross_start" == x.kind and len(x) == 2 and var[0] != x.sequence[0]: + continue + + # 如果1bp在end,则说明2bp在后边,则该位点的2bp位点必须与内含子相同 + elif "cross_end" == x.kind and len(x) == 1 and var[1:] != x.sequence[1:]: + continue + + # 如果1bp在end,则说明1bp在后边,则该位点的1bp位点必须为C或G + elif "cross_end" == x.kind and len(x) == 2 and var[-1] not in ["C", "G"]: + continue + + row = [key, str(x.addition), idx+1, str(x), adjust_cross_border_region(x), 
x.kind, strategy, res["original_codon"], var] + df.append(row) + + df = pd.DataFrame(df) + df.columns = ["gene", "cds_region", "aa_index", "aa_region", "region_with_intron", "cross_cds_border", "strategy", + "origial_code", "mutation_code"] + strategy = [] + for _, row in df.iterrows(): + match = np.sum([x == y for x, y in zip(row["origial_code"], row["mutation_code"])]) + strategy.append(f"{3-match}N") + + df["strategy"] = strategy + + if stop_codon: + df = df[df["mutation_code"].isin(["TAA", "TAG", "TGA"])] + + df.to_csv(output, index = False) + + +def design_by_snp(snp_info, targets, genes, fasta, fasta_hg38, output): + logger.info("读取染色体") + chroms = {} + starts = {} + for gene in glob(os.path.join(genes, "*.tsv")): + key = os.path.basename(gene).split(".")[0] + cds = load_uniprot_region(gene) + cds = sorted(cds, key=lambda x:[x.chrom, x.start, x.end]) + chroms[key] = cds[0].chrom + starts[key] = cds[0].start + + logger.info(f"读取snp的信息:{snp_info}") + all_sheets = pd.read_excel(snp_info, sheet_name=None) + + # 遍历所有工作表 + res = {} + for sheet_name, df in all_sheets.items(): + temp = {} + for _, row in df.iterrows(): + cdna = row["DNA change (cDNA) "] + hg38 = row["DNA change (genomic) (hg19)     "] + temp[cdna] = hg38 + for sheet in re.split(r"[\((\s\))]", sheet_name): + res[sheet] = temp + + print(res.keys()) + + logger.info(f"读取目标:{targets}") + df = pd.read_excel(targets, sheet_name=2) + + with open(output, "w+") as w: + w.write(",".join(["gene", "cdna code", "genomic code", "mutation_region", "version", "original_codon", "mutation_code"]) + "\n") + for column in df.columns: + if "Unnamed" in column: + continue + + for code in df[column]: + if not isinstance(code, str) and math.isnan(code): + continue + + genomic_code = res.get(column, {}).get(code) + + if genomic_code: + sites, rule = decode_snp(genomic_code) + elif str(code).startswith("c."): + sites, rule = decode_snp(code, ref_start=starts["FANCD2" if column == "FAND2" else column]) + else: + continue + + 
region = Region(chroms["FANCD2" if column == "FAND2" else column], start=sites[0], end=sites[-1]) + + hg38 = False + if genomic_code: + region = extract_orf_sequence(fasta, [region])[0] + elif str(code).startswith("c."): + hg38 = True + region = extract_orf_sequence(fasta_hg38, [region])[0] + + original, replacement = "", "" + if ">" in rule: + original, replacement = rule.split(">") + original = region.sequence + elif rule == "dup": + original = region.sequence + replacement = original * 2 + elif rule == "del": + original = region.sequence + replacement = "" + elif rule == "ins": + replacement = region.sequence + elif "delins" in rule: + original = region.sequence + replacement = rule.replace("delins", "") + elif "ins" in rule: + original = region.sequence + replacement = rule.replace("ins", "") + + if not genomic_code: + genomic_code = "" + + # 序列中所有N替换后的排列组合 + for o, r in itertools.product(generate_sequences_with_combinations(original), generate_sequences_with_combinations(replacement)): + w.write(",".join([column, code.strip(), str(genomic_code).strip(), str(region), "hg38" if hg38 else "hg19", o, r]) + "\n") + + # data = pd.DataFrame(data) + # data.columns = ["gene", "cdna code", "genomic code", "mutation_region", "original_codon", "mutation_code"] + # data.to_csv(output, index = False) + + + +def extract_fastq_seq(fastq: str, chrom, start, end): + import pysam + with pysam.FastaFile(fastq) as fh: + rec = fh.fetch(str(chrom), start, end) + # print(rec) + return rec + + +def decode_mutation(rule: str, sequence): + original, replacement = "", "" + if ">" in rule: + original, replacement = rule.split(">") + original = sequence + elif rule == "dup": + original = sequence + replacement = original * 2 + elif rule == "del": + original = sequence + replacement = "" + elif rule == "ins": + replacement = sequence + elif "delins" in rule: + original = sequence + replacement = rule.replace("delins", "") + elif "ins" in rule: + original = sequence + replacement = 
rule.replace("ins", "") + return original, replacement + + +def design_by_hmgd(data, fasta, outfile): + import re + res = pd.read_csv(data) + # print(res.head()) + + # hgvs + # chromosome + # startCoord + # endCoord + + data = [] + for idx, row in res.iterrows(): + + key = row["gene"] + "_" + str(idx) + + try: + seq = extract_fastq_seq(fasta, int(row["chromosome"]), row["startCoord"] - 1,row["endCoord"]) + + seq, replace = decode_mutation(row["hgvs"], seq) + + if not seq: + continue + replace = re.sub(r"[\d_]", "", replace) + + if "del" in replace: + replace = "" + + print(key, seq, replace) + + before = extract_fastq_seq(fasta, int(row["chromosome"]), row["startCoord"] - 1 - 100, row["startCoord"]) + after = extract_fastq_seq(fasta, int(row["chromosome"]), row["endCoord"], row["endCoord"] + 100) + + + seq = f"{before}({seq}/{replace}){after}" + data.append({"sequence_name": key, "editseq": seq}) + except Exception: + continue + + data = pd.DataFrame(data) + data.to_csv(outfile, index=False) + + + + +if __name__ == "__main__": + from fire import Fire + + # get_aa_coords( + # "../metainfo/Cancer and blood disorder panels_v2.xlsx", + # "../gene_coords/batch2" + # ) + + # get_aa_coords( + # "../metainfo/DDR gene library in 2021 Cell.xlsx", + # "../gene_coords/positive" + # ) + + + # # Fire({"aa": design_by_aa}) + # design_by_aa( + # "../gene_coords/batch2", + # fasta="../ref/ensembl_115/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz", + # output="../gene_aa_target_batch2.csv.gz" + # ) + + # design_by_aa( + # "../gene_coords/positive", + # fasta="../ref/ensembl_115/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz", + # output="../gene_aa_target_positive.csv.gz", + # stop_codon = True + # ) + + # run_analysis( + # "../gene_aa_target_batch2.csv.gz", + # reference="../ref/ensembl_115/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz", + # outdir="../../prediction/input/batch2" + # ) + + # run_analysis( + # "../gene_aa_target_positive.csv.gz", + # 
reference="../ref/ensembl_115/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz", + # outdir="../../prediction/input/positive" + # ) + + # 生成snp结构,snp_info是整理完的snp信息 + # targets是记录了需要处理的基因 + # design_by_snp( + # snp_info="../metainfo/副本FA家族基因-20250829-DJJ_XD.xlsx", + # targets="../metainfo/实验计划.xlsx", + # output="gene_snp_target.csv", + # fasta="../ref/gencode/GRCh37.p13.genome.fa.gz", + # fasta_hg38="../ref/ensembl_115/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz", + # genes="../gene_coords" + # ) + + # design_by_hmgd( + # "../metainfo/allmut.csv", + # fasta="../ref/ensembl_115/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz", + # outfile="../../prediction/input/pos_v2.csv.gz" + # ) + + + # url = "https://www.ebi.ac.uk/proteins/api/coordinates?accession=P21359-1" + # download_uniprot_region("Test", "P21359") + + diff --git a/design/pyproject.toml b/design/pyproject.toml new file mode 100644 index 0000000..ddf86e5 --- /dev/null +++ b/design/pyproject.toml @@ -0,0 +1,16 @@ +[project] +name = "pgrna" +version = "0.1.0" +description = "Add your description here" +readme = "README.md" +requires-python = ">=3.12" +dependencies = [ + "biopython>=1.85", + "fire>=0.7.1", + "loguru>=0.7.3", + "openpyxl>=3.1.5", + "pandas>=2.3.3", + "pyfaidx>=0.9.0.3", + "pyliftover>=0.4.1", + "rich>=14.2.0", +] diff --git a/design/src/editseq.py b/design/src/editseq.py new file mode 100644 index 0000000..1982a8b --- /dev/null +++ b/design/src/editseq.py @@ -0,0 +1,233 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +created by lanzl +modified by zym +""" + +import os +import pandas as pd +import re +import sys +from pyfaidx import Fasta, FetchError + +HG19_FASTA_PATH = "/rawdata1/project/peRNA_design/ref/gencode/GRCh37.p13.genome.fa.gz" +HG38_FASTA_PATH = "/rawdata1/project/peRNA_design/ref/ensembl_115/Homo_sapiens.GRCh38.dna.primary_assembly.fa.gz" + + +def parse_region(region_str: str) -> tuple: + """解析 'chrom:start-end' 格式的基因组区域,并确保染色体名带有 'chr' 前缀。""" + match = 
re.match(r"(\w+):(\d+)-(\d+)", region_str) + chrom, start, end = match.groups() + + if not chrom.lower().startswith("chr"): + chrom = "chr" + chrom + + return chrom, int(start), int(end) + + +def extract_orf_sequence(genome: Fasta, chrom: str, start: int, end: int) -> str: + """ + 从预加载的 Fasta 对象中提取序列(1-based inclusive)。 + """ + try: + sequence = str(genome.get_seq(chrom, start, end)) + return sequence.upper() + except (KeyError, FetchError) as e: + alt_chrom = chrom + if chrom.lower().startswith("chr"): + alt_chrom = chrom[3:] + + if alt_chrom != chrom: + try: + sequence = str(genome.get_seq(alt_chrom, start, end)) + return sequence.upper() + except (KeyError, FetchError) as inner_e: + raise FetchError( + f"Requested rname '{chrom}' (also tried '{alt_chrom}') does not exist in FASTA index." + ) from inner_e + raise e + + +def generate_editseq( + original: str, + replacement: str, + region_str: str, + genome: Fasta, + flank_size: int = 100, +) -> str: + """构造 EditSeq 序列字符串(突变位点 + 100bp 侧翼)。""" + chrom, mut_start, mut_end = parse_region(region_str) + + # 计算侧翼坐标 + upstream_start = mut_start - flank_size + upstream_end = mut_start - 1 + downstream_start = mut_end + 1 + downstream_end = mut_end + flank_size + + # 提取侧翼序列 + upstream_flank = extract_orf_sequence(genome, chrom, upstream_start, upstream_end) + downstream_flank = extract_orf_sequence( + genome, chrom, downstream_start, downstream_end + ) + + original = str(original).strip() + replacement = str(replacement).strip() + + # --- 突变逻辑:所有替换(等长或不等长)统一使用 (ORIGINAL/REPLACEMENT) 格式 --- + mut_part = "" + if original and replacement: + # 替换或 Delins: (ORIGINAL/REPLACEMENT) + mut_part = f"({original}/{replacement})" + elif original: + # 删除: (-ORIGINAL) + mut_part = f"(-{original})" + elif replacement: + # 插入: (+REPLACEMENT) + mut_part = f"(+{replacement})" + else: + mut_part = "(Invalid mutation logic)" + + return f"{upstream_flank}{mut_part}{downstream_flank}" + + +# --- 氨基酸突变处理 --- +def process_aa_mutations(df_aa: 
pd.DataFrame, genome_hg38: Fasta) -> pd.DataFrame: + """处理氨基酸(AA)饱和诱变数据,并返回包含 EditSeq, strategy 和 mutation_type 的 DataFrame。""" + results = [] + + # 性能优化:使用 to_dict('records') 替代 iterrows() + for row in df_aa.to_dict("records"): + original = ( + str(row["origial_code"]).strip() if pd.notna(row["origial_code"]) else "" + ) + replacement = ( + str(row["mutation_code"]).strip() if pd.notna(row["mutation_code"]) else "" + ) + + # 确定突变类型 + if original and replacement: + mut_type = "REPL" + elif original and not replacement: + mut_type = "DEL" + elif not original and replacement: + mut_type = "INS" + else: + mut_type = "UNKNOWN" + + # 生成序列名称 + seq_name = f"{row['gene']}_AA{row['aa_index']}_{row['origial_code']}_{row['mutation_code']}" + + # 生成 EditSeq + editseq = generate_editseq( + original=row["origial_code"], + replacement=row["mutation_code"], + region_str=row["aa_region"], + genome=genome_hg38, + ) + + # 收集结果,包括 'strategy' 和 'mutation_type' 列 + results.append( + { + "sequence_name": seq_name, + "editseq": editseq, + "strategy": row["strategy"], + "mutation_type": mut_type, + } + ) + + return pd.DataFrame(results) + + +# --- SNP/cDNA 突变处理 --- +def process_snp_mutations( + df_snp: pd.DataFrame, genome_hg19: Fasta, genome_hg38: Fasta +) -> pd.DataFrame: + """处理 SNP/cDNA 突变数据,返回包含 EditSeq 和 mutation_type 的 DataFrame。""" + results = [] + + # 性能优化:使用 to_dict('records') 替代 iterrows() + for row in df_snp.to_dict("records"): + original = ( + str(row["original_codon"]).strip() + if pd.notna(row["original_codon"]) + else "" + ) + replacement = ( + str(row["mutation_code"]).strip() if pd.notna(row["mutation_code"]) else "" + ) + + # 根据版本选择合适的 Fasta 对象 + version = str(row["version"]).lower() + genome_to_use = genome_hg38 if version == "hg38" else genome_hg19 + + # 确定用于序列命名的突变类型 + if original and replacement: + mut_type = "REPL" + elif original and not replacement: + mut_type = "DEL" + elif not original and replacement: + mut_type = "INS" + else: + mut_type = "UNKNOWN" + + # 
def run_analysis(infile, reference, outdir):
    """Generate per-strategy EditSeq CSVs for amino-acid saturation mutagenesis.

    Parameters
    ----------
    infile : str
        CSV with the AA mutation targets (columns used downstream include
        gene, aa_index, origial_code, mutation_code, aa_region, strategy).
    reference : str
        Path to an indexed FASTA reference genome.
    outdir : str
        Directory for the per-strategy output CSVs (created if missing).
    """
    genome = Fasta(reference)
    os.makedirs(outdir, exist_ok=True)

    aa_df_input = pd.read_csv(infile)

    # Group AA mutations by (upper-cased) strategy and emit one CSV per group.
    aa_df_input["strategy"] = aa_df_input["strategy"].str.upper()
    strategies = aa_df_input["strategy"].unique()

    for strategy in strategies:
        # Skip rows with no strategy assigned.
        if pd.isna(strategy):
            continue

        aa_subset_df = aa_df_input[aa_df_input["strategy"] == strategy].copy()
        if aa_subset_df.empty:
            continue

        aa_df_processed = process_aa_mutations(aa_subset_df, genome)

        aa_output_file = f"aa_{strategy}_editseq_output.csv"
        aa_df_processed.to_csv(os.path.join(outdir, aa_output_file), index=False)


if __name__ == "__main__":
    # Bug fix: run_analysis() was previously invoked with no arguments even
    # though all three parameters are required, so running the script always
    # crashed with a TypeError. Provide a minimal stdlib CLI instead.
    import argparse

    parser = argparse.ArgumentParser(
        description="Generate per-strategy EditSeq CSVs for AA saturation mutagenesis."
    )
    parser.add_argument("infile", help="input CSV of AA mutation targets")
    parser.add_argument("reference", help="indexed FASTA reference genome")
    parser.add_argument("outdir", help="output directory")
    cli_args = parser.parse_args()
    run_analysis(cli_args.infile, cli_args.reference, cli_args.outdir)
def convert_interval(chrom, start, end):
    """Lift an interval (start, end) from the source genome (hg19) to the
    target genome (hg38) using the module-level ``lo`` LiftOver converter.

    Returns (new_chrom, new_start, new_end), or None when either endpoint
    has no mapping in the chain file.
    """
    # NOTE(review): pyliftover's convert_coordinate() is documented as
    # 0-based, while the original comments here assumed 1-based input.
    # Confirm which convention the callers use before trusting the offsets.
    result_start = lo.convert_coordinate(chrom, start)
    result_end = lo.convert_coordinate(chrom, end)

    if not result_start or not result_end:
        return None  # at least one endpoint cannot be lifted over

    # Keep the mapping with the highest conversion-chain score — index 3 of
    # each (chromosome, position, strand, score) tuple.
    best_start = max(result_start, key=lambda x: x[3])
    best_end = max(result_end, key=lambda x: x[3])

    # NOTE(review): the chromosome is taken from the start mapping only; the
    # end could in principle map to a different chromosome — verify upstream.
    new_chrom = best_start[0]
    new_start = best_start[1]
    new_end = best_end[1]

    # Normalize so that start <= end (liftover can invert orientation).
    if new_start >= new_end:
        new_start, new_end = new_end, new_start
    return new_chrom, new_start, new_end
# Nucleotide alphabet used by the variant generators below.
bases = ["A", "T", "G", "C"]


def generate_nnn():
    """Return all 64 possible codons (the fully random NNN set)."""
    return [first + second + third for first in bases for second in bases for third in bases]


def generate_2n_variants(original_codon, fixed_position=None):
    """Enumerate codons in which exactly one position keeps its original base.

    fixed_position: 0, 1 or 2 selects which base stays fixed; when None, all
    three patterns (NNX, NXN, XNN) are generated. Returns a sorted list of
    unique codons (the original codon itself is included).
    """
    keep_positions = [0, 1, 2] if fixed_position is None else [fixed_position]
    variants = set()

    for keep in keep_positions:
        free_slots = [i for i in range(3) if i != keep]
        for pair in itertools.product(bases, repeat=2):
            codon = [None, None, None]
            codon[keep] = original_codon[keep]
            for slot, base in zip(free_slots, pair):
                codon[slot] = base
            variants.add("".join(codon))

    return sorted(variants)
def design_mutations_for_orf(dna_seq, strategy="3N"):
    """Design saturation mutations for an entire ORF, codon by codon.

    strategy: '3N' (all 64 codons), '2N' (two random positions) or
    '1N' (one random position). Returns one result dict per codon with the
    variant list and per-amino-acid statistics.

    Raises ValueError when the ORF length is not a multiple of 3 or the
    strategy is unknown.
    """
    if len(dna_seq) % 3 != 0:
        raise ValueError(f"ORF 长度必须是 3 的倍数!{dna_seq}")

    num_codons = len(dna_seq) // 3
    results = []

    for i in range(num_codons):
        # Slice out the i-th codon and translate it.
        start = i * 3
        end = start + 3
        orig_codon = dna_seq[start:end]
        orig_aa = translate(orig_codon)

        logger.debug(
            f"\n--- 位点 {i + 1} (氨基酸 {i + 1}): {orig_aa} ({orig_codon}) ---"
        )

        # Pick the variant generator matching the requested strategy.
        variants = []
        if strategy == "3N":
            variants = generate_nnn()
            logger.debug(f"策略: 3N (NNN) → 共 {len(variants)} 种组合")
        elif strategy == "2N":
            variants = generate_2n_variants(orig_codon)
            logger.debug(f"策略: 2N (任意两个随机) → 共 {len(variants)} 种组合")
        elif strategy == "1N":
            variants = generate_1n_variants(orig_codon)
            logger.debug(f"策略: 1N (任意一个随机) → 共 {len(variants)} 种组合")
        else:
            raise ValueError("strategy 必须是 '3N', '2N', 或 '1N'")

        # Defensive filter for malformed codons (should never trigger).
        valid_variants = [v for v in variants if len(v) == 3]

        # Tally the amino acids produced by the variants, counting stops.
        mutant_aa_count = {}
        stop_count = 0
        for v in valid_variants:
            aa = translate(v)
            if aa == "*":
                stop_count += 1
            mutant_aa_count[aa] = mutant_aa_count.get(aa, 0) + 1

        logger.debug(f"→ 共产生 {len(valid_variants)} 个有效突变")
        logger.debug(f"→ 可产生 {len(mutant_aa_count)} 种不同氨基酸(含终止)")
        logger.debug(f"→ 引入终止密码子: {stop_count} 次")
        logger.debug(f"→ 氨基酸分布: {mutant_aa_count}")

        results.append(
            {
                "position": i + 1,
                "original_codon": orig_codon,
                "original_aa": orig_aa,
                "variants": valid_variants,
                "variant_count": len(valid_variants),
                "mutant_aa_count": mutant_aa_count,
                "stop_count": stop_count,
            }
        )

    return results
class Region(object):
    """Genomic interval (chrom, start, end, strand) used to merge overlapping
    CDS records and carry their extracted sequence.

    Notes:
      * ``__add__`` merges IN PLACE: it widens ``self`` and returns it.
      * Membership tests (``region in list``) use default identity equality;
        only ``__hash__`` is customized.
    """

    def __init__(self, chrom, start, end, strand="+", kind=None):
        self.chrom = chrom
        self.start = start
        self.end = end
        self.strand = strand
        self.sequence = None  # filled in later via set_seq()
        self.kind_ = kind     # raw kind label; see the `kind` property
        self.addition = None  # spare slot, not used in this module

    @classmethod
    def create(cls, region):
        """Alternate constructor from a 'chrom:start-end' string ('+' strand)."""
        chrom, sites = region.split(":")
        sites = [int(x) for x in sites.split("-")]
        return cls(chrom, sites[0], sites[-1], "+")

    def set_seq(self, sequence: str):
        """Attach the sequence (upper-cased); reverse-complement on '-' strand.

        Bug fix: the reverse complement is now computed from the upper-cased
        string. Previously ``Seq(sequence)`` used the raw input, so on the
        minus strand lower-case FASTA input kept its case and the ``upper()``
        result was discarded.
        """
        self.sequence = sequence.upper()

        # Minus-strand features must be reverse-complemented.
        if self.strand == "-":
            self.sequence = str(Seq(self.sequence).reverse_complement())

    def __and__(self, other):
        """True when ``other`` overlaps this region on the same chromosome."""
        if self.chrom != other.chrom:
            return False

        return self.start < other.end and self.end > other.start

    def __add__(self, other):
        """Merge an overlapping region into this one (mutates and returns self)."""
        if not self & other:
            raise ValueError("没有重合位点")

        self.start = min(self.start, other.start)
        self.end = max(self.end, other.end)
        return self

    def __str__(self) -> str:
        return f"{self.chrom}:{self.start}-{self.end}"

    def __hash__(self):
        return hash(str(self))

    def __len__(self):
        return self.end - self.start

    @property
    def kind(self):
        """Kind label; sub-codon regions are flagged as 'cross_<kind>'."""
        if len(self) >= 3:
            return self.kind_
        if not self.kind_:
            return ""
        else:
            return f"cross_{self.kind_}"
def parse_attributes(attr_str):
    """Parse a GTF attribute column into a {key: value} dict.

    Entries are semicolon-separated ``key "value"`` pairs; tokens without a
    space (malformed entries) are ignored, and surrounding double quotes are
    stripped from the value.
    """
    parsed = {}
    for chunk in attr_str.split(";"):
        chunk = chunk.strip()
        if chunk and " " in chunk:
            name, _, raw = chunk.partition(" ")
            parsed[name] = raw.strip('"')
    return parsed
def extract_orf_sequence(genome_fasta, cds_rows, half_open=False):
    """Fill each Region in ``cds_rows`` with its genomic sequence.

    Coordinates in ``cds_rows`` are GTF-style 1-based; pyfaidx slicing is
    0-based, hence the start-1 adjustment below. Sub-codon regions whose kind
    is 'cross_*' are widened to a full 3bp codon. The list is mutated in
    place (via Region.set_seq) and returned.

    Parameters:
        genome_fasta: path to an indexed FASTA file.
        cds_rows: list of Region objects (same chromosome assumed).
        half_open: when True, treat ``end`` as exclusive (end - 1 is used).
    """

    if not cds_rows:
        raise ValueError("not cds")

    seqname = cds_rows[0].chrom
    # NOTE(review): `strand` is read but never used below; Region.set_seq
    # handles reverse-complementing per region instead.
    strand = cds_rows[0].strand

    logger.debug(f"从参考基因组提取序列 (chr{seqname})...")
    # NOTE(review): the Fasta index is re-opened on every call — consider
    # passing a pre-loaded Fasta object if this runs in a loop.
    genome = Fasta(genome_fasta)

    # Fetch the chromosome record, retrying with/without the "chr" prefix to
    # tolerate different FASTA naming conventions ("chr1" vs "1").
    try:
        chrom_seq = genome[seqname]
    except KeyError:
        if "chr" in seqname:
            seqname = seqname.replace("chr", "")
        else:
            seqname = "chr" + seqname
        chrom_seq = genome[seqname]

    for row in cds_rows:
        start = int(row.start) - 1  # GTF is 1-based; pyfaidx slices are 0-based
        end = int(row.end) - (1 if half_open else 0)

        # Sub-codon fragments crossing a boundary get expanded to a full
        # codon anchored at the appropriate side.
        if len(row) < 3 and "cross" in row.kind:
            if row.kind == "cross_start":
                start = end - 3
            else:
                end = start + 3

        row.set_seq(chrom_seq[start:end].seq)

    return cds_rows
class Region:
    """A candidate interval inside a safe-harbor region.

    ``shift`` tries to align the interval start to a codon frame, and
    ``choose`` then samples codon-sized (3bp) sub-regions from it.
    """

    def __init__(self, chrom: str, start: int, end: int):
        self.chrom = chrom
        self.start = start
        self.end = end
        self.__shift__ = 0  # frame offset determined by shift()

    def __str__(self):
        return f"{self.chrom}:{self.start}-{self.end}"

    def shift(self, fasta: str):
        """Scan forward from ``start`` and record in ``self.__shift__`` the
        offset of a 3bp window that matches a known codon.

        NOTE(review): a match at i == 0 leaves __shift__ at 0, so the outer
        loop does NOT stop and a later offset can overwrite it; since every
        A/T/C/G triplet is in the codon table, __shift__ typically ends up 1.
        Probably intended to break on the first match — confirm.

        NOTE(review): extract_fastq_seq re-opens the FASTA for every offset
        tried, i.e. O(region length) file opens — consider caching the handle.
        """
        for i in range(0, self.end - self.start):
            if self.__shift__ != 0:
                break
            seq = extract_fastq_seq(fasta, Region(self.chrom, self.start+i, self.start+i+3))

            for _, values in codons():
                if seq in values:
                    self.__shift__ = i
                    break

    def choose(self, number: int = 3):
        """Return up to ``number`` codon-aligned 3bp sub-regions, sampled at
        random; returns all of them when fewer than ``number`` exist."""
        length_of_codon = 3

        # Enumerate full codon windows starting at the frame-shifted origin.
        regions = []
        for i in range(self.start + self.__shift__, self.end, length_of_codon):
            if i + length_of_codon > self.end:
                break
            regions.append([i, i + length_of_codon])

        # Sampling uses the module-level seeded `random`, not numpy.

        if number > len(regions):
            return [Region(self.chrom, x[0], x[1]) for x in regions]

        return [Region(self.chrom, x[0], x[1]) for x in random.sample(regions, number)]
def generate_sequences_with_combinations(seq):
    """Expand every run of 'N' in a DNA sequence into all A/T/C/G combinations.

    Parameters:
        seq (str): input DNA sequence, possibly containing 'N' wildcards.

    Returns:
        list[str]: every concrete sequence obtainable by substituting each
        run of k consecutive 'N's with all 4**k base combinations. A sequence
        without 'N' is returned as a single-element list.

    Fixes over the original: the shadowing re-import of ``itertools.product``
    and the intermediate ``list(...)`` materialization were removed; the
    unreachable "no N blocks" guard is gone (an 'N' is guaranteed here).
    """
    if "N" not in seq:
        return [seq]

    # Split into alternating literal segments and N-run segments, e.g.
    # "ANNG" -> [("seq", "A"), ("N", 2), ("seq", "G")].
    segments = []
    i = 0
    while i < len(seq):
        j = i
        if seq[i] == "N":
            while j < len(seq) and seq[j] == "N":
                j += 1
            segments.append(("N", j - i))  # ('N', 3) == three consecutive Ns
        else:
            while j < len(seq) and seq[j] != "N":
                j += 1
            segments.append(("seq", seq[i:j]))
        i = j

    # One option list per N run: all base strings of that run's length.
    n_block_options = [
        ["".join(p) for p in product("ATCG", repeat=length)]
        for seg_type, length in segments
        if seg_type == "N"
    ]

    # Cartesian product over the N runs, re-assembled around the literals.
    results = []
    for combo in product(*n_block_options):
        parts = []
        n_index = 0
        for seg_type, content in segments:
            if seg_type == "seq":
                parts.append(content)
            else:
                parts.append(combo[n_index])
                n_index += 1
        results.append("".join(parts))

    return results
def split_sequence_to_codons(full_seq: str, gene_name: str) -> List[Region]:
    """Chop the full sequence into consecutive 3bp codon Regions.

    A trailing partial codon (when the length is not divisible by 3) is
    dropped. Each Region keeps the codon text plus its absolute 0-based
    offset in the original sequence.
    """
    usable = len(full_seq) - len(full_seq) % 3
    return [
        Region(gene_name, offset, offset + 2, full_seq[offset : offset + 3], offset)
        for offset in range(0, usable, 3)
    ]
def generate_codon_mutations(original_codon: str, n_mutations: int) -> List[str]:
    """Enumerate every codon differing from ``original_codon`` at exactly
    ``n_mutations`` positions, drawing substitute bases from NUCLEOTIDES.

    Returns a sorted list of unique mutant codons (the original is excluded).
    """
    found = set()
    length = len(original_codon)

    # Pick which positions mutate, then take the cartesian product of the
    # allowed bases per position (original base excluded at mutated sites).
    for mutated_sites in itertools.combinations(range(length), n_mutations):
        per_position = [
            [b for b in NUCLEOTIDES if b != original_codon[i]]
            if i in mutated_sites
            else [original_codon[i]]
            for i in range(length)
        ]
        for candidate in itertools.product(*per_position):
            joined = "".join(candidate)
            if joined != original_codon:
                found.add(joined)

    return sorted(found)
def run_mutation_design(fasta_file: str, gene_name: str, output_base_name: str):
    """Run the saturation-mutagenesis pipeline and write one CSV per strategy.

    Steps: read the FASTA, select the record whose ID contains ``gene_name``
    (falling back to the longest record), split it into codons, generate all
    1N/2N/3N codon mutations with flanks, then sample up to TARGET_MUTATIONS
    rows per strategy (random_state=42) and save each group to
    ``output_base_name`` with the "{strategy}" placeholder substituted.
    """

    logger.info(f"Targeting gene: {gene_name}")
    fasta_data = read_fasta(fasta_file)
    full_seq, target_id = "", ""

    # Locate target sequence by case-insensitive substring match on the ID.
    for seq_id, seq in fasta_data.items():
        if gene_name.upper() in seq_id.upper():
            full_seq = seq
            target_id = seq_id
            break

    if not full_seq and fasta_data:
        # Fallback: use the longest sequence in the file.
        target_id, full_seq = max(fasta_data.items(), key=lambda item: len(item[1]))
        if full_seq:
            logger.warning(
                f"Using longest sequence ID: {target_id} (Length: {len(full_seq)} bp)"
            )

    if not full_seq:
        logger.error(f"Failed to extract target sequence.")
        return

    logger.info(f"Target sequence ID: {target_id}, Length: {len(full_seq)} bp")

    # 1. Generate ALL mutations (1N, 2N, 3N) in one pass over the codons.
    cds_regions = split_sequence_to_codons(full_seq, gene_name)
    all_mutations_df = generate_editseq_and_metadata(full_seq, cds_regions, gene_name)

    # 2. Process and save one output file per strategy.
    strategies = ["1N", "2N", "3N"]

    for strategy in strategies:
        # Filter for the current strategy
        strategy_df = all_mutations_df[all_mutations_df["strategy"] == strategy].copy()
        original_count = len(strategy_df)

        # Determine output file name (e.g., AAVS1_1N_150_mutations.csv)
        output_file_name = output_base_name.replace("{strategy}", strategy)

        if original_count == 0:
            logger.warning(
                f"Strategy {strategy}: No mutations generated. Skipping file creation for {output_file_name}."
            )
            continue

        # Random sampling caps each strategy at TARGET_MUTATIONS designs;
        # random_state=42 keeps the subset reproducible across runs.
        if original_count > TARGET_MUTATIONS:
            final_df = strategy_df.sample(n=TARGET_MUTATIONS, random_state=42)
            logger.success(
                f"Strategy {strategy}: Sampled {TARGET_MUTATIONS} mutations from {original_count} designs."
            )
        else:
            final_df = strategy_df
            logger.warning(
                f"Strategy {strategy}: Generated {original_count} mutations; saving all."
            )

        # Save result, ensuring column order
        final_df[["sequence_name", "editseq", "strategy", "mutation_type"]].to_csv(
            output_file_name, index=False
        )
        logger.success(f"Strategy {strategy}: Design saved to {output_file_name}.")
def main(ref, infile, outfile):
    """Filter a gzipped CSV, keeping only rows whose codon key is allowed.

    Parameters:
        ref: Excel workbook whose second sheet lists (aa, suffix) pairs; each
            amino acid is expanded into its codons to build the allowed
            "<codon>_<suffix>" key set.
        infile: gzipped CSV to filter; the key column is "sequence_name" or,
            failing that, "Target_name".
        outfile: gzipped CSV output (header preserved, matching rows only).
    """
    print(infile, outfile)
    df = pd.read_excel(ref, 1)

    # Build the lookup of allowed "<codon>_<suffix>" keys from the reference.
    keys = {}
    for _, row in df.iterrows():
        row = list(row)
        for src in amino_acid_to_codon(row[1]):
            keys[f"{src}_{row[2]}"] = 0

    if os.path.dirname(outfile):
        os.makedirs(os.path.dirname(outfile), exist_ok=True)

    header = None
    target = None
    with gzip.open(outfile, "wt+") as w:
        with gzip.open(infile, "rt") as r:
            for line in r:
                if header is None:
                    # First line: copy the header through and locate the key
                    # column. Bug fix: the index lookup used to run for every
                    # data line; it is constant, so compute it once here.
                    w.write(line.strip() + "\n")
                    header = line.strip().split(",")
                    try:
                        target = header.index("sequence_name")
                    except ValueError:
                        target = header.index("Target_name")
                    continue

                # Key = everything after the first two '_'-separated fields.
                key = line.strip().split(",")[target]
                key = "_".join(key.split("_")[2:]).strip('"')

                if key in keys:
                    w.write(line.strip() + "\n")
def reader(path: str, rt_len: int = 24):
    """Stream rows from a gzipped CSV, yielding one dict at a time.

    Memory usage stays constant (one row buffered), suitable for GB-scale
    files. Only rows whose RTlength parses as a number <= ``rt_len`` are
    yielded; rows with a missing or malformed RTlength are skipped.
    """
    with gzip.open(path, "rt", newline="") as f:
        for row in csv.DictReader(f):
            try:
                if float(row["RTlength"]) <= rt_len:
                    yield row
            except (TypeError, ValueError, KeyError):
                # Bug fix: only TypeError (missing column -> None) used to be
                # caught, so a non-numeric RTlength (ValueError) or an absent
                # header key (KeyError) crashed the whole stream instead of
                # skipping the bad row.
                continue
@click.command()
@click.option("-i", "--indir", type=str, help="字符串形式的输入路径,可以*通配多个文件和目录")
@click.option("-o", "--outdir", type=str, help="输出目录")
@click.option("-t", "--top-n", type=int, help="选择前几", default=3)
@click.option("-n", "--degenerate", is_flag=True, help="是否使用兼并碱基")
@click.argument('args', nargs=-1)  # catch any extra positional arguments
def main(indir, outdir, top_n, degenerate, args):
    """Fan __call_func__ out over every file matched by the input glob.

    ``indir`` is a glob pattern; ``outdir`` receives one filtered file per
    input. When the -i/-o options are omitted, the first/last positional
    arguments are used instead.
    """
    # Fall back to positional arguments when the options are omitted.
    if not indir and len(args) > 0:
        indir = args[0]
    if not outdir and len(args) > 0:
        outdir = args[-1]

    if indir == outdir:
        raise ValueError("indir and outdir should not be the same")

    os.makedirs(outdir, exist_ok=True)

    # One worker argument tuple per matched input file.
    args = [[f, outdir, top_n, degenerate] for f in glob(indir)]

    # Bug fix: Pool(0) raises ValueError when the glob matches nothing, and
    # Pool(len(args)) spawned one process per file without bound. Fail fast
    # on empty input and cap the pool at the CPU count.
    if not args:
        raise ValueError(f"no input files matched: {indir}")

    workers = min(len(args), os.cpu_count() or 1)
    with Pool(workers) as p:
        list(tqdm(p.imap(__call_func__, args), total=len(args)))
def format_data(value: Dict, mapping: Dict[str, str]) -> Dict[str, object]:
    """Translate raw CSV column names to DB field names and derive missing fields.

    :param value: one CSV row as a dict (raw column name -> cell text)
    :param mapping: raw column name -> model field name translation table
    :return: dict keyed by model field names, ready for insertion
    """
    res = {}
    # NOTE: the original iterated with ``for key, value in value.items()``,
    # shadowing the parameter — it only worked because the iterator is
    # created before the rebinding.  Use a distinct loop variable.
    for key, cell in value.items():
        if key in mapping:
            res[mapping[key]] = cell

    # Sequence names look like GENE_AA<pos>_<src>_<dst>; split once and reuse.
    parts = res["sequence"].split("_")
    if not res.get("src"):
        res["src"] = parts[-2]
        res["dst"] = parts[-1]

    # Strip every non-digit so "AA580" -> 580.
    res["aa"] = int(re.sub(r"\D", "", parts[1]))
    res["gene"] = parts[0]

    # PrimeDesign rows only carry the combined extension (RTT + PBS); split
    # it when the declared lengths account for the whole string.
    if not res.get("pbs") and res.get("extension") and res.get("pbs_len") and res.get("rtt_len"):
        pbs_len = int(res["pbs_len"])
        if len(res["extension"]) == pbs_len + int(res["rtt_len"]):
            res["pbs"] = res["extension"][:pbs_len]
            res["rtt"] = res["extension"][pbs_len:]
    return res
def index():
    """Create single-column indexes on the columns used for lookup/filtering.

    Emits the same ``CREATE INDEX IF NOT EXISTS`` statements as before, but
    drives both models from one table instead of two copy-pasted loops.
    """
    targets = [
        (
            Pridict2,
            "pridict2",
            ["gene", "aa", "sequence", "dst", "src",
             "k562", "hek", "pbs_len", "rtt_len"],
        ),
        (
            PrimeDesign,
            "prime_design",
            ["gene", "aa", "sequence", "dst", "src",
             "pbs_len", "rtt_len"],
        ),
    ]
    for model, table, columns in targets:
        for name in columns:
            print(model.__name__, name)
            sql = f"CREATE INDEX IF NOT EXISTS {model.__name__}_{name}_idx ON {table} ({name});"
            db.execute_sql(sql)


def table_columns(table):
    """Return the class-level attributes of *table* whose names contain no ``__``.

    For a peewee model this yields the declared field descriptors keyed by
    attribute name; dunder machinery is filtered out.
    """
    return {name: attr for name, attr in table.__dict__.items() if "__" not in name}


if __name__ == "__main__":
    print(table_columns(Pridict2))
    pass
The template uses Vue 3 ` + + diff --git a/interactive/frontend/package.json b/interactive/frontend/package.json new file mode 100644 index 0000000..2ae1e45 --- /dev/null +++ b/interactive/frontend/package.json @@ -0,0 +1,26 @@ +{ + "name": "frontend", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vue-tsc -b && vite build", + "preview": "vite preview" + }, + "dependencies": { + "axios": "^1.13.5", + "naive-ui": "^2.43.2", + "unplugin-auto-import": "^21.0.0", + "unplugin-vue-components": "^31.0.0", + "vue": "^3.5.25" + }, + "devDependencies": { + "@types/node": "^24.10.1", + "@vitejs/plugin-vue": "^6.0.2", + "@vue/tsconfig": "^0.8.1", + "typescript": "~5.9.3", + "vite": "^7.3.1", + "vue-tsc": "^3.1.5" + } +} diff --git a/interactive/frontend/pnpm-lock.yaml b/interactive/frontend/pnpm-lock.yaml new file mode 100644 index 0000000..7f535dd --- /dev/null +++ b/interactive/frontend/pnpm-lock.yaml @@ -0,0 +1,1554 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + + .: + dependencies: + axios: + specifier: ^1.13.5 + version: 1.13.5 + naive-ui: + specifier: ^2.43.2 + version: 2.43.2(vue@3.5.27(typescript@5.9.3)) + unplugin-auto-import: + specifier: ^21.0.0 + version: 21.0.0 + unplugin-vue-components: + specifier: ^31.0.0 + version: 31.0.0(vue@3.5.27(typescript@5.9.3)) + vue: + specifier: ^3.5.25 + version: 3.5.27(typescript@5.9.3) + devDependencies: + '@types/node': + specifier: ^24.10.1 + version: 24.10.12 + '@vitejs/plugin-vue': + specifier: ^6.0.2 + version: 6.0.4(vite@7.3.1(@types/node@24.10.12))(vue@3.5.27(typescript@5.9.3)) + '@vue/tsconfig': + specifier: ^0.8.1 + version: 0.8.1(typescript@5.9.3)(vue@3.5.27(typescript@5.9.3)) + typescript: + specifier: ~5.9.3 + version: 5.9.3 + vite: + specifier: ^7.3.1 + version: 7.3.1(@types/node@24.10.12) + vue-tsc: + specifier: ^3.1.5 + version: 3.2.4(typescript@5.9.3) + +packages: + + 
'@babel/helper-string-parser@7.27.1': + resolution: {integrity: sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.28.5': + resolution: {integrity: sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==} + engines: {node: '>=6.9.0'} + + '@babel/parser@7.29.0': + resolution: {integrity: sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==} + engines: {node: '>=6.0.0'} + hasBin: true + + '@babel/types@7.29.0': + resolution: {integrity: sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==} + engines: {node: '>=6.9.0'} + + '@css-render/plugin-bem@0.15.14': + resolution: {integrity: sha512-QK513CJ7yEQxm/P3EwsI+d+ha8kSOcjGvD6SevM41neEMxdULE+18iuQK6tEChAWMOQNQPLG/Rw3Khb69r5neg==} + peerDependencies: + css-render: ~0.15.14 + + '@css-render/vue3-ssr@0.15.14': + resolution: {integrity: sha512-//8027GSbxE9n3QlD73xFY6z4ZbHbvrOVB7AO6hsmrEzGbg+h2A09HboUyDgu+xsmj7JnvJD39Irt+2D0+iV8g==} + peerDependencies: + vue: ^3.0.11 + + '@emotion/hash@0.8.0': + resolution: {integrity: sha512-kBJtf7PH6aWwZ6fka3zQ0p6SBYzx4fl1LoZXE2RrnYST9Xljm7WfKJrU4g/Xr3Beg72MLrp1AWNUmuYJTL7Cow==} + + '@esbuild/aix-ppc64@0.27.3': + resolution: {integrity: sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [aix] + + '@esbuild/android-arm64@0.27.3': + resolution: {integrity: sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [android] + + '@esbuild/android-arm@0.27.3': + resolution: {integrity: sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==} + engines: {node: '>=18'} + cpu: [arm] + os: [android] + + '@esbuild/android-x64@0.27.3': + 
resolution: {integrity: sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [android] + + '@esbuild/darwin-arm64@0.27.3': + resolution: {integrity: sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-x64@0.27.3': + resolution: {integrity: sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==} + engines: {node: '>=18'} + cpu: [x64] + os: [darwin] + + '@esbuild/freebsd-arm64@0.27.3': + resolution: {integrity: sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==} + engines: {node: '>=18'} + cpu: [arm64] + os: [freebsd] + + '@esbuild/freebsd-x64@0.27.3': + resolution: {integrity: sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==} + engines: {node: '>=18'} + cpu: [x64] + os: [freebsd] + + '@esbuild/linux-arm64@0.27.3': + resolution: {integrity: sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm@0.27.3': + resolution: {integrity: sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==} + engines: {node: '>=18'} + cpu: [arm] + os: [linux] + + '@esbuild/linux-ia32@0.27.3': + resolution: {integrity: sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==} + engines: {node: '>=18'} + cpu: [ia32] + os: [linux] + + '@esbuild/linux-loong64@0.27.3': + resolution: {integrity: sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==} + engines: {node: '>=18'} + cpu: [loong64] + os: [linux] + + '@esbuild/linux-mips64el@0.27.3': + resolution: {integrity: 
sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==} + engines: {node: '>=18'} + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-ppc64@0.27.3': + resolution: {integrity: sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-riscv64@0.27.3': + resolution: {integrity: sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==} + engines: {node: '>=18'} + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-s390x@0.27.3': + resolution: {integrity: sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==} + engines: {node: '>=18'} + cpu: [s390x] + os: [linux] + + '@esbuild/linux-x64@0.27.3': + resolution: {integrity: sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==} + engines: {node: '>=18'} + cpu: [x64] + os: [linux] + + '@esbuild/netbsd-arm64@0.27.3': + resolution: {integrity: sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [netbsd] + + '@esbuild/netbsd-x64@0.27.3': + resolution: {integrity: sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==} + engines: {node: '>=18'} + cpu: [x64] + os: [netbsd] + + '@esbuild/openbsd-arm64@0.27.3': + resolution: {integrity: sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openbsd] + + '@esbuild/openbsd-x64@0.27.3': + resolution: {integrity: sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [openbsd] + + '@esbuild/openharmony-arm64@0.27.3': + resolution: {integrity: 
sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openharmony] + + '@esbuild/sunos-x64@0.27.3': + resolution: {integrity: sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==} + engines: {node: '>=18'} + cpu: [x64] + os: [sunos] + + '@esbuild/win32-arm64@0.27.3': + resolution: {integrity: sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [win32] + + '@esbuild/win32-ia32@0.27.3': + resolution: {integrity: sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==} + engines: {node: '>=18'} + cpu: [ia32] + os: [win32] + + '@esbuild/win32-x64@0.27.3': + resolution: {integrity: sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==} + engines: {node: '>=18'} + cpu: [x64] + os: [win32] + + '@jridgewell/gen-mapping@0.3.13': + resolution: {integrity: sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==} + + '@jridgewell/remapping@2.3.5': + resolution: {integrity: sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==} + + '@jridgewell/resolve-uri@3.1.2': + resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} + engines: {node: '>=6.0.0'} + + '@jridgewell/sourcemap-codec@1.5.5': + resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==} + + '@jridgewell/trace-mapping@0.3.31': + resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==} + + '@juggle/resize-observer@3.4.0': + resolution: {integrity: sha512-dfLbk+PwWvFzSxwk3n5ySL0hfBog779o8h68wK/7/APo/7cgyWp5jcXockbxdk5kFRkbeXWm4Fbi9FrdN381sA==} + + 
'@rolldown/pluginutils@1.0.0-rc.2': + resolution: {integrity: sha512-izyXV/v+cHiRfozX62W9htOAvwMo4/bXKDrQ+vom1L1qRuexPock/7VZDAhnpHCLNejd3NJ6hiab+tO0D44Rgw==} + + '@rollup/rollup-android-arm-eabi@4.57.1': + resolution: {integrity: sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==} + cpu: [arm] + os: [android] + + '@rollup/rollup-android-arm64@4.57.1': + resolution: {integrity: sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==} + cpu: [arm64] + os: [android] + + '@rollup/rollup-darwin-arm64@4.57.1': + resolution: {integrity: sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==} + cpu: [arm64] + os: [darwin] + + '@rollup/rollup-darwin-x64@4.57.1': + resolution: {integrity: sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==} + cpu: [x64] + os: [darwin] + + '@rollup/rollup-freebsd-arm64@4.57.1': + resolution: {integrity: sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==} + cpu: [arm64] + os: [freebsd] + + '@rollup/rollup-freebsd-x64@4.57.1': + resolution: {integrity: sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==} + cpu: [x64] + os: [freebsd] + + '@rollup/rollup-linux-arm-gnueabihf@4.57.1': + resolution: {integrity: sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm-musleabihf@4.57.1': + resolution: {integrity: sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm64-gnu@4.57.1': + resolution: {integrity: sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-arm64-musl@4.57.1': + resolution: {integrity: 
sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-loong64-gnu@4.57.1': + resolution: {integrity: sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==} + cpu: [loong64] + os: [linux] + + '@rollup/rollup-linux-loong64-musl@4.57.1': + resolution: {integrity: sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==} + cpu: [loong64] + os: [linux] + + '@rollup/rollup-linux-ppc64-gnu@4.57.1': + resolution: {integrity: sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==} + cpu: [ppc64] + os: [linux] + + '@rollup/rollup-linux-ppc64-musl@4.57.1': + resolution: {integrity: sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==} + cpu: [ppc64] + os: [linux] + + '@rollup/rollup-linux-riscv64-gnu@4.57.1': + resolution: {integrity: sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==} + cpu: [riscv64] + os: [linux] + + '@rollup/rollup-linux-riscv64-musl@4.57.1': + resolution: {integrity: sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==} + cpu: [riscv64] + os: [linux] + + '@rollup/rollup-linux-s390x-gnu@4.57.1': + resolution: {integrity: sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==} + cpu: [s390x] + os: [linux] + + '@rollup/rollup-linux-x64-gnu@4.57.1': + resolution: {integrity: sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-linux-x64-musl@4.57.1': + resolution: {integrity: sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-openbsd-x64@4.57.1': + resolution: {integrity: 
sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==} + cpu: [x64] + os: [openbsd] + + '@rollup/rollup-openharmony-arm64@4.57.1': + resolution: {integrity: sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==} + cpu: [arm64] + os: [openharmony] + + '@rollup/rollup-win32-arm64-msvc@4.57.1': + resolution: {integrity: sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==} + cpu: [arm64] + os: [win32] + + '@rollup/rollup-win32-ia32-msvc@4.57.1': + resolution: {integrity: sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==} + cpu: [ia32] + os: [win32] + + '@rollup/rollup-win32-x64-gnu@4.57.1': + resolution: {integrity: sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==} + cpu: [x64] + os: [win32] + + '@rollup/rollup-win32-x64-msvc@4.57.1': + resolution: {integrity: sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==} + cpu: [x64] + os: [win32] + + '@types/estree@1.0.8': + resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + + '@types/katex@0.16.8': + resolution: {integrity: sha512-trgaNyfU+Xh2Tc+ABIb44a5AYUpicB3uwirOioeOkNPPbmgRNtcWyDeeFRzjPZENO9Vq8gvVqfhaaXWLlevVwg==} + + '@types/lodash-es@4.17.12': + resolution: {integrity: sha512-0NgftHUcV4v34VhXm8QBSftKVXtbkBG3ViCjs6+eJ5a6y6Mi/jiFGPc1sC7QK+9BFhWrURE3EOggmWaSxL9OzQ==} + + '@types/lodash@4.17.23': + resolution: {integrity: sha512-RDvF6wTulMPjrNdCoYRC8gNR880JNGT8uB+REUpC2Ns4pRqQJhGz90wh7rgdXDPpCczF3VGktDuFGVnz8zP7HA==} + + '@types/node@24.10.12': + resolution: {integrity: sha512-68e+T28EbdmLSTkPgs3+UacC6rzmqrcWFPQs1C8mwJhI/r5Uxr0yEuQotczNRROd1gq30NGxee+fo0rSIxpyAw==} + + '@vitejs/plugin-vue@6.0.4': + resolution: {integrity: 
sha512-uM5iXipgYIn13UUQCZNdWkYk+sysBeA97d5mHsAoAt1u/wpN3+zxOmsVJWosuzX+IMGRzeYUNytztrYznboIkQ==} + engines: {node: ^20.19.0 || >=22.12.0} + peerDependencies: + vite: ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 + vue: ^3.2.25 + + '@volar/language-core@2.4.27': + resolution: {integrity: sha512-DjmjBWZ4tJKxfNC1F6HyYERNHPYS7L7OPFyCrestykNdUZMFYzI9WTyvwPcaNaHlrEUwESHYsfEw3isInncZxQ==} + + '@volar/source-map@2.4.27': + resolution: {integrity: sha512-ynlcBReMgOZj2i6po+qVswtDUeeBRCTgDurjMGShbm8WYZgJ0PA4RmtebBJ0BCYol1qPv3GQF6jK7C9qoVc7lg==} + + '@volar/typescript@2.4.27': + resolution: {integrity: sha512-eWaYCcl/uAPInSK2Lze6IqVWaBu/itVqR5InXcHXFyles4zO++Mglt3oxdgj75BDcv1Knr9Y93nowS8U3wqhxg==} + + '@vue/compiler-core@3.5.27': + resolution: {integrity: sha512-gnSBQjZA+//qDZen+6a2EdHqJ68Z7uybrMf3SPjEGgG4dicklwDVmMC1AeIHxtLVPT7sn6sH1KOO+tS6gwOUeQ==} + + '@vue/compiler-dom@3.5.27': + resolution: {integrity: sha512-oAFea8dZgCtVVVTEC7fv3T5CbZW9BxpFzGGxC79xakTr6ooeEqmRuvQydIiDAkglZEAd09LgVf1RoDnL54fu5w==} + + '@vue/compiler-sfc@3.5.27': + resolution: {integrity: sha512-sHZu9QyDPeDmN/MRoshhggVOWE5WlGFStKFwu8G52swATgSny27hJRWteKDSUUzUH+wp+bmeNbhJnEAel/auUQ==} + + '@vue/compiler-ssr@3.5.27': + resolution: {integrity: sha512-Sj7h+JHt512fV1cTxKlYhg7qxBvack+BGncSpH+8vnN+KN95iPIcqB5rsbblX40XorP+ilO7VIKlkuu3Xq2vjw==} + + '@vue/language-core@3.2.4': + resolution: {integrity: sha512-bqBGuSG4KZM45KKTXzGtoCl9cWju5jsaBKaJJe3h5hRAAWpZUuj5G+L+eI01sPIkm4H6setKRlw7E85wLdDNew==} + + '@vue/reactivity@3.5.27': + resolution: {integrity: sha512-vvorxn2KXfJ0nBEnj4GYshSgsyMNFnIQah/wczXlsNXt+ijhugmW+PpJ2cNPe4V6jpnBcs0MhCODKllWG+nvoQ==} + + '@vue/runtime-core@3.5.27': + resolution: {integrity: sha512-fxVuX/fzgzeMPn/CLQecWeDIFNt3gQVhxM0rW02Tvp/YmZfXQgcTXlakq7IMutuZ/+Ogbn+K0oct9J3JZfyk3A==} + + '@vue/runtime-dom@3.5.27': + resolution: {integrity: sha512-/QnLslQgYqSJ5aUmb5F0z0caZPGHRB8LEAQ1s81vHFM5CBfnun63rxhvE/scVb/j3TbBuoZwkJyiLCkBluMpeg==} + + '@vue/server-renderer@3.5.27': + resolution: {integrity: 
sha512-qOz/5thjeP1vAFc4+BY3Nr6wxyLhpeQgAE/8dDtKo6a6xdk+L4W46HDZgNmLOBUDEkFXV3G7pRiUqxjX0/2zWA==} + peerDependencies: + vue: 3.5.27 + + '@vue/shared@3.5.27': + resolution: {integrity: sha512-dXr/3CgqXsJkZ0n9F3I4elY8wM9jMJpP3pvRG52r6m0tu/MsAFIe6JpXVGeNMd/D9F4hQynWT8Rfuj0bdm9kFQ==} + + '@vue/tsconfig@0.8.1': + resolution: {integrity: sha512-aK7feIWPXFSUhsCP9PFqPyFOcz4ENkb8hZ2pneL6m2UjCkccvaOhC/5KCKluuBufvp2KzkbdA2W2pk20vLzu3g==} + peerDependencies: + typescript: 5.x + vue: ^3.4.0 + peerDependenciesMeta: + typescript: + optional: true + vue: + optional: true + + acorn@8.15.0: + resolution: {integrity: sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==} + engines: {node: '>=0.4.0'} + hasBin: true + + alien-signals@3.1.2: + resolution: {integrity: sha512-d9dYqZTS90WLiU0I5c6DHj/HcKkF8ZyGN3G5x8wSbslulz70KOxaqCT0hQCo9KOyhVqzqGojvNdJXoTumZOtcw==} + + async-validator@4.2.5: + resolution: {integrity: sha512-7HhHjtERjqlNbZtqNqy2rckN/SpOOlmDliet+lP7k+eKZEjPk3DgyeU9lIXLdeLz0uBbbVp+9Qdow9wJWgwwfg==} + + asynckit@0.4.0: + resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} + + axios@1.13.5: + resolution: {integrity: sha512-cz4ur7Vb0xS4/KUN0tPWe44eqxrIu31me+fbang3ijiNscE129POzipJJA6zniq2C/Z6sJCjMimjS8Lc/GAs8Q==} + + call-bind-apply-helpers@1.0.2: + resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} + engines: {node: '>= 0.4'} + + chokidar@5.0.0: + resolution: {integrity: sha512-TQMmc3w+5AxjpL8iIiwebF73dRDF4fBIieAqGn9RGCWaEVwQ6Fb2cGe31Yns0RRIzii5goJ1Y7xbMwo1TxMplw==} + engines: {node: '>= 20.19.0'} + + combined-stream@1.0.8: + resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} + engines: {node: '>= 0.8'} + + confbox@0.1.8: + resolution: {integrity: 
sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==} + + confbox@0.2.4: + resolution: {integrity: sha512-ysOGlgTFbN2/Y6Cg3Iye8YKulHw+R2fNXHrgSmXISQdMnomY6eNDprVdW9R5xBguEqI954+S6709UyiO7B+6OQ==} + + css-render@0.15.14: + resolution: {integrity: sha512-9nF4PdUle+5ta4W5SyZdLCCmFd37uVimSjg1evcTqKJCyvCEEj12WKzOSBNak6r4im4J4iYXKH1OWpUV5LBYFg==} + + csstype@3.0.11: + resolution: {integrity: sha512-sa6P2wJ+CAbgyy4KFssIb/JNMLxFvKF1pCYCSXS8ZMuqZnMsrxqI2E5sPyoTpxoPU/gVZMzr2zjOfg8GIZOMsw==} + + csstype@3.2.3: + resolution: {integrity: sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==} + + date-fns-tz@3.2.0: + resolution: {integrity: sha512-sg8HqoTEulcbbbVXeg84u5UnlsQa8GS5QXMqjjYIhS4abEVVKIUwe0/l/UhrZdKaL/W5eWZNlbTeEIiOXTcsBQ==} + peerDependencies: + date-fns: ^3.0.0 || ^4.0.0 + + date-fns@4.1.0: + resolution: {integrity: sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg==} + + delayed-stream@1.0.0: + resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} + engines: {node: '>=0.4.0'} + + dunder-proto@1.0.1: + resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} + engines: {node: '>= 0.4'} + + entities@7.0.1: + resolution: {integrity: sha512-TWrgLOFUQTH994YUyl1yT4uyavY5nNB5muff+RtWaqNVCAK408b5ZnnbNAUEWLTCpum9w6arT70i1XdQ4UeOPA==} + engines: {node: '>=0.12'} + + es-define-property@1.0.1: + resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==} + engines: {node: '>= 0.4'} + + es-errors@1.3.0: + resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} + engines: {node: '>= 0.4'} + + es-object-atoms@1.1.1: + resolution: {integrity: 
sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} + engines: {node: '>= 0.4'} + + es-set-tostringtag@2.1.0: + resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==} + engines: {node: '>= 0.4'} + + esbuild@0.27.3: + resolution: {integrity: sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==} + engines: {node: '>=18'} + hasBin: true + + escape-string-regexp@5.0.0: + resolution: {integrity: sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==} + engines: {node: '>=12'} + + estree-walker@2.0.2: + resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==} + + estree-walker@3.0.3: + resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==} + + evtd@0.2.4: + resolution: {integrity: sha512-qaeGN5bx63s/AXgQo8gj6fBkxge+OoLddLniox5qtLAEY5HSnuSlISXVPxnSae1dWblvTh4/HoMIB+mbMsvZzw==} + + exsolve@1.0.8: + resolution: {integrity: sha512-LmDxfWXwcTArk8fUEnOfSZpHOJ6zOMUJKOtFLFqJLoKJetuQG874Uc7/Kki7zFLzYybmZhp1M7+98pfMqeX8yA==} + + fdir@6.5.0: + resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} + engines: {node: '>=12.0.0'} + peerDependencies: + picomatch: ^3 || ^4 + peerDependenciesMeta: + picomatch: + optional: true + + follow-redirects@1.15.11: + resolution: {integrity: sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==} + engines: {node: '>=4.0'} + peerDependencies: + debug: '*' + peerDependenciesMeta: + debug: + optional: true + + form-data@4.0.5: + resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} + engines: {node: '>= 6'} + + fsevents@2.3.3: + resolution: {integrity: 
sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + function-bind@1.1.2: + resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + + get-intrinsic@1.3.0: + resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} + engines: {node: '>= 0.4'} + + get-proto@1.0.1: + resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} + engines: {node: '>= 0.4'} + + gopd@1.2.0: + resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} + engines: {node: '>= 0.4'} + + has-symbols@1.1.0: + resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} + engines: {node: '>= 0.4'} + + has-tostringtag@1.0.2: + resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} + engines: {node: '>= 0.4'} + + hasown@2.0.2: + resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} + engines: {node: '>= 0.4'} + + highlight.js@11.11.1: + resolution: {integrity: sha512-Xwwo44whKBVCYoliBQwaPvtd/2tYFkRQtXDWj1nackaV2JPXx3L0+Jvd8/qCJ2p+ML0/XVkJ2q+Mr+UVdpJK5w==} + engines: {node: '>=12.0.0'} + + js-tokens@9.0.1: + resolution: {integrity: sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==} + + local-pkg@1.1.2: + resolution: {integrity: sha512-arhlxbFRmoQHl33a0Zkle/YWlmNwoyt6QNZEIJcqNbdrsix5Lvc4HyyI3EnwxTYlZYc32EbYrQ8SzEZ7dqgg9A==} + engines: {node: '>=14'} + + lodash-es@4.17.23: + resolution: {integrity: sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg==} + + 
lodash@4.17.23: + resolution: {integrity: sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==} + + magic-string@0.30.21: + resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} + + math-intrinsics@1.1.0: + resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} + engines: {node: '>= 0.4'} + + mime-db@1.52.0: + resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} + engines: {node: '>= 0.6'} + + mime-types@2.1.35: + resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} + engines: {node: '>= 0.6'} + + mlly@1.8.0: + resolution: {integrity: sha512-l8D9ODSRWLe2KHJSifWGwBqpTZXIXTeo8mlKjY+E2HAakaTeNpqAyBZ8GSqLzHgw4XmHmC8whvpjJNMbFZN7/g==} + + muggle-string@0.4.1: + resolution: {integrity: sha512-VNTrAak/KhO2i8dqqnqnAHOa3cYBwXEZe9h+D5h/1ZqFSTEFHdM65lR7RoIqq3tBBYavsOXV84NoHXZ0AkPyqQ==} + + naive-ui@2.43.2: + resolution: {integrity: sha512-YlLMnGrwGTOc+zMj90sG3ubaH5/7czsgLgGcjTLA981IUaz8r6t4WIujNt8r9PNr+dqv6XNEr0vxkARgPPjfBQ==} + peerDependencies: + vue: ^3.0.0 + + nanoid@3.3.11: + resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + obug@2.1.1: + resolution: {integrity: sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==} + + path-browserify@1.0.1: + resolution: {integrity: sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==} + + pathe@2.0.3: + resolution: {integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==} + + picocolors@1.1.1: + resolution: {integrity: 
sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + + picomatch@4.0.3: + resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} + engines: {node: '>=12'} + + pkg-types@1.3.1: + resolution: {integrity: sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==} + + pkg-types@2.3.0: + resolution: {integrity: sha512-SIqCzDRg0s9npO5XQ3tNZioRY1uK06lA41ynBC1YmFTmnY6FjUjVt6s4LoADmwoig1qqD0oK8h1p/8mlMx8Oig==} + + postcss@8.5.6: + resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} + engines: {node: ^10 || ^12 || >=14} + + proxy-from-env@1.1.0: + resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} + + quansync@0.2.11: + resolution: {integrity: sha512-AifT7QEbW9Nri4tAwR5M/uzpBuqfZf+zwaEM/QkzEjj7NBuFD2rBuy0K3dE+8wltbezDV7JMA0WfnCPYRSYbXA==} + + readdirp@5.0.0: + resolution: {integrity: sha512-9u/XQ1pvrQtYyMpZe7DXKv2p5CNvyVwzUB6uhLAnQwHMSgKMBR62lc7AHljaeteeHXn11XTAaLLUVZYVZyuRBQ==} + engines: {node: '>= 20.19.0'} + + rollup@4.57.1: + resolution: {integrity: sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==} + engines: {node: '>=18.0.0', npm: '>=8.0.0'} + hasBin: true + + scule@1.3.0: + resolution: {integrity: sha512-6FtHJEvt+pVMIB9IBY+IcCJ6Z5f1iQnytgyfKMhDKgmzYG+TeH/wx1y3l27rshSbLiSanrR9ffZDrEsmjlQF2g==} + + seemly@0.3.10: + resolution: {integrity: sha512-2+SMxtG1PcsL0uyhkumlOU6Qo9TAQ/WyH7tthnPIOQB05/12jz9naq6GZ6iZ6ApVsO3rr2gsnTf3++OV63kE1Q==} + + source-map-js@1.2.1: + resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} + engines: {node: '>=0.10.0'} + + strip-literal@3.1.0: + resolution: {integrity: 
sha512-8r3mkIM/2+PpjHoOtiAW8Rg3jJLHaV7xPwG+YRGrv6FP0wwk/toTpATxWYOW0BKdWwl82VT2tFYi5DlROa0Mxg==} + + tinyglobby@0.2.15: + resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} + engines: {node: '>=12.0.0'} + + treemate@0.3.11: + resolution: {integrity: sha512-M8RGFoKtZ8dF+iwJfAJTOH/SM4KluKOKRJpjCMhI8bG3qB74zrFoArKZ62ll0Fr3mqkMJiQOmWYkdYgDeITYQg==} + + typescript@5.9.3: + resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==} + engines: {node: '>=14.17'} + hasBin: true + + ufo@1.6.3: + resolution: {integrity: sha512-yDJTmhydvl5lJzBmy/hyOAA0d+aqCBuwl818haVdYCRrWV84o7YyeVm4QlVHStqNrrJSTb6jKuFAVqAFsr+K3Q==} + + undici-types@7.16.0: + resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==} + + unimport@5.6.0: + resolution: {integrity: sha512-8rqAmtJV8o60x46kBAJKtHpJDJWkA2xcBqWKPI14MgUb05o1pnpnCnXSxedUXyeq7p8fR5g3pTo2BaswZ9lD9A==} + engines: {node: '>=18.12.0'} + + unplugin-auto-import@21.0.0: + resolution: {integrity: sha512-vWuC8SwqJmxZFYwPojhOhOXDb5xFhNNcEVb9K/RFkyk/3VnfaOjzitWN7v+8DEKpMjSsY2AEGXNgt6I0yQrhRQ==} + engines: {node: '>=20.19.0'} + peerDependencies: + '@nuxt/kit': ^4.0.0 + '@vueuse/core': '*' + peerDependenciesMeta: + '@nuxt/kit': + optional: true + '@vueuse/core': + optional: true + + unplugin-utils@0.3.1: + resolution: {integrity: sha512-5lWVjgi6vuHhJ526bI4nlCOmkCIF3nnfXkCMDeMJrtdvxTs6ZFCM8oNufGTsDbKv/tJ/xj8RpvXjRuPBZJuJog==} + engines: {node: '>=20.19.0'} + + unplugin-vue-components@31.0.0: + resolution: {integrity: sha512-4ULwfTZTLuWJ7+S9P7TrcStYLsSRkk6vy2jt/WTfgUEUb0nW9//xxmrfhyHUEVpZ2UKRRwfRb8Yy15PDbVZf+Q==} + engines: {node: '>=20.19.0'} + peerDependencies: + '@nuxt/kit': ^3.2.2 || ^4.0.0 + vue: ^3.0.0 + peerDependenciesMeta: + '@nuxt/kit': + optional: true + + unplugin@2.3.11: + resolution: {integrity: 
sha512-5uKD0nqiYVzlmCRs01Fhs2BdkEgBS3SAVP6ndrBsuK42iC2+JHyxM05Rm9G8+5mkmRtzMZGY8Ct5+mliZxU/Ww==} + engines: {node: '>=18.12.0'} + + vdirs@0.1.8: + resolution: {integrity: sha512-H9V1zGRLQZg9b+GdMk8MXDN2Lva0zx72MPahDKc30v+DtwKjfyOSXWRIX4t2mhDubM1H09gPhWeth/BJWPHGUw==} + peerDependencies: + vue: ^3.0.11 + + vite@7.3.1: + resolution: {integrity: sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==} + engines: {node: ^20.19.0 || >=22.12.0} + hasBin: true + peerDependencies: + '@types/node': ^20.19.0 || >=22.12.0 + jiti: '>=1.21.0' + less: ^4.0.0 + lightningcss: ^1.21.0 + sass: ^1.70.0 + sass-embedded: ^1.70.0 + stylus: '>=0.54.8' + sugarss: ^5.0.0 + terser: ^5.16.0 + tsx: ^4.8.1 + yaml: ^2.4.2 + peerDependenciesMeta: + '@types/node': + optional: true + jiti: + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + tsx: + optional: true + yaml: + optional: true + + vooks@0.2.12: + resolution: {integrity: sha512-iox0I3RZzxtKlcgYaStQYKEzWWGAduMmq+jS7OrNdQo1FgGfPMubGL3uGHOU9n97NIvfFDBGnpSvkWyb/NSn/Q==} + peerDependencies: + vue: ^3.0.0 + + vscode-uri@3.1.0: + resolution: {integrity: sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ==} + + vue-tsc@3.2.4: + resolution: {integrity: sha512-xj3YCvSLNDKt1iF9OcImWHhmYcihVu9p4b9s4PGR/qp6yhW+tZJaypGxHScRyOrdnHvaOeF+YkZOdKwbgGvp5g==} + hasBin: true + peerDependencies: + typescript: '>=5.0.0' + + vue@3.5.27: + resolution: {integrity: sha512-aJ/UtoEyFySPBGarREmN4z6qNKpbEguYHMmXSiOGk69czc+zhs0NF6tEFrY8TZKAl8N/LYAkd4JHVd5E/AsSmw==} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + vueuc@0.4.65: + resolution: {integrity: sha512-lXuMl+8gsBmruudfxnMF9HW4be8rFziylXFu1VHVNbLVhRTXXV4njvpRuJapD/8q+oFEMSfQMH16E/85VoWRyQ==} + peerDependencies: + vue: 
^3.0.11 + + webpack-virtual-modules@0.6.2: + resolution: {integrity: sha512-66/V2i5hQanC51vBQKPH4aI8NMAcBW59FVBs+rC7eGHupMyfn34q7rZIE+ETlJ+XTevqfUhVVBgSUNSW2flEUQ==} + +snapshots: + + '@babel/helper-string-parser@7.27.1': {} + + '@babel/helper-validator-identifier@7.28.5': {} + + '@babel/parser@7.29.0': + dependencies: + '@babel/types': 7.29.0 + + '@babel/types@7.29.0': + dependencies: + '@babel/helper-string-parser': 7.27.1 + '@babel/helper-validator-identifier': 7.28.5 + + '@css-render/plugin-bem@0.15.14(css-render@0.15.14)': + dependencies: + css-render: 0.15.14 + + '@css-render/vue3-ssr@0.15.14(vue@3.5.27(typescript@5.9.3))': + dependencies: + vue: 3.5.27(typescript@5.9.3) + + '@emotion/hash@0.8.0': {} + + '@esbuild/aix-ppc64@0.27.3': + optional: true + + '@esbuild/android-arm64@0.27.3': + optional: true + + '@esbuild/android-arm@0.27.3': + optional: true + + '@esbuild/android-x64@0.27.3': + optional: true + + '@esbuild/darwin-arm64@0.27.3': + optional: true + + '@esbuild/darwin-x64@0.27.3': + optional: true + + '@esbuild/freebsd-arm64@0.27.3': + optional: true + + '@esbuild/freebsd-x64@0.27.3': + optional: true + + '@esbuild/linux-arm64@0.27.3': + optional: true + + '@esbuild/linux-arm@0.27.3': + optional: true + + '@esbuild/linux-ia32@0.27.3': + optional: true + + '@esbuild/linux-loong64@0.27.3': + optional: true + + '@esbuild/linux-mips64el@0.27.3': + optional: true + + '@esbuild/linux-ppc64@0.27.3': + optional: true + + '@esbuild/linux-riscv64@0.27.3': + optional: true + + '@esbuild/linux-s390x@0.27.3': + optional: true + + '@esbuild/linux-x64@0.27.3': + optional: true + + '@esbuild/netbsd-arm64@0.27.3': + optional: true + + '@esbuild/netbsd-x64@0.27.3': + optional: true + + '@esbuild/openbsd-arm64@0.27.3': + optional: true + + '@esbuild/openbsd-x64@0.27.3': + optional: true + + '@esbuild/openharmony-arm64@0.27.3': + optional: true + + '@esbuild/sunos-x64@0.27.3': + optional: true + + '@esbuild/win32-arm64@0.27.3': + optional: true + + 
'@esbuild/win32-ia32@0.27.3': + optional: true + + '@esbuild/win32-x64@0.27.3': + optional: true + + '@jridgewell/gen-mapping@0.3.13': + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + '@jridgewell/trace-mapping': 0.3.31 + + '@jridgewell/remapping@2.3.5': + dependencies: + '@jridgewell/gen-mapping': 0.3.13 + '@jridgewell/trace-mapping': 0.3.31 + + '@jridgewell/resolve-uri@3.1.2': {} + + '@jridgewell/sourcemap-codec@1.5.5': {} + + '@jridgewell/trace-mapping@0.3.31': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.5 + + '@juggle/resize-observer@3.4.0': {} + + '@rolldown/pluginutils@1.0.0-rc.2': {} + + '@rollup/rollup-android-arm-eabi@4.57.1': + optional: true + + '@rollup/rollup-android-arm64@4.57.1': + optional: true + + '@rollup/rollup-darwin-arm64@4.57.1': + optional: true + + '@rollup/rollup-darwin-x64@4.57.1': + optional: true + + '@rollup/rollup-freebsd-arm64@4.57.1': + optional: true + + '@rollup/rollup-freebsd-x64@4.57.1': + optional: true + + '@rollup/rollup-linux-arm-gnueabihf@4.57.1': + optional: true + + '@rollup/rollup-linux-arm-musleabihf@4.57.1': + optional: true + + '@rollup/rollup-linux-arm64-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-arm64-musl@4.57.1': + optional: true + + '@rollup/rollup-linux-loong64-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-loong64-musl@4.57.1': + optional: true + + '@rollup/rollup-linux-ppc64-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-ppc64-musl@4.57.1': + optional: true + + '@rollup/rollup-linux-riscv64-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-riscv64-musl@4.57.1': + optional: true + + '@rollup/rollup-linux-s390x-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-x64-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-x64-musl@4.57.1': + optional: true + + '@rollup/rollup-openbsd-x64@4.57.1': + optional: true + + '@rollup/rollup-openharmony-arm64@4.57.1': + optional: true + + '@rollup/rollup-win32-arm64-msvc@4.57.1': 
+ optional: true + + '@rollup/rollup-win32-ia32-msvc@4.57.1': + optional: true + + '@rollup/rollup-win32-x64-gnu@4.57.1': + optional: true + + '@rollup/rollup-win32-x64-msvc@4.57.1': + optional: true + + '@types/estree@1.0.8': {} + + '@types/katex@0.16.8': {} + + '@types/lodash-es@4.17.12': + dependencies: + '@types/lodash': 4.17.23 + + '@types/lodash@4.17.23': {} + + '@types/node@24.10.12': + dependencies: + undici-types: 7.16.0 + + '@vitejs/plugin-vue@6.0.4(vite@7.3.1(@types/node@24.10.12))(vue@3.5.27(typescript@5.9.3))': + dependencies: + '@rolldown/pluginutils': 1.0.0-rc.2 + vite: 7.3.1(@types/node@24.10.12) + vue: 3.5.27(typescript@5.9.3) + + '@volar/language-core@2.4.27': + dependencies: + '@volar/source-map': 2.4.27 + + '@volar/source-map@2.4.27': {} + + '@volar/typescript@2.4.27': + dependencies: + '@volar/language-core': 2.4.27 + path-browserify: 1.0.1 + vscode-uri: 3.1.0 + + '@vue/compiler-core@3.5.27': + dependencies: + '@babel/parser': 7.29.0 + '@vue/shared': 3.5.27 + entities: 7.0.1 + estree-walker: 2.0.2 + source-map-js: 1.2.1 + + '@vue/compiler-dom@3.5.27': + dependencies: + '@vue/compiler-core': 3.5.27 + '@vue/shared': 3.5.27 + + '@vue/compiler-sfc@3.5.27': + dependencies: + '@babel/parser': 7.29.0 + '@vue/compiler-core': 3.5.27 + '@vue/compiler-dom': 3.5.27 + '@vue/compiler-ssr': 3.5.27 + '@vue/shared': 3.5.27 + estree-walker: 2.0.2 + magic-string: 0.30.21 + postcss: 8.5.6 + source-map-js: 1.2.1 + + '@vue/compiler-ssr@3.5.27': + dependencies: + '@vue/compiler-dom': 3.5.27 + '@vue/shared': 3.5.27 + + '@vue/language-core@3.2.4': + dependencies: + '@volar/language-core': 2.4.27 + '@vue/compiler-dom': 3.5.27 + '@vue/shared': 3.5.27 + alien-signals: 3.1.2 + muggle-string: 0.4.1 + path-browserify: 1.0.1 + picomatch: 4.0.3 + + '@vue/reactivity@3.5.27': + dependencies: + '@vue/shared': 3.5.27 + + '@vue/runtime-core@3.5.27': + dependencies: + '@vue/reactivity': 3.5.27 + '@vue/shared': 3.5.27 + + '@vue/runtime-dom@3.5.27': + dependencies: + 
'@vue/reactivity': 3.5.27 + '@vue/runtime-core': 3.5.27 + '@vue/shared': 3.5.27 + csstype: 3.2.3 + + '@vue/server-renderer@3.5.27(vue@3.5.27(typescript@5.9.3))': + dependencies: + '@vue/compiler-ssr': 3.5.27 + '@vue/shared': 3.5.27 + vue: 3.5.27(typescript@5.9.3) + + '@vue/shared@3.5.27': {} + + '@vue/tsconfig@0.8.1(typescript@5.9.3)(vue@3.5.27(typescript@5.9.3))': + optionalDependencies: + typescript: 5.9.3 + vue: 3.5.27(typescript@5.9.3) + + acorn@8.15.0: {} + + alien-signals@3.1.2: {} + + async-validator@4.2.5: {} + + asynckit@0.4.0: {} + + axios@1.13.5: + dependencies: + follow-redirects: 1.15.11 + form-data: 4.0.5 + proxy-from-env: 1.1.0 + transitivePeerDependencies: + - debug + + call-bind-apply-helpers@1.0.2: + dependencies: + es-errors: 1.3.0 + function-bind: 1.1.2 + + chokidar@5.0.0: + dependencies: + readdirp: 5.0.0 + + combined-stream@1.0.8: + dependencies: + delayed-stream: 1.0.0 + + confbox@0.1.8: {} + + confbox@0.2.4: {} + + css-render@0.15.14: + dependencies: + '@emotion/hash': 0.8.0 + csstype: 3.0.11 + + csstype@3.0.11: {} + + csstype@3.2.3: {} + + date-fns-tz@3.2.0(date-fns@4.1.0): + dependencies: + date-fns: 4.1.0 + + date-fns@4.1.0: {} + + delayed-stream@1.0.0: {} + + dunder-proto@1.0.1: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-errors: 1.3.0 + gopd: 1.2.0 + + entities@7.0.1: {} + + es-define-property@1.0.1: {} + + es-errors@1.3.0: {} + + es-object-atoms@1.1.1: + dependencies: + es-errors: 1.3.0 + + es-set-tostringtag@2.1.0: + dependencies: + es-errors: 1.3.0 + get-intrinsic: 1.3.0 + has-tostringtag: 1.0.2 + hasown: 2.0.2 + + esbuild@0.27.3: + optionalDependencies: + '@esbuild/aix-ppc64': 0.27.3 + '@esbuild/android-arm': 0.27.3 + '@esbuild/android-arm64': 0.27.3 + '@esbuild/android-x64': 0.27.3 + '@esbuild/darwin-arm64': 0.27.3 + '@esbuild/darwin-x64': 0.27.3 + '@esbuild/freebsd-arm64': 0.27.3 + '@esbuild/freebsd-x64': 0.27.3 + '@esbuild/linux-arm': 0.27.3 + '@esbuild/linux-arm64': 0.27.3 + '@esbuild/linux-ia32': 0.27.3 + 
'@esbuild/linux-loong64': 0.27.3 + '@esbuild/linux-mips64el': 0.27.3 + '@esbuild/linux-ppc64': 0.27.3 + '@esbuild/linux-riscv64': 0.27.3 + '@esbuild/linux-s390x': 0.27.3 + '@esbuild/linux-x64': 0.27.3 + '@esbuild/netbsd-arm64': 0.27.3 + '@esbuild/netbsd-x64': 0.27.3 + '@esbuild/openbsd-arm64': 0.27.3 + '@esbuild/openbsd-x64': 0.27.3 + '@esbuild/openharmony-arm64': 0.27.3 + '@esbuild/sunos-x64': 0.27.3 + '@esbuild/win32-arm64': 0.27.3 + '@esbuild/win32-ia32': 0.27.3 + '@esbuild/win32-x64': 0.27.3 + + escape-string-regexp@5.0.0: {} + + estree-walker@2.0.2: {} + + estree-walker@3.0.3: + dependencies: + '@types/estree': 1.0.8 + + evtd@0.2.4: {} + + exsolve@1.0.8: {} + + fdir@6.5.0(picomatch@4.0.3): + optionalDependencies: + picomatch: 4.0.3 + + follow-redirects@1.15.11: {} + + form-data@4.0.5: + dependencies: + asynckit: 0.4.0 + combined-stream: 1.0.8 + es-set-tostringtag: 2.1.0 + hasown: 2.0.2 + mime-types: 2.1.35 + + fsevents@2.3.3: + optional: true + + function-bind@1.1.2: {} + + get-intrinsic@1.3.0: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-define-property: 1.0.1 + es-errors: 1.3.0 + es-object-atoms: 1.1.1 + function-bind: 1.1.2 + get-proto: 1.0.1 + gopd: 1.2.0 + has-symbols: 1.1.0 + hasown: 2.0.2 + math-intrinsics: 1.1.0 + + get-proto@1.0.1: + dependencies: + dunder-proto: 1.0.1 + es-object-atoms: 1.1.1 + + gopd@1.2.0: {} + + has-symbols@1.1.0: {} + + has-tostringtag@1.0.2: + dependencies: + has-symbols: 1.1.0 + + hasown@2.0.2: + dependencies: + function-bind: 1.1.2 + + highlight.js@11.11.1: {} + + js-tokens@9.0.1: {} + + local-pkg@1.1.2: + dependencies: + mlly: 1.8.0 + pkg-types: 2.3.0 + quansync: 0.2.11 + + lodash-es@4.17.23: {} + + lodash@4.17.23: {} + + magic-string@0.30.21: + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + + math-intrinsics@1.1.0: {} + + mime-db@1.52.0: {} + + mime-types@2.1.35: + dependencies: + mime-db: 1.52.0 + + mlly@1.8.0: + dependencies: + acorn: 8.15.0 + pathe: 2.0.3 + pkg-types: 1.3.1 + ufo: 1.6.3 + + 
muggle-string@0.4.1: {} + + naive-ui@2.43.2(vue@3.5.27(typescript@5.9.3)): + dependencies: + '@css-render/plugin-bem': 0.15.14(css-render@0.15.14) + '@css-render/vue3-ssr': 0.15.14(vue@3.5.27(typescript@5.9.3)) + '@types/katex': 0.16.8 + '@types/lodash': 4.17.23 + '@types/lodash-es': 4.17.12 + async-validator: 4.2.5 + css-render: 0.15.14 + csstype: 3.2.3 + date-fns: 4.1.0 + date-fns-tz: 3.2.0(date-fns@4.1.0) + evtd: 0.2.4 + highlight.js: 11.11.1 + lodash: 4.17.23 + lodash-es: 4.17.23 + seemly: 0.3.10 + treemate: 0.3.11 + vdirs: 0.1.8(vue@3.5.27(typescript@5.9.3)) + vooks: 0.2.12(vue@3.5.27(typescript@5.9.3)) + vue: 3.5.27(typescript@5.9.3) + vueuc: 0.4.65(vue@3.5.27(typescript@5.9.3)) + + nanoid@3.3.11: {} + + obug@2.1.1: {} + + path-browserify@1.0.1: {} + + pathe@2.0.3: {} + + picocolors@1.1.1: {} + + picomatch@4.0.3: {} + + pkg-types@1.3.1: + dependencies: + confbox: 0.1.8 + mlly: 1.8.0 + pathe: 2.0.3 + + pkg-types@2.3.0: + dependencies: + confbox: 0.2.4 + exsolve: 1.0.8 + pathe: 2.0.3 + + postcss@8.5.6: + dependencies: + nanoid: 3.3.11 + picocolors: 1.1.1 + source-map-js: 1.2.1 + + proxy-from-env@1.1.0: {} + + quansync@0.2.11: {} + + readdirp@5.0.0: {} + + rollup@4.57.1: + dependencies: + '@types/estree': 1.0.8 + optionalDependencies: + '@rollup/rollup-android-arm-eabi': 4.57.1 + '@rollup/rollup-android-arm64': 4.57.1 + '@rollup/rollup-darwin-arm64': 4.57.1 + '@rollup/rollup-darwin-x64': 4.57.1 + '@rollup/rollup-freebsd-arm64': 4.57.1 + '@rollup/rollup-freebsd-x64': 4.57.1 + '@rollup/rollup-linux-arm-gnueabihf': 4.57.1 + '@rollup/rollup-linux-arm-musleabihf': 4.57.1 + '@rollup/rollup-linux-arm64-gnu': 4.57.1 + '@rollup/rollup-linux-arm64-musl': 4.57.1 + '@rollup/rollup-linux-loong64-gnu': 4.57.1 + '@rollup/rollup-linux-loong64-musl': 4.57.1 + '@rollup/rollup-linux-ppc64-gnu': 4.57.1 + '@rollup/rollup-linux-ppc64-musl': 4.57.1 + '@rollup/rollup-linux-riscv64-gnu': 4.57.1 + '@rollup/rollup-linux-riscv64-musl': 4.57.1 + '@rollup/rollup-linux-s390x-gnu': 4.57.1 + 
'@rollup/rollup-linux-x64-gnu': 4.57.1 + '@rollup/rollup-linux-x64-musl': 4.57.1 + '@rollup/rollup-openbsd-x64': 4.57.1 + '@rollup/rollup-openharmony-arm64': 4.57.1 + '@rollup/rollup-win32-arm64-msvc': 4.57.1 + '@rollup/rollup-win32-ia32-msvc': 4.57.1 + '@rollup/rollup-win32-x64-gnu': 4.57.1 + '@rollup/rollup-win32-x64-msvc': 4.57.1 + fsevents: 2.3.3 + + scule@1.3.0: {} + + seemly@0.3.10: {} + + source-map-js@1.2.1: {} + + strip-literal@3.1.0: + dependencies: + js-tokens: 9.0.1 + + tinyglobby@0.2.15: + dependencies: + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + + treemate@0.3.11: {} + + typescript@5.9.3: {} + + ufo@1.6.3: {} + + undici-types@7.16.0: {} + + unimport@5.6.0: + dependencies: + acorn: 8.15.0 + escape-string-regexp: 5.0.0 + estree-walker: 3.0.3 + local-pkg: 1.1.2 + magic-string: 0.30.21 + mlly: 1.8.0 + pathe: 2.0.3 + picomatch: 4.0.3 + pkg-types: 2.3.0 + scule: 1.3.0 + strip-literal: 3.1.0 + tinyglobby: 0.2.15 + unplugin: 2.3.11 + unplugin-utils: 0.3.1 + + unplugin-auto-import@21.0.0: + dependencies: + local-pkg: 1.1.2 + magic-string: 0.30.21 + picomatch: 4.0.3 + unimport: 5.6.0 + unplugin: 2.3.11 + unplugin-utils: 0.3.1 + + unplugin-utils@0.3.1: + dependencies: + pathe: 2.0.3 + picomatch: 4.0.3 + + unplugin-vue-components@31.0.0(vue@3.5.27(typescript@5.9.3)): + dependencies: + chokidar: 5.0.0 + local-pkg: 1.1.2 + magic-string: 0.30.21 + mlly: 1.8.0 + obug: 2.1.1 + picomatch: 4.0.3 + tinyglobby: 0.2.15 + unplugin: 2.3.11 + unplugin-utils: 0.3.1 + vue: 3.5.27(typescript@5.9.3) + + unplugin@2.3.11: + dependencies: + '@jridgewell/remapping': 2.3.5 + acorn: 8.15.0 + picomatch: 4.0.3 + webpack-virtual-modules: 0.6.2 + + vdirs@0.1.8(vue@3.5.27(typescript@5.9.3)): + dependencies: + evtd: 0.2.4 + vue: 3.5.27(typescript@5.9.3) + + vite@7.3.1(@types/node@24.10.12): + dependencies: + esbuild: 0.27.3 + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + postcss: 8.5.6 + rollup: 4.57.1 + tinyglobby: 0.2.15 + optionalDependencies: + '@types/node': 24.10.12 + 
fsevents: 2.3.3 + + vooks@0.2.12(vue@3.5.27(typescript@5.9.3)): + dependencies: + evtd: 0.2.4 + vue: 3.5.27(typescript@5.9.3) + + vscode-uri@3.1.0: {} + + vue-tsc@3.2.4(typescript@5.9.3): + dependencies: + '@volar/typescript': 2.4.27 + '@vue/language-core': 3.2.4 + typescript: 5.9.3 + + vue@3.5.27(typescript@5.9.3): + dependencies: + '@vue/compiler-dom': 3.5.27 + '@vue/compiler-sfc': 3.5.27 + '@vue/runtime-dom': 3.5.27 + '@vue/server-renderer': 3.5.27(vue@3.5.27(typescript@5.9.3)) + '@vue/shared': 3.5.27 + optionalDependencies: + typescript: 5.9.3 + + vueuc@0.4.65(vue@3.5.27(typescript@5.9.3)): + dependencies: + '@css-render/vue3-ssr': 0.15.14(vue@3.5.27(typescript@5.9.3)) + '@juggle/resize-observer': 3.4.0 + css-render: 0.15.14 + evtd: 0.2.4 + seemly: 0.3.10 + vdirs: 0.1.8(vue@3.5.27(typescript@5.9.3)) + vooks: 0.2.12(vue@3.5.27(typescript@5.9.3)) + vue: 3.5.27(typescript@5.9.3) + + webpack-virtual-modules@0.6.2: {} diff --git a/interactive/frontend/public/vite.svg b/interactive/frontend/public/vite.svg new file mode 100644 index 0000000..e7b8dfb --- /dev/null +++ b/interactive/frontend/public/vite.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/interactive/frontend/src/App.vue b/interactive/frontend/src/App.vue new file mode 100644 index 0000000..4f5b7f9 --- /dev/null +++ b/interactive/frontend/src/App.vue @@ -0,0 +1,26 @@ + + + + + diff --git a/interactive/frontend/src/assets/vue.svg b/interactive/frontend/src/assets/vue.svg new file mode 100644 index 0000000..770e9d3 --- /dev/null +++ b/interactive/frontend/src/assets/vue.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/interactive/frontend/src/components/HelloWorld.vue b/interactive/frontend/src/components/HelloWorld.vue new file mode 100644 index 0000000..7a1a3ce --- /dev/null +++ b/interactive/frontend/src/components/HelloWorld.vue @@ -0,0 +1,375 @@ + + + + + diff --git a/interactive/frontend/src/main.ts b/interactive/frontend/src/main.ts new file mode 100644 index 0000000..b670de8 
--- /dev/null +++ b/interactive/frontend/src/main.ts @@ -0,0 +1,4 @@ +import { createApp } from "vue"; +import App from "./App.vue"; + +createApp(App).mount("#app"); diff --git a/interactive/frontend/src/style.css b/interactive/frontend/src/style.css new file mode 100644 index 0000000..f691315 --- /dev/null +++ b/interactive/frontend/src/style.css @@ -0,0 +1,79 @@ +:root { + font-family: system-ui, Avenir, Helvetica, Arial, sans-serif; + line-height: 1.5; + font-weight: 400; + + color-scheme: light dark; + color: rgba(255, 255, 255, 0.87); + background-color: #242424; + + font-synthesis: none; + text-rendering: optimizeLegibility; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; +} + +a { + font-weight: 500; + color: #646cff; + text-decoration: inherit; +} +a:hover { + color: #535bf2; +} + +body { + margin: 0; + display: flex; + place-items: center; + min-width: 320px; + min-height: 100vh; +} + +h1 { + font-size: 3.2em; + line-height: 1.1; +} + +button { + border-radius: 8px; + border: 1px solid transparent; + padding: 0.6em 1.2em; + font-size: 1em; + font-weight: 500; + font-family: inherit; + background-color: #1a1a1a; + cursor: pointer; + transition: border-color 0.25s; +} +button:hover { + border-color: #646cff; +} +button:focus, +button:focus-visible { + outline: 4px auto -webkit-focus-ring-color; +} + +.card { + padding: 2em; +} + +#app { + max-width: 1280px; + margin: 0 auto; + padding: 2rem; + text-align: center; +} + +@media (prefers-color-scheme: light) { + :root { + color: #213547; + background-color: #ffffff; + } + a:hover { + color: #747bff; + } + button { + background-color: #f9f9f9; + } +} diff --git a/interactive/frontend/tsconfig.app.json b/interactive/frontend/tsconfig.app.json new file mode 100644 index 0000000..8d16e42 --- /dev/null +++ b/interactive/frontend/tsconfig.app.json @@ -0,0 +1,16 @@ +{ + "extends": "@vue/tsconfig/tsconfig.dom.json", + "compilerOptions": { + "tsBuildInfoFile": 
"./node_modules/.tmp/tsconfig.app.tsbuildinfo", + "types": ["vite/client"], + + /* Linting */ + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "erasableSyntaxOnly": true, + "noFallthroughCasesInSwitch": true, + "noUncheckedSideEffectImports": true + }, + "include": ["src/**/*.ts", "src/**/*.tsx", "src/**/*.vue"] +} diff --git a/interactive/frontend/tsconfig.json b/interactive/frontend/tsconfig.json new file mode 100644 index 0000000..1ffef60 --- /dev/null +++ b/interactive/frontend/tsconfig.json @@ -0,0 +1,7 @@ +{ + "files": [], + "references": [ + { "path": "./tsconfig.app.json" }, + { "path": "./tsconfig.node.json" } + ] +} diff --git a/interactive/frontend/tsconfig.node.json b/interactive/frontend/tsconfig.node.json new file mode 100644 index 0000000..8a67f62 --- /dev/null +++ b/interactive/frontend/tsconfig.node.json @@ -0,0 +1,26 @@ +{ + "compilerOptions": { + "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo", + "target": "ES2023", + "lib": ["ES2023"], + "module": "ESNext", + "types": ["node"], + "skipLibCheck": true, + + /* Bundler mode */ + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "verbatimModuleSyntax": true, + "moduleDetection": "force", + "noEmit": true, + + /* Linting */ + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "erasableSyntaxOnly": true, + "noFallthroughCasesInSwitch": true, + "noUncheckedSideEffectImports": true + }, + "include": ["vite.config.ts"] +} diff --git a/interactive/frontend/vite.config.ts b/interactive/frontend/vite.config.ts new file mode 100644 index 0000000..20b43dd --- /dev/null +++ b/interactive/frontend/vite.config.ts @@ -0,0 +1,38 @@ +import vue from "@vitejs/plugin-vue"; +import AutoImport from "unplugin-auto-import/vite"; +import { NaiveUiResolver } from "unplugin-vue-components/resolvers"; +import Components from "unplugin-vue-components/vite"; +// vite.config.ts +import { defineConfig } from "vite"; + +// 
https://vitejs.dev/config/ +export default defineConfig({ + server: { + watch: { + // 使用轮询模式,避免文件描述符问题 + usePolling: true, + interval: 1000, + // 忽略不需要监视的目录 + ignored: ["**/node_modules/**", "**/.git/**", "**/.next/**"], + }, + }, + plugins: [ + vue(), + AutoImport({ + imports: [ + "vue", + { + "naive-ui": [ + "useDialog", + "useMessage", + "useNotification", + "useLoadingBar", + ], + }, + ], + }), + Components({ + resolvers: [NaiveUiResolver()], + }), + ], +}); diff --git a/interactive/main.py b/interactive/main.py new file mode 100644 index 0000000..86ef505 --- /dev/null +++ b/interactive/main.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import os +from peewee import SQL +from flask import Flask, jsonify, request, abort, send_from_directory +from flask_cors import CORS +from db import insert, index, Pridict2, PrimeDesign, table_columns + + +app = Flask(__name__, static_folder="./frontend/dist") +CORS(app) + + +@app.route("/") +def static_files(filename): + """专门处理带扩展名的文件""" + if "." not in filename: + abort(404) # 无扩展名不应走这里 + try: + return send_from_directory(app.static_folder, filename) + except FileNotFoundError: + abort(404) # 静态文件不存在就是 404 + +@app.route("/", defaults={"path": ""}) +@app.route("/") +def main(path): + """仅处理 SPA 路由(无扩展名)""" + if "." 
in os.path.basename(path): + # 包含扩展名?说明应该是静态文件,但没被上面的路由捕获 → 404 + abort(404) + return send_from_directory(app.static_folder, "index.html") + + +def default_value(val, default): + try: + return int(val) + except Exception: + return default + + +@app.route("/api/gene") +def gene(): + genes = set() + + source = request.args.get("source", "pridict2") + + tables = { + "pridict2": Pridict2, + "prime_design": PrimeDesign, + } + + table = tables.get(source) + if not table: + return jsonify({"message": "No such table"}), 404 + + for i in table.select(table.gene.distinct()): + genes.add(i.gene) + + return jsonify(sorted(genes)) + + +@app.route("/api/records") +def records(): + source = request.args.get("source", "pridict2") + + tables = { + "pridict2": Pridict2, + "prime_design": PrimeDesign, + } + + table = tables.get(source) + if not table: + return jsonify({"message": "No such table"}), 404 + + columns = table_columns(table) + where = None + for i in ["gene", "dst", "src"]: + value = request.args.get(i) + if value: + if where is None: + where = (SQL(i) == value) + else: + where = (where) & (SQL(i) == value) + + for i in ["pbs_len", "rtt_len"]: + value = default_value(request.args.get(i), 0) + if value: + if where is None: + where = (SQL(i) <= value) + else: + where = (where) & (SQL(i) <= value) + + query = table.select().where(where) + total = query.count() + + order_by = request.args.get("order_by") + if order_by and order_by in columns: + order = request.args.get("order", "asc") + if "desc" in order: + query = query.order_by(SQL(order_by).desc()) + else: + query = query.order_by(SQL(order_by)) + else: + query = query.order_by(table.gene, table.aa, table.src, table.dst) + + offset = default_value(request.args.get("offset"), 1) + if offset <= 0: + offset = 1 + + length = default_value(request.args.get("length"), 1) + if length > 200: + length = 200 + query = query.offset((int(offset) - 1) * length).limit(int(length)) + print(query.sql()) + return jsonify({ + "data": [x 
for x in query.dicts()], + "total": total, + "offset": offset, + "length": length, + }) + + +def main(host: str="0.0.0.0", port=5555): + app.run(host=host, port=port, threaded=True, debug=True) + + +if __name__ == "__main__": + from fire import Fire + Fire({ + "insert": insert, + "index": index, + "server": main + }) diff --git a/interactive/pyproject.toml b/interactive/pyproject.toml new file mode 100644 index 0000000..d948a9e --- /dev/null +++ b/interactive/pyproject.toml @@ -0,0 +1,9 @@ +[project] +name = "interactive" +version = "0.1.0" +description = "Add your description here" +readme = "README.md" +requires-python = ">=3.12" +dependencies = [ + "peewee>=3.19.0", +] diff --git a/merge_results.py b/merge_results.py new file mode 100644 index 0000000..b87d80d --- /dev/null +++ b/merge_results.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import os +import gzip +import polars as pd +from glob import glob +from tqdm import tqdm +from multiprocessing import Pool + + +def read_file(args): + path, nicking = args + if os.path.getsize(path) < 1: + return None + + for i in ["FANCD2", "BRIP1", "RAD51C", "FABCI", "FANCA"]: + if path.startswith(i): + return None + + try: + df = pd.read_csv(path) + + if nicking: + key = os.path.basename(path).split("_nicking")[0] + df = df.with_columns(sequence_name=pd.lit(key)) + except Exception: + print(path) + return None + + if "low_conf" not in path: + df = df.with_columns(conf=pd.lit("high")) + else: + df = df.with_columns(conf=pd.lit("low")) + return df + + +def main(indir, output, nicking=False): + print(indir, output, nicking) + + fs = glob(os.path.join(indir, "*")) + + with Pool(6) as p: + dfs = list(tqdm(p.imap(read_file, [[x, nicking] for x in fs]), total=len(fs))) + + df = pd.concat([x for x in dfs if x is not None]) + + with gzip.open(output, "w+") as w: + df.write_csv(w) + + +if __name__ == '__main__': + from fire import Fire + Fire(main) diff --git a/select_primedesign.py b/select_primedesign.py new 
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Select at most two PrimeDesign rows per (gene, AA position, target codon).

Reads a gzipped PrimeDesign CSV, keeps only rows whose gene/position is not
already covered by the reference Excel sheet, groups the survivors by
gene + amino-acid position + destination codon, and randomly (but
deterministically) samples at most two rows per group into a gzipped CSV.
"""
import csv
import gzip
import random

import pandas as pd

try:
    from tqdm import tqdm
except ImportError:  # tqdm is progress-bar sugar only; degrade gracefully
    def tqdm(iterable, *args, **kwargs):
        return iterable

seed = 42  # fixed seed so the per-group sampling is reproducible


# Codon table keyed by amino-acid name (keys are runtime data — do not rename).
total_codons = {
    "丙氨酸": ["GCT", "GCC", "GCA", "GCG"],
    "精氨酸": ["CGT", "CGC", "CGA", "CGG", "AGA", "AGG"],
    "天冬酰胺": ["AAT", "AAC"],
    "天冬氨酸": ["GAT", "GAC"],
    "半胱氨酸": ["TGT", "TGC"],
    "谷氨酰胺": ["CAA", "CAG"],
    "谷氨酸": ["GAA", "GAG"],
    "甘氨酸": ["GGT", "GGC", "GGA", "GGG"],
    "组氨酸": ["CAT", "CAC"],
    "异亮氨酸": ["ATT", "ATC", "ATA"],
    "亮氨酸": ["TTA", "TTG", "CTT", "CTC", "CTA", "CTG"],
    "赖氨酸": ["AAA", "AAG"],
    "甲硫氨酸": ["ATG"],
    "苯丙氨酸": ["TTT", "TTC"],
    "脯氨酸": ["CCT", "CCC", "CCA", "CCG"],
    "丝氨酸": ["TCT", "TCC", "TCA", "TCG", "AGT", "AGC"],
    "苏氨酸": ["ACT", "ACC", "ACA", "ACG"],
    "色氨酸": ["TGG"],
    "酪氨酸": ["TAT", "TAC"],
    "缬氨酸": ["GTT", "GTC", "GTA", "GTG"],
    "终止密码子": ["TAA", "TAG", "TGA"]
}


def load_finished(ref):
    """Read the Excel sheet of already-designed sites.

    Returns a dict mapping gene name -> set of finished AA positions
    (positions are stored as strings to match the CSV parsing below).
    """
    df = pd.read_excel(ref)

    genes = {}
    for _, row in df.iterrows():
        genes.setdefault(row["gene"], set()).add(str(row["aa_pos"]))
    return genes


def reader(path):
    """Yield rows of the gzipped CSV at *path* as dicts, with a progress bar."""
    with gzip.open(path, "rt") as r:
        yield from tqdm(csv.DictReader(r))


def filter_(args):
    """Group candidate rows and write at most two per group to *output*.

    ``args`` is ``[finished, path, output]``: the finished-site map from
    :func:`load_finished`, the gzipped input CSV, and the gzipped output CSV.
    """
    finished, path, output = args

    data = {}
    for row in reader(path):
        # Extract the identifiers encoded in the target name,
        # e.g. "BRCA1_AA12_..._TAA" -> gene BRCA1, AA position 12, codon TAA.
        key = row["Target_name"].split("_")
        gene = key[0]
        pos = key[1].replace("AA", "")
        dst = key[-1]

        # BUGFIX: the id used to be built from ``key`` (the whole split list),
        # which made every target its own group and defeated the 2-per-group
        # sampling below; group by gene / position / destination codon instead.
        uid = f"{gene}_{pos}_{dst}"
        for aa_name, codons in total_codons.items():
            if dst in codons:
                # NOTE(review): only genes present in the reference sheet are
                # kept, and only positions not yet finished — confirm whether
                # genes absent from the sheet should also pass.
                if gene in finished and pos not in finished[gene]:
                    row["dst"] = aa_name
                    row["aa_pos"] = pos
                    row["gene"] = gene
                    data.setdefault(uid, []).append(row)

    dict_writer = None
    with gzip.open(output, "wt+") as w:
        for lines in tqdm(data.values()):
            if len(lines) > 2:
                # Re-seed before each group so the selection is reproducible.
                random.seed(seed)
                lines = random.sample(lines, 2)

            if dict_writer is None:
                # All rows share one schema; take the header from the first
                # surviving row.
                dict_writer = csv.DictWriter(w, fieldnames=lines[0].keys())
                dict_writer.writeheader()

            # Write the data rows.
            dict_writer.writerows(lines)


def main(ref, indir, outdir):
    """CLI entry point.

    NOTE(review): despite their names, ``indir``/``outdir`` are passed to
    :func:`filter_` as single input/output *files* — names kept for CLI
    compatibility, but confirm the intent.
    """
    finished = load_finished(ref)
    filter_([finished, indir, outdir])


if __name__ == '__main__':
    from fire import Fire
    Fire(main)