Skip to content

Commit

Permalink
Lint pipeline code
Browse files Browse the repository at this point in the history
Closes #120
  • Loading branch information
kevinlul committed Jun 5, 2024
1 parent b3a0011 commit 20691bb
Show file tree
Hide file tree
Showing 14 changed files with 392 additions and 142 deletions.
70 changes: 70 additions & 0 deletions .github/workflows/lint.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
# SPDX-FileCopyrightText: © 2024 Kevin Lu
# SPDX-License-Identifier: AGPL-3.0-or-later
name: Lint pipeline code
# Run only when pipeline code under src/ changes, on master pushes and PRs.
on:
  push:
    branches: [master]
    paths:
      - src/**/*
  pull_request:
    branches: [master]
    paths:
      - src/**/*
# Default to no token permissions; jobs opt in to what they need.
permissions: {}
# Cancel superseded runs for the same ref to save CI minutes.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  # Supply-chain scan of Python dependencies.
  # https://github.com/DataDog/guarddog
  guarddog:
    runs-on: ubuntu-latest
    permissions:
      security-events: write # needed to upload SARIF to code scanning
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - run: pip install guarddog
      - run: guarddog pypi verify src/requirements.txt --output-format sarif --exclude-rules repository_integrity_mismatch > guarddog.sarif
      - uses: github/codeql-action/upload-sarif@v3
        with:
          category: guarddog-builtin
          sarif_file: guarddog.sarif
  # Format + lint Python sources, reporting both inline (GitHub annotations)
  # and to code scanning (SARIF).
  # https://github.com/astral-sh/ruff
  lint-python:
    runs-on: ubuntu-latest
    permissions:
      security-events: write # needed to upload SARIF to code scanning
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - run: pip install ruff
      - run: ruff format --check src
      - run: ruff check --output-format=github src
        if: ${{ !cancelled() }} # still lint even if the format check failed
      - run: ruff check --output-format=sarif src > ruff.sarif
      - uses: github/codeql-action/upload-sarif@v3
        with:
          category: ruff
          sarif_file: ruff.sarif
  lint-typescript:
    runs-on: ubuntu-latest
    steps:
      # v4 to match the other jobs (was inconsistently pinned to v3).
      # Sparse checkout skips the large /data/ tree, which linting never reads.
      - uses: actions/checkout@v4
        with:
          sparse-checkout: |
            /*
            !/data/
          sparse-checkout-cone-mode: false
      - uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: yarn
          cache-dependency-path: yarn.lock
      - run: yarn
      - run: yarn prettier --check 'src/**/*.{js,ts}'
      - if: ${{ !cancelled() }} # still run eslint even if prettier failed
        run: yarn eslint 'src/**/*.ts'
29 changes: 15 additions & 14 deletions src/assignments/check-for-missing.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,9 @@ if (process.argv.length < 3) {

// https://yugipedia.com/wiki/Card_Number
// See also job_ocgtcg.py:annotate_assignments
function isPrereleaseMissingCardNumber(card: any) {
const release = card.sets.ja?.length
? card.sets.ja[0]
: card.sets.en?.length
? card.sets.en[0]
: null;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function isPrereleaseMissingCardNumber(card: any): boolean {
const release = card.sets.ja?.length ? card.sets.ja[0] : card.sets.en?.length ? card.sets.en[0] : null;
if (!release) {
console.error(`ERROR: ${card.yugipedia_page_id}\t[${card.name.en}]\tNo JP or EN sets found!`);
return true;
Expand All @@ -25,18 +22,17 @@ function isPrereleaseMissingCardNumber(card: any) {
const position = release.set_number.split("-")[1].slice(3);
const isMissing = isNaN(Number(position));
if (isMissing) {
console.warn(`WARNING: ${card.yugipedia_page_id}\t[${card.name.en}]\tNot counted due to unknown set position ${position}`);
console.warn(
`WARNING: ${card.yugipedia_page_id}\t[${card.name.en}]\tNot counted due to unknown set position ${position}`
);
}
return isMissing;
}

// Okay to skip these before print, e.g. Anotherverse Gluttonia
function isPrereleasePrizeCard(card: any) {
const release = card.sets.ja?.length
? card.sets.ja[0]
: card.sets.en?.length
? card.sets.en[0]
: null;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function isPrereleasePrizeCard(card: any): boolean {
const release = card.sets.ja?.length ? card.sets.ja[0] : card.sets.en?.length ? card.sets.en[0] : null;
const isPrizeCard = release?.set_number.split("-")[0] === "YCSW";
if (isPrizeCard) {
console.warn(`WARNING: ${card.yugipedia_page_id}\t[${card.name.en}]\tNot counted due to being a prize card`);
Expand All @@ -50,7 +46,12 @@ function isPrereleasePrizeCard(card: any) {
for (const file of files) {
if (file.endsWith(".json")) {
const card = JSON.parse(await fs.promises.readFile(path.join(process.argv[2], file), "utf8"));
if (!card.password && !card.fake_password && !isPrereleaseMissingCardNumber(card) && !isPrereleasePrizeCard(card)) {
if (
!card.password &&
!card.fake_password &&
!isPrereleaseMissingCardNumber(card) &&
!isPrereleasePrizeCard(card)
) {
missingFakePasswords.push(card);
}
}
Expand Down
6 changes: 4 additions & 2 deletions src/check-for-missing-ko.ts
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,11 @@ if (process.argv.length < 3) {
}
}
if (missingKoreanTranslation.length) {
console.log(`yugipedia_page_id\tkonami_id\tpassword\tfake_password\tname.en\tname.ja`)
console.log(`yugipedia_page_id\tkonami_id\tpassword\tfake_password\tname.en\tname.ja`);
for (const card of missingKoreanTranslation) {
console.log(`${card.yugipedia_page_id}\t${card.konami_id}\t${card.password}\t${card.fake_password}\t[${card.name.en}]\t[${card.name.ja}]`);
console.log(
`${card.yugipedia_page_id}\t${card.konami_id}\t${card.password}\t${card.fake_password}\t[${card.name.en}]\t[${card.name.ja}]`
);
}
process.exit(2);
}
Expand Down
56 changes: 37 additions & 19 deletions src/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,9 @@
}


def set_unofficial_translation_flag(key: str, template: wtp.Template, output: Dict[str, Any]) -> None:
def set_unofficial_translation_flag(
key: str, template: wtp.Template, output: Dict[str, Any]
) -> None:
flags = output.setdefault("is_translation_unofficial", {}).setdefault(key, {})
for lang in template.arguments[0].value.split(","):
flags[UNOFFICIAL_LANGUAGES[lang.strip()]] = True
Expand All @@ -52,7 +54,9 @@ def expand_templates(template: wtp.Template) -> str:
return ""


def initial_parse(yaml: YAML, yaml_file: str, target: str = "CardTable2") -> Optional[Dict[str, str]]:
def initial_parse(
yaml: YAML, yaml_file: str, target: str = "CardTable2"
) -> Optional[Dict[str, str]]:
with open(yaml_file) as f:
document = yaml.load(f)
properties = {}
Expand Down Expand Up @@ -121,11 +125,13 @@ def parse_sets(sets: str) -> List[Dict[str, str]]:
else:
rarities = None
logger.warning(f"Sets missing second semicolon: {printing}")
result.append({
"set_number": set_number.strip(),
"set_name": set_name.strip(),
"rarities": rarities.split(", ") if rarities else None
})
result.append(
{
"set_number": set_number.strip(),
"set_name": set_name.strip(),
"rarities": rarities.split(", ") if rarities else None,
}
)
return result


Expand Down Expand Up @@ -180,7 +186,9 @@ def transform_image(image: str) -> List[Dict[str, str]]:
return [transform_image_entry(entry) for entry in tokens]


def transform_names(wikitext: Dict[str, str], zh_cn_fallback: Optional[str] = None) -> Dict[str, str]:
def transform_names(
wikitext: Dict[str, str], zh_cn_fallback: Optional[str] = None
) -> Dict[str, str]:
return {
"en": wikitext["en_name"],
"de": wikitext.get("de_name"),
Expand All @@ -193,11 +201,13 @@ def transform_names(wikitext: Dict[str, str], zh_cn_fallback: Optional[str] = No
"ko": wikitext.get("ko_name"),
"ko_rr": wikitext.get("ko_rr_name"),
"zh-TW": wikitext.get("tc_name"),
"zh-CN": wikitext.get("sc_name") or zh_cn_fallback
"zh-CN": wikitext.get("sc_name") or zh_cn_fallback,
}


def transform_texts(wikitext: Dict[str, str], zh_cn_fallback: Optional[str] = None) -> Dict[str, str]:
def transform_texts(
wikitext: Dict[str, str], zh_cn_fallback: Optional[str] = None
) -> Dict[str, str]:
return {
"en": str_or_none(wikitext.get("lore")), # should never be none
"de": str_or_none(wikitext.get("de_lore")),
Expand All @@ -208,7 +218,7 @@ def transform_texts(wikitext: Dict[str, str], zh_cn_fallback: Optional[str] = No
"ja": str_or_none(wikitext.get("ja_lore")),
"ko": str_or_none(wikitext.get("ko_lore")),
"zh-TW": str_or_none(wikitext.get("tc_lore")),
"zh-CN": str_or_none(wikitext.get("sc_lore") or zh_cn_fallback)
"zh-CN": str_or_none(wikitext.get("sc_lore") or zh_cn_fallback),
}


Expand All @@ -235,7 +245,7 @@ def transform_multilanguage(wikitext: Dict[str, str], basename: str) -> Dict[str
"Middle-Right": "➡", # YGOPRODECK: Right
"Top-Left": "↖",
"Top-Center": "⬆", # YGOPRODECK: Top
"Top-Right": "↗"
"Top-Right": "↗",
}


Expand All @@ -254,7 +264,10 @@ def annotate_shared(document: Dict[str, Any], wikitext: Dict[str, str]) -> None:
if "rank" in wikitext:
document["rank"] = int(wikitext["rank"])
elif "link_arrows" in wikitext:
document["link_arrows"] = [LINK_ARROW_MAPPING[arrow] for arrow in wikitext["link_arrows"].split(", ")]
document["link_arrows"] = [
LINK_ARROW_MAPPING[arrow]
for arrow in wikitext["link_arrows"].split(", ")
]
else:
document["level"] = int(wikitext["level"])
document["atk"] = int_or_og(wikitext["atk"])
Expand All @@ -272,7 +285,10 @@ def annotate_shared(document: Dict[str, Any], wikitext: Dict[str, str]) -> None:
"ja": str_or_none(wikitext.get("ja_pendulum_effect")),
"ko": str_or_none(wikitext.get("ko_pendulum_effect")),
"zh-TW": str_or_none(wikitext.get("tc_pendulum_effect")),
"zh-CN": str_or_none(wikitext.get("sc_pendulum_effect") or wikitext.get("ourocg_pendulum"))
"zh-CN": str_or_none(
wikitext.get("sc_pendulum_effect")
or wikitext.get("ourocg_pendulum")
),
}
# bonus derived fields
if "ritualcard" in wikitext:
Expand Down Expand Up @@ -303,11 +319,13 @@ def load_ko_csv(key: str, filename: Optional[str]) -> Dict[int, Dict[str, str]]
return
with open(filename, encoding="utf-8-sig") as f:
reader = DictReader(f)
return {
int(row[key]): row
for row in reader
}
return {int(row[key]): row for row in reader}


# Replace Unicode interlinear annotations with HTML markup https://www.unicode.org/charts/nameslist/n_FFF0.html
def replace_interlinear_annotations(name: str) -> str:
return name.replace("\ufff9", "<ruby>").replace("\ufffa", "<rt>").replace("\ufffb", "</rt></ruby>")
return (
name.replace("\ufff9", "<ruby>")
.replace("\ufffa", "<rt>")
.replace("\ufffb", "</rt></ruby>")
)
Loading

0 comments on commit 20691bb

Please sign in to comment.