diff --git a/.github/script/Dockerfile b/.github/script/Dockerfile new file mode 100644 index 000000000..d22a38675 --- /dev/null +++ b/.github/script/Dockerfile @@ -0,0 +1,9 @@ +# Container image that runs your code +FROM python:3.6 + +RUN pip3 install requests -i https://mirrors.aliyun.com/pypi/simple/ && pip3 install art -i https://mirrors.aliyun.com/pypi/simple/ +# Copies your code file from your action repository to the filesystem path `/` of the container +COPY seekdb.py /seekdb.py + +# Code file to execute when the docker container starts up (`entrypoint.sh`) +ENTRYPOINT ["python3", "-u", "/seekdb.py"] \ No newline at end of file diff --git a/.github/script/action.yml b/.github/script/action.yml new file mode 100644 index 000000000..9fe744c92 --- /dev/null +++ b/.github/script/action.yml @@ -0,0 +1,24 @@ +# action.yml +name: 'call seekdb Farm2 to run task' +description: '' +inputs: + pipeline_id: + description: 'pipeline_id' + required: true + project: + description: 'project' + required: true + timeout: + description: 'timeout' + required: false + default: '10800' +outputs: + success: + description: 'the status for the task' +runs: + using: 'docker' + image: 'Dockerfile' + args: + - ${{ inputs.pipeline_id }} + - ${{ inputs.project }} + - ${{ inputs.timeout }} diff --git a/.github/script/seekdb.py b/.github/script/seekdb.py new file mode 100644 index 000000000..6a0c232ea --- /dev/null +++ b/.github/script/seekdb.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- +import copy +import os +import sys +import traceback +import time +import json +import requests +from enum import Enum +from http import HTTPStatus + +from art import text2art + +OUTPUT = {} +RESULT_FILE_KEY = "farm/seekdb_results/" +TASK_QUEUE_FILE_KEY = "farm/seekdb_jobs/{}.json" + + +def _range(start, last): + def to_str(pos): + if pos is None: + return '' + else: + return str(pos) + + return to_str(start) + '-' + to_str(last) + + +def _make_range_string(range): + if range is None: + return '' + + 
start = range[0] + last = range[1] + + if start is None and last is None: + return '' + + return 'bytes=' + _range(start, last) + + +class OssProxy: + + def __init__(self, endpoint=""): + self.endpoint = endpoint + + def get_object(self, key, _range=None): + url = "{}/{}".format(self.endpoint, key) + headers = {} + if _range is not None: + _range = (_range, None) + headers.update({"range": _make_range_string(_range)}) + res = requests.get(url, headers=headers) + if res.status_code < 400: + result = res.content.decode() + return result + return "" + + def get_object_meta(self, key): + url = "{}/{}".format(self.endpoint, key) + headers = {} + res = requests.head(url, headers=headers) + return res.headers + + def exists_object(self, key): + ... + + +class GithubProxy: + + def __init__(self, host="api.github.com"): + self.host = host + + def get_job_by_id(self, project, pipeline_id): + url = "https://{}/repos/{}/actions/runs/{}".format( + self.host, project, pipeline_id + ) + try: + res = requests.get( + url, headers={ + "Accept": "application/vnd.github+json" + } + ) + status_code = res.status_code + if status_code == HTTPStatus.NOT_FOUND: + return {} + return res.json() + except: + traceback.print_exc() + return {} + + +class TaskStatusEnum(Enum): + submitting = 0 + pending = 1 + running = 2 + stopping = 3 + success = 4 + fail = -1 + kill = -2 + timeout = -3 + submit_task_fail = -4 + + +def request(method, url, params=None, payload=None, timeout=10, data=None, without_check_status=False): + params = params or {} + try: + response = requests.request( + method, + url, + params=params, + json=payload, + data=data, + timeout=timeout + ) + if not without_check_status and response.status_code >= 300: + try: + msg = response.json()["msg"] + except: + msg = response.text + print("[ERROR] MSG:{}".format(msg)) + exit(1) + return response + except Exception: + import traceback + traceback.print_exc() + print("Please contact the management personnel for assistance !") + if not 
without_check_status: + exit(1) + + +def monitor_tasks(oss_proxy: OssProxy, github_pipeline_id, timeout, result_out_path=None): + end_time = time.time() + int(timeout) + end_task = False + task_data = None + while time.time() <= end_time: + if end_task is True: + pass + task_data = get_task_res(oss_proxy, github_pipeline_id) + if task_data: + end_task = True + + time.sleep(1) + if task_data is not None: + task_status = int(task_data["status"]) + output_url = "https://ce-farm.oceanbase-dev.com/farm2/ci/?id={}".format(task_data["task_id"]) + if task_status <= TaskStatusEnum.fail.value: + print(TaskStatusEnum._value2member_map_[task_status]) + print("there is the output url: {}".format(output_url)) + if result_out_path: + _write_result_json(result_out_path, task_data, success=False) + return False + elif task_status >= TaskStatusEnum.success.value: + print(TaskStatusEnum._value2member_map_[task_status]) + print("there is the output url: {}".format(output_url)) + if result_out_path: + _write_result_json(result_out_path, task_data, success=True) + return True + + time.sleep(5) + if result_out_path: + _write_result_json(result_out_path, task_data or {}, success=False, timeout=True) + return False + + +def _write_result_json(path, task_data, success, timeout=False): + out = { + "success": success, + "timeout": timeout, + "task_id": task_data.get("task_id") if task_data else None, + "status": task_data.get("status") if task_data else None, + "output_url": "https://ce-farm.oceanbase-dev.com/farm2/ci/?id={}".format(task_data["task_id"]) if task_data and task_data.get("task_id") else None, + } + with open(path, "w") as f: + json.dump(out, f, indent=2) + + +def get_task_res(oss_proxy: OssProxy, github_pipeline_id): + try: + result_key = RESULT_FILE_KEY + "{}.json".format(github_pipeline_id) + origin_task_data = oss_proxy.get_object(result_key) + return json.loads(origin_task_data) + except: + return + + +def main(pipeline_id, project, timeout, result_out_path=None): + 
print("create a new task") + print("working....") + logo = text2art('seekdb Farm2') + print(logo) + oss_proxy = OssProxy("https://obfarm-ce.oss-cn-hongkong.aliyuncs.com") + github_proxy = GithubProxy() + job_info = github_proxy.get_job_by_id(project, pipeline_id) + attempt_number = job_info["run_attempt"] + run_pipeline_id = "{}-{}".format(pipeline_id, attempt_number) + result = monitor_tasks(oss_proxy, run_pipeline_id, timeout, result_out_path=result_out_path) + if not result: + exit(1) + + +if __name__ == "__main__": + print(sys.argv) + if len(sys.argv) < 4: + print("Missing relevant parameters !") + OUTPUT.update({"success": -1}) + sys.exit(1) + out_path = sys.argv[4] if len(sys.argv) > 4 else None + main(sys.argv[1], sys.argv[2], sys.argv[3], result_out_path=out_path) \ No newline at end of file diff --git a/.github/script/seekdb_native/README.md b/.github/script/seekdb_native/README.md new file mode 100644 index 000000000..3d4194d66 --- /dev/null +++ b/.github/script/seekdb_native/README.md @@ -0,0 +1,32 @@ +# SeekDB 执行下沉脚本(Native) + +与 `seekdb-native.yml` 配套,在 self-hosted Runner 上直接执行 Prepare → Compile → Mysqltest,不经过 Farm2。 + +**脚本来源**:已从 farm-jenkins 复制到本目录 `scripts/`,无需再配置 FARM2_SCRIPTS_REPO 或 clone。 + +| 文件 | 说明 | +|------|------| +| `scripts/frame.sh` | 自 farm-jenkins 复制并改造(/etc/hosts、dep_cache 在无权限时跳过) | +| `scripts/farm_compile.sh` | 自 farm-jenkins 复制 | +| `scripts/farm_post_compile.sh` | 自 farm-jenkins 复制 | +| `scripts/mysqltest_for_farm.sh` | 自 farm-jenkins 复制 | +| `scripts/dep_cache.sh` | 本仓 stub,供 frame 在无 dep_create 时使用 | +| `prepare.sh` | 仅生成 jobargs.output、run_jobs.output | +| `compile.sh` | 调用 scripts/farm_compile.sh(frame prepare + build) | +| `mysqltest_slice.sh` | 调用 scripts/mysqltest_for_farm.sh | +| `collect_result.sh` | 汇总 fail_cases 写 seekdb_result.json | + +## 仓库变量(可选) + +| 变量 | 说明 | +|------|------| +| `FARM2_WORKER_IMAGE` | 编译/测试用 Docker 镜像;不设则在 Runner 本机执行 | +| `FORWARDING_HOST` | mirrors.oceanbase.com 解析到该主机时填写 | +| `RELEASE_MODE` | 
非空则 release 编译 | + +## 产物位置 + +- 任务目录:`$GITHUB_WORKSPACE/seekdb_build/$GITHUB_RUN_ID/` +- 编译产出:observer.zst、obproxy.zst、compile.output +- 各 slice:mysqltest.output.$i、collected_log_$i.tar.gz 等 +- 小结果由 workflow 上传为 artifact `seekdb-result-native` diff --git a/.github/script/seekdb_native/collect_result.sh b/.github/script/seekdb_native/collect_result.sh new file mode 100644 index 000000000..5efcd11f3 --- /dev/null +++ b/.github/script/seekdb_native/collect_result.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +# Collect result and write seekdb_result.json for native execution. +# 参考 farm-jenkins: scripts/opensource/scripts/opensource_publish_result.sh 的结论汇总方式。 +set -e + +WORKSPACE="${GITHUB_WORKSPACE:-.}" +TASK_DIR="${SEEKDB_TASK_DIR:-$WORKSPACE/seekdb_build/$GITHUB_RUN_ID}" +OUT_JSON="${1:-$WORKSPACE/seekdb_result.json}" + +# Success if no fail_cases or empty; otherwise fail +FAILED="" +if [[ -f "$TASK_DIR/fail_cases.output" ]] && [[ -s "$TASK_DIR/fail_cases.output" ]]; then + FAILED=$(cat "$TASK_DIR/fail_cases.output" | tr '\n' ' ' | sed 's/"/\\"/g') + SUCCESS="false" +else + SUCCESS="true" +fi + +cat > "$OUT_JSON" << EOF +{ + "success": ${SUCCESS}, + "task_id": "${GITHUB_RUN_ID:-native}", + "output_url": "", + "native": true, + "failed_cases": "${FAILED//\"/\\\"}" +} +EOF +echo "Wrote $OUT_JSON" +cat "$OUT_JSON" diff --git a/.github/script/seekdb_native/compile.sh b/.github/script/seekdb_native/compile.sh new file mode 100644 index 000000000..3dfe783f6 --- /dev/null +++ b/.github/script/seekdb_native/compile.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +# Compile step for SeekDB native execution (执行下沉). +# 使用本仓已复制的 farm 脚本:.github/script/seekdb_native/scripts/ +set -e + +WORKSPACE="${GITHUB_WORKSPACE:-.}" +TASK_DIR="${SEEKDB_TASK_DIR:-$WORKSPACE/seekdb_build/$GITHUB_RUN_ID}" +COMPILE_RUNDIR="$TASK_DIR/compile_rundir" +# 脚本来自本仓,不再 clone +SCRIPTS_DIR="$WORKSPACE/.github/script/seekdb_native/scripts" +mkdir -p "$COMPILE_RUNDIR" + +if [[ ! 
-f "$TASK_DIR/jobargs.output" ]] || [[ ! -f "$TASK_DIR/run_jobs.output" ]]; then + echo "Missing jobargs.output or run_jobs.output in $TASK_DIR. Run prepare first." + exit 1 +fi + +if [[ ! -f "$SCRIPTS_DIR/farm_compile.sh" ]] || [[ ! -f "$SCRIPTS_DIR/frame.sh" ]]; then + echo "Missing farm_compile.sh / frame.sh under $SCRIPTS_DIR. Scripts should be copied from farm-jenkins." + exit 1 +fi + +# 链到 HOME,供 source $HOME/scripts/frame.sh 与 farm_compile.sh 使用 +ln -sfn "$SCRIPTS_DIR" "$COMPILE_RUNDIR/scripts" + +export HOME="$COMPILE_RUNDIR" +export _CONDOR_JOB_IWD="$COMPILE_RUNDIR" +export REPO="server" +export CREATE_AGENTSERVER=0 +export CREATE_LIBOBSERVER_SO=0 +export ENABLE_LIBOBLOG=0 +export BUILD_TARGET="" +export CODE_URL="${CODE_URL:-https://github.com/${GITHUB_REPOSITORY}.git}" +export BRANCH="${BRANCH:-$GITHUB_REF_NAME}" +export COMMIT="${COMMIT:-}" +if [[ -n "${RELEASE_MODE:-}" ]]; then + export PACKAGE_TYPE="release" +else + export PACKAGE_TYPE="debug" +fi +if [[ -n "${FORWARDING_HOST:-}" ]]; then + echo "$FORWARDING_HOST mirrors.oceanbase.com" >> /etc/hosts 2>/dev/null || true +fi + +# 执行 farm_compile.sh:内部 source frame.sh && main +cd "$HOME" +bash "$HOME/scripts/farm_compile.sh" +COMPILE_EXIT=$? +[[ -f "$HOME/scripts/farm_post_compile.sh" ]] && bash "$HOME/scripts/farm_post_compile.sh" "$COMPILE_EXIT" || true + +# 压缩并拷贝产物到 TASK_DIR +for f in observer obproxy; do + [[ -f "$COMPILE_RUNDIR/$f" ]] && zstd -f "$COMPILE_RUNDIR/$f" 2>/dev/null || true +done +for fn in observer.zst obproxy.zst compile.output; do + [[ -f "$COMPILE_RUNDIR/$fn" ]] && cp "$COMPILE_RUNDIR/$fn" "$TASK_DIR/" +done +[[ -f "$COMPILE_RUNDIR/dep_cache.tar.zst" ]] && cp "$COMPILE_RUNDIR/dep_cache.tar.zst" "$TASK_DIR/" || true +[[ -f "$COMPILE_RUNDIR/post_compile.output" ]] && cp "$COMPILE_RUNDIR/post_compile.output" "$TASK_DIR/" || true + +echo "Compile done. 
Artifacts in $TASK_DIR" +ls -la "$TASK_DIR" diff --git a/.github/script/seekdb_native/mysqltest_slice.sh b/.github/script/seekdb_native/mysqltest_slice.sh new file mode 100644 index 000000000..da4726ced --- /dev/null +++ b/.github/script/seekdb_native/mysqltest_slice.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +# Run one mysqltest slice for SeekDB native execution (执行下沉). +# 使用本仓已复制的 scripts/mysqltest_for_farm.sh。 +set -e + +WORKSPACE="${GITHUB_WORKSPACE:-.}" +TASK_DIR="${SEEKDB_TASK_DIR:-$WORKSPACE/seekdb_build/$GITHUB_RUN_ID}" +SLICE_IDX="${SLICE_IDX:-0}" +SLICES="${SLICES:-4}" +MYSQLTEST_RUNDIR="$TASK_DIR/mysqltest_rundir_$SLICE_IDX" +SCRIPTS_DIR="$WORKSPACE/.github/script/seekdb_native/scripts" +mkdir -p "$MYSQLTEST_RUNDIR" + +for f in observer.zst obproxy.zst; do + if [[ ! -f "$TASK_DIR/$f" ]]; then + echo "Missing $TASK_DIR/$f. Run compile first." + exit 1 + fi +done +if [[ ! -d "$TASK_DIR/oceanbase" ]]; then + echo "Missing $TASK_DIR/oceanbase. Run compile first (frame prepare clones it)." + exit 1 +fi + +if [[ ! -f "$SCRIPTS_DIR/mysqltest_for_farm.sh" ]]; then + echo "Missing mysqltest_for_farm.sh under $SCRIPTS_DIR." 
+ exit 1 +fi +ln -sfn "$SCRIPTS_DIR" "$MYSQLTEST_RUNDIR/scripts" + +export HOME="$MYSQLTEST_RUNDIR" +export _CONDOR_JOB_IWD="$MYSQLTEST_RUNDIR" +export CODE_URL="${CODE_URL:-https://github.com/${GITHUB_REPOSITORY}.git}" +export BRANCH="${BRANCH:-$GITHUB_REF_NAME}" +export REPO="server" +export SLICE_IDX SLICES +export GID="${GITHUB_RUN_ID:-$GITHUB_RUN_ID}" +export JOBNAME=mysqltest +export MRID="" +export WITH_PROXY="1" +export ARGV="psmall log-pattern=*" +export CLUSTER_SPEC="2x1" +export INPUT_FILES="observer,obproxy" +export MINI="1" +export FROM_FARM="1" +export SLB="" +export _CONDOR_SLOT="slot$SLICE_IDX" + +# 将 observer/obproxy 放入 HOME,供 mysqltest_for_farm run() 使用 +zstd -d -f "$TASK_DIR/observer.zst" -o "$MYSQLTEST_RUNDIR/observer" +zstd -d -f "$TASK_DIR/obproxy.zst" -o "$MYSQLTEST_RUNDIR/obproxy" +chmod +x "$MYSQLTEST_RUNDIR/observer" "$MYSQLTEST_RUNDIR/obproxy" 2>/dev/null || true + +if [[ -n "${FORWARDING_HOST:-}" ]]; then + echo "$FORWARDING_HOST mirrors.oceanbase.com" >> /etc/hosts 2>/dev/null || true +fi + +cd "$HOME" +bash "$HOME/scripts/mysqltest_for_farm.sh" +MYSQLTEST_EXIT=$? + +# 收集 slice 产出到 TASK_DIR +for fn in mysqltest.output."$SLICE_IDX" mysqltest.error."$SLICE_IDX" collected_log_"$SLICE_IDX".tar.gz mysqltest_compare_output."$SLICE_IDX"; do + [[ -f "$MYSQLTEST_RUNDIR/$fn" ]] && cp "$MYSQLTEST_RUNDIR/$fn" "$TASK_DIR/" || true +done +[[ -f "$MYSQLTEST_RUNDIR/oceanbase/tools/deploy/compare.out" ]] && cp "$MYSQLTEST_RUNDIR/oceanbase/tools/deploy/compare.out" "$TASK_DIR/mysqltest_compare_output.$SLICE_IDX" 2>/dev/null || true + +if [[ $MYSQLTEST_EXIT -ne 0 ]]; then + echo "++mysqltest++${SLICE_IDX}++" >> "$TASK_DIR/fail_cases.output" +fi +exit "$MYSQLTEST_EXIT" diff --git a/.github/script/seekdb_native/prepare.sh b/.github/script/seekdb_native/prepare.sh new file mode 100644 index 000000000..65b0eb9c1 --- /dev/null +++ b/.github/script/seekdb_native/prepare.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# Prepare step for SeekDB native execution (执行下沉). 
+# 仅生成 jobargs.output、run_jobs.output;脚本已从 farm-jenkins 复制到本仓 .github/script/seekdb_native/scripts/,无需 clone。 +set -e + +MYSQLTEST_SLICES="${MYSQLTEST_SLICES:-4}" +WORKSPACE="${GITHUB_WORKSPACE:-.}" +TASK_DIR="${SEEKDB_TASK_DIR:-$WORKSPACE/seekdb_build/$GITHUB_RUN_ID}" +mkdir -p "$TASK_DIR" + +# 1. 生成 run_jobs.output(与 seekdb.groovy / farm-jenkins 格式一致) +echo '++compile++' > "$TASK_DIR/run_jobs.output" +for i in $(seq 0 $((MYSQLTEST_SLICES - 1))); do + echo "++mysqltest++$i++" >> "$TASK_DIR/run_jobs.output" +done + +# 2. 生成 jobargs.output +{ + echo '++is_cmake++' + echo '++need_agentserver++0' + echo '++need_libobserver_so++0' + echo '++need_liboblog++0' +} > "$TASK_DIR/jobargs.output" + +echo "Prepare done. TASK_DIR=$TASK_DIR" +ls -la "$TASK_DIR" diff --git a/.github/script/seekdb_native/scripts/dep_cache.sh b/.github/script/seekdb_native/scripts/dep_cache.sh new file mode 100644 index 000000000..6b80b5692 --- /dev/null +++ b/.github/script/seekdb_native/scripts/dep_cache.sh @@ -0,0 +1,5 @@ +#!/bin/bash +# Stub for native runner: dep_cache 仅在存在 /etc/profile.d/dep_create.sh 时由 frame.sh 加载。 +# 此处仅保证 DEP_CACHE_DIR 有默认值,避免 frame prepare 报错。 +[[ -z "$DEP_CACHE_DIR" ]] && export DEP_CACHE_DIR="${DEP_CACHE_DIR:-$HOME/../../dep_cache}" +return 0 diff --git a/.github/script/seekdb_native/scripts/farm_compile.sh b/.github/script/seekdb_native/scripts/farm_compile.sh new file mode 100644 index 000000000..971459af7 --- /dev/null +++ b/.github/script/seekdb_native/scripts/farm_compile.sh @@ -0,0 +1,221 @@ +#!/bin/sh +# 自 farm-jenkins/scripts/opensource/scripts/farm_compile.sh 复制 +set +x +export HOME=$_CONDOR_JOB_IWD +PRINT_ENV=1 + +MAKE=${MAKE:-make} +MAKE_ARGS=${MAKE_ARGS:--j32} + +## use cmake +function run_new_oceanbase() +{ + cd $HOME/oceanbase + + TARGET=observer + BINARY="src/observer/observer " + + if [[ "$PACKAGE_TYPE" == "release" ]] + then + BUILD_TARGET="release" + fi + + BINARY_PATH=$HOME + if [[ $NEED_CE_TEST_PARALLEL == "1" ]] + then + mkdir -p $BINARY_PATH + 
elif [[ $NEED_SANITY_TEST_PARALLEL == "1" ]] + then + mkdir -p $BINARY_PATH + BUILD_TARGET="sanity" + fi + + if [[ -f $HOME/oceanbase/tools/ob_admin/CMakeLists.txt ]] + then + TARGET="$TARGET ob_admin" + fi + + if [[ $NEED_CE_TEST_PARALLEL == "2" || $NEED_CE_TEST_PARALLEL == "1" ]] + then + BUILD_TARGET="$BUILD_TARGET -DOB_BUILD_OPENSOURCE=ON" + fi + + if [ "$ENABLE_LIBOBLOG" == "1" ] + then + if [ -f $HOME/oceanbase/.ce ] + then + BUILD_TARGET="$BUILD_TARGET -DOB_BUILD_LIBOBLOG=ON" + fi + if [ -f $HOME/oceanbase/test/.obcdc_conf ] + then + code_path=`sed '/^code_path=/!d;s/.*=//' $HOME/oceanbase/test/.obcdc_conf` + if [ -d $HOME/oceanbase/$code_path ] + then + target_path=`sed '/^output_path=/!d;s/.*=//' $HOME/oceanbase/test/.obcdc_conf` + so_name=`sed '/^so_name=/!d;s/.*=//' $HOME/oceanbase/test/.obcdc_conf` + tailf_name=`sed '/^tailf_name=/!d;s/.*=//' $HOME/oceanbase/test/.obcdc_conf` + target_str=`sed '/^build_target=/!d;s/.*=//' $HOME/oceanbase/test/.obcdc_conf | sed 's/,/ /'` + so_name_V=`sed '/^so_name_V=/!d;s/.*=//' $HOME/oceanbase/test/.obcdc_conf` + so_name_v=`sed '/^so_name_v=/!d;s/.*=//' $HOME/oceanbase/test/.obcdc_conf` + TARGET="$TARGET $target_str" + BINARY="$BINARY $target_path""src/$so_name $target_path""src/$so_name_V $target_path""src/$so_name_v $target_path""tests/$tailf_name" + else + echo "读取.obcdc_conf配置文件, 获取路径$code_path, 对应目录不存在!" 
+ return 1 + fi + fi + if [ -d $HOME/oceanbase/tools/obcdc ] + then + TARGET="$TARGET obcdc obcdc_tailf" + BINARY="$BINARY tools/obcdc/src/libobcdc.so.1.0.0 tools/obcdc/tests/obcdc_tailf" + else + touch $BINARY_PATH/libobcdc.so.1.0.0 $BINARY_PATH/obcdc_tailf + fi + if [ -d $HOME/oceanbase/src/liboblog ] + then + TARGET="$TARGET oblog oblog_tailf" + BINARY="$BINARY src/liboblog/src/liboblog.so.1.0.0 src/liboblog/tests/oblog_tailf" + else + touch $HOME/liboblog.so.1.0.0 $HOME/oblog_tailf + fi + fi + + if [ "$CREATE_AGENTSERVER" == "1" ] && [ -f $HOME/oceanbase/tools/obtest/backup_restore_farm.sh ] + then + TARGET="$TARGET agentserver incbackupserver" + BINARY="$BINARY tools/agentserver/agentserver tools/agentserver/incbackupserver" + fi + + if [ "$CREATE_LIBOBSERVER_SO" == "1" ] + then + TARGET="$TARGET oceanbase" + BINARY="$BINARY src/observer/liboceanbase.so*" + fi + + if [ -d $HOME/oceanbase/tools/ob_error ] + then + TARGET="$TARGET oberror" + BINARY="$BINARY tools/ob_error/src/liboberror.so" + else + touch $HOME/liboberror.so + fi + if [[ -f $HOME/oceanbase/test/compile/compile.conf ]] + then + EXTRA_ARGV=`sed 's/.*EXTRA_COMPILE_ARGS=\(.*\)/\1/' $HOME/oceanbase/test/compile/compile.conf` + BUILD_TARGET="$BUILD_TARGET $EXTRA_ARGV" + fi + sed -i 's/mirrors.aliyun.com/mirrors.cloud.aliyuncs.com/g' $HOME/oceanbase/deps/init/oceanbase.el7.x86_64.deps 2>/dev/null || true + sh -x build.sh $BUILD_TARGET --init || return + cd build_* || return + + start=$(date +%s) + time $MAKE $MAKE_ARGS $TARGET + ret=$? 
+ + mv $BINARY $BINARY_PATH + + if [ -f $HOME/oceanbase/test/.obcdc_conf ] + then + if [[ -f $BINARY_PATH/$so_name ]] + then + if [[ $so_name != "libobcdc.so" ]] + then + cp $BINARY_PATH/$so_name $BINARY_PATH/libobcdc.so + fi + elif [[ -f $BINARY_PATH/$so_name_v ]] + then + cp $BINARY_PATH/$so_name_v $BINARY_PATH/libobcdc.so + elif [[ -f $BINARY_PATH/$so_name_V ]] + then + cp $BINARY_PATH/$so_name_V $BINARY_PATH/libobcdc.so + else + touch $BINARY_PATH/libobcdc.so + fi + if [[ -f $BINARY_PATH/$tailf_name ]] + then + if [[ $tailf_name != "obcdc_tailf" ]] + then + cp $BINARY_PATH/$tailf_name $BINARY_PATH/obcdc_tailf + fi + else + touch $BINARY_PATH/obcdc_tailf + fi + fi + + if [[ ! -f $BINARY_PATH/libobcdc.so.1.0.0 ]] + then + touch $BINARY_PATH/libobcdc.so.1.0.0 $BINARY_PATH/obcdc_tailf + fi + if [[ ! -f $BINARY_PATH/libobcdc.so ]] + then + touch $BINARY_PATH/libobcdc.so + fi + if [[ ! -f $BINARY_PATH/obcdc_tailf ]] + then + touch $BINARY_PATH/obcdc_tailf + fi + + if [[ -f $HOME/oceanbase/test/obproxy_test_config ]] + then + cp $HOME/oceanbase/test/obproxy_test_config $BINARY_PATH/obproxy_test_config + fi + return $ret +} + +function run_old() +{ + cd $HOME/oceanbase && + ./build.sh init && sw && + ./build.sh $BUILD_TARGET --enable-dlink-observer=no || return 1 + + if [ "$REPO" == "server" ] + then + cd $HOME/oceanbase/src && ${MAKE} ${MAKE_ARGS} observer/observer || return 2 + mv observer/observer $HOME/ || return 3 + if [ "$ENABLE_LIBOBLOG" == "1" ] + then + if [ -d $HOME/oceanbase/src/liboblog ] + then + (cd liboblog && ${MAKE} ${MAKE_ARGS}) || return 4 + mv liboblog/{src/liboblog.la,src/.libs/liboblog.so.1.0.0,tests/oblog_tailf} $HOME/ + fi + if [ -d $HOME/oceanbase/tools/obcdc ] + then + (cd tools/obcdc && ${MAKE} ${MAKE_ARGS}) || true + touch $HOME/libobcdc.so.1.0.0 $HOME/obcdc_tailf 2>/dev/null + fi + fi + if [ "$CREATE_AGENTSERVER" == "1" ] && [ -f $HOME/oceanbase/tools/obtest/backup_restore_farm.sh ] + then + cd $HOME/oceanbase && cd tools/agentserver && 
${MAKE} ${MAKE_ARGS} || return 3 + mv agentserver incbackupserver $HOME/ 2>/dev/null || true + fi + if [ "$CREATE_LIBOBSERVER_SO" == "1" ] + then + cd $HOME/oceanbase && rm -f config.status && ./build.sh $BUILD_TARGET --enable-dlink-observer=yes || return 5 + cd src && rm -f observer/libobserver.la && ${MAKE} ${MAKE_ARGS} observer/libobserver.la || return 6 + mv observer/.libs/libobserver.so.0.0.0 $HOME/ 2>/dev/null || true + fi + touch $HOME/libincbackup.la $HOME/libincbackup.a $HOME/libagentbackup.la $HOME/libagentbackup.a 2>/dev/null + touch $HOME/libobcdc.so.1.0.0 $HOME/libobcdc.so $HOME/obcdc_tailf 2>/dev/null + elif [ "$REPO" == "proxy" ] + then + cd $HOME/oceanbase/src && ${MAKE} ${MAKE_ARGS} || return 2 + mv obproxy/obproxy $HOME/ + else + errcho "Repository hasn't been supported" + return 2 + fi +} + +function run() +{ + if [[ "$REPO" == "server" && -f $HOME/oceanbase/CMakeLists.txt ]] + then + run_new_oceanbase + else + run_old + fi +} + +source $HOME/scripts/frame.sh && main diff --git a/.github/script/seekdb_native/scripts/farm_post_compile.sh b/.github/script/seekdb_native/scripts/farm_post_compile.sh new file mode 100644 index 000000000..d9581bb72 --- /dev/null +++ b/.github/script/seekdb_native/scripts/farm_post_compile.sh @@ -0,0 +1,72 @@ +#!/bin/sh +# 自 farm-jenkins/scripts/opensource/scripts/farm_post_compile.sh 复制 +set +x +ret=$1 +test "$ret" = "0" || exit $ret +function strip +{ + bin=$1 + if ! objdump -s -j .gnu_debuglink $bin 2>/dev/null | grep debuglink + then + objcopy --only-keep-debug $bin $bin.debug && + objcopy --add-gnu-debuglink=$bin.debug $bin && + objcopy -g $bin + fi +} + +function run +{ + if [ ! 
-e observer ] + then + echo "observer不存在" + exit 1 + fi + + root_path=`pwd` + + if [[ -f $root_path/obproxy_test_config ]] + then + set +x + cat >> /etc/hosts <<EOF 2>/dev/null || true +172.16.0.220 github.com +172.16.0.220 mirrors.aliyun.com +172.16.0.220 maven.aliyun.com +EOF + obproxy_yum_version=`sed '/^obproxy_yum_version=/!d;s/.*=//' $root_path/obproxy_test_config` + obproxy_yum_release=`sed '/^obproxy_yum_release=/!d;s/.*=//' $root_path/obproxy_test_config` + obproxy_yum_filename="obproxy-ce-"$obproxy_yum_version"-"$obproxy_yum_release".x86_64.rpm" + wget_url="http://mirrors.cloud.aliyuncs.com/oceanbase/community/stable/el/7/x86_64/"$obproxy_yum_filename + retry_times=10 + for ((i=0; i<$retry_times; i++)) + do + wget $wget_url -O $obproxy_yum_filename 2>/dev/null + [[ -f $obproxy_yum_filename ]] && break + echo "yum源网络异常, 10s后开始重试..." + sleep 10 + done + if [[ -f $obproxy_yum_filename ]] + then + rpm2cpio $obproxy_yum_filename | cpio -div + if [ -f home/admin/obproxy-*/bin/obproxy ] + then + mv home/admin/obproxy-*/bin/obproxy $root_path + chmod +x $root_path/obproxy + fi + fi + fi + + if [ ! 
-e obproxy ] + then + echo "obproxy不存在" + exit 1 + fi + + strip observer || return 2 + strip obproxy || return 3 + test -f libobserver.so.0.0.0 && (strip libobserver.so.0.0.0 || return 4) + test -f liboblog.so.1.0.0 && (strip liboblog.so.1.0.0 || return 5) + + touch agentserver incbackupserver 2>/dev/null + return 0 +} +run &> post_compile.output 2>&1 diff --git a/.github/script/seekdb_native/scripts/frame.sh new file mode 100644 index 000000000..6d0129812 --- /dev/null +++ b/.github/script/seekdb_native/scripts/frame.sh @@ -0,0 +1,184 @@ +### predefined variables +# HOME - job home directory +# COMMIT - commit our operations are based on +# BRANCH - branch our operations are based on +# PATCH - file name of patch need to patch +# REPO - repository that the job is executing in +# JOBNAME - job name +# 自 farm-jenkins/scripts/opensource/scripts/frame.sh 复制并改造:/etc/hosts 与 dep_cache 在无权限时跳过 +set +x + +function __execute_job__(){ + init && prepare && run && clean +} + +function main() +{ + __execute_job__ + ret=$? 
+ return $ret +} + +function clean_dir(){ + for file in `ls $1` + do + if [ -d $1"/"$file ] + then + clean_dir $1"/"$file + fi + done + rmdir $1 +} + + +function init() +{ + ulimit -s 10240 + ulimit -c unlimited + ulimit -n 655350 + user=$(whoami) + if [ -f /var/clone/clone_info ]; then + sn=`cat /var/clone/clone_info | grep -Po '"sn": "(.*)",' | awk -F': "' '{print $2}'` + sn=${sn%%\",} + else + sn=$(dmidecode -t1 2>/dev/null | egrep "Serial Number" | awk -F':' '{print $2}' | sed 's/ //g') + fi + if [ "X$sn" == "X" ]; then + ip=$(hostname -i 2>/dev/null || echo "127.0.0.1") + OB_SM_CW_TAG="$ip@$user" + else + OB_SM_CW_TAG="$sn@$user" + fi + [[ "$user" == cwork* ]] && { killall -u "$user" -9 observer 2>/dev/null; killall -u "$user" -9 obproxy 2>/dev/null; killall -u "$user" -9 ofsserver 2>/dev/null; true; } + [[ "$user" == cwork* ]] && [[ -d /sys/fs/cgroup/cpu/$user/oceanbase ]] && clean_dir /sys/fs/cgroup/cpu/$user/oceanbase 2>/dev/null || true + export DEP_CACHE_DIR="${DEP_CACHE_DIR:-$HOME/../../dep_cache}" + return 0 +} + + +function prepare() +{ + set +x + # 仅在有 FORWARDING_HOST 或可写 /etc/hosts 时追加(native runner 可能无权限) + if [[ -n "${FORWARDING_HOST:-}" ]] && [[ -w /etc/hosts ]]; then + cat >> /etc/hosts <> /etc/hosts </dev/null || true +172.16.0.220 github.com +172.16.0.220 mirrors.aliyun.com +172.16.0.220 maven.aliyun.com +EOF + fi + export PATH=/var/lib/condor/bin:/bin:/usr/local/bin:/usr/bin:/sbin + [[ -f /etc/profile.d/dep_create.sh ]] && { + source /etc/profile.d/dep_create.sh + [[ -f $(dirname ${BASH_SOURCE[0]})/dep_cache.sh ]] && source $(dirname ${BASH_SOURCE[0]})/dep_cache.sh + export DEP_CACHE_DIR=$(readlink -f $HOME/../../dep_cache 2>/dev/null || echo "$DEP_CACHE_DIR") + mkdir -p $DEP_CACHE_DIR || return 1 + } + + if [ "$REPO" == "server" ] + then + REPONAME="oceanbase" + fi + + git config --global gc.auto 0 + git config --global user.email "$(whoami)@$HOSTNAME" + git config --global user.name "$(whoami)" + + # prepare oceanbase repository: HOME/../.. 
下 clone 或更新 oceanbase + cd $HOME + ( + cd ../.. + if [ -d $REPONAME ] + then + cd $REPONAME && + rm -f .git/index.lock .git/packed-refs.lock .git/ORIG_HEAD.lock .git/refs/heads/master.lock .git/refs/heads/test.lock .git/refs/heads/test_cross_validatition.lock .git/refs/remotes/origin/*.lock .git/refs/remotes/origin/{issue,task,req}/*.lock 2>/dev/null + git reset --hard && + git clean -dxff || return 1 + git submodule deinit -f . + git submodule foreach rm -f .git/index.lock .git/ORIG_HEAD.lock .git/refs/heads/master.lock 2>/dev/null + git submodule foreach 'ls .git && git reset --hard || echo' + git submodule foreach 'ls .git && git clean -dxff || echo' + git checkout master && + git reset --hard origin/master && + git clean -dxff && + ( git pull --all || git pull --all || git pull --all || return 8 ) + git remote prune origin + else + current_branch_name=${CURRENT_BRANCH:-master} + max_retries=5 + retry=0 + while [ $retry -lt $max_retries ]; do + git clone --depth 3 $CODE_URL --branch $current_branch_name -v && break + ((retry++)) + echo "Clone failed. Retrying..." + done + if [ $retry -eq $max_retries ]; then + echo "Max retries reached. Clone failed." + return 2 + fi + current_reponame=$(basename "$CODE_URL" .git) + if [[ ! 
$current_reponame == "oceanbase" ]] + then + mv $current_reponame $REPONAME + fi + fi + ) && ln -sfn ../../$REPONAME $HOME/oceanbase || return 1 + + # prepare oceanbase environment + cd $HOME/oceanbase + if [ "$MRID" != "" ] + then + git fetch origin merge-requests/$MRID/head:test && git checkout test + elif [[ "$TARGET_HEAD" != "" ]] + then + git checkout $TARGET_BRANCH && + git reset --hard origin/$TARGET_BRANCH && + test $(git rev-parse HEAD) == "$TARGET_HEAD" || return 3 + git merge origin/$SOURCE_BRANCH --no-commit || (echo "[ABORT] 在执行合并时发生意料外的错误,任务终止"; git status -s; return 1) || return 3 + elif [ "$COMMIT" != "" ] + then + git branch -f test_branch $COMMIT && git checkout test_branch || return 2 + else + if [ "$BRANCH" = "" ] || [ "$BRANCH" = "master" ] + then + BRANCH=origin/master + fi + if [[ "$BRANCH" != origin/* ]] + then + WHOLE_BRANCH=origin/$BRANCH + else + WHOLE_BRANCH=$BRANCH + fi + git branch -f test $WHOLE_BRANCH && git checkout test || return 2 + fi + + git clean -dxff + git submodule init + git submodule update + git submodule foreach git clean -dxff + + if [ "$PATCH" != "" ] && [ -f $HOME/$PATCH ] + then + echo "apply patch $HOME/$PATCH" + git apply --index $HOME/$PATCH && + git submodule update && + git add --all && + git commit -m 'compile commit' || return 3 + fi + + return 0 +} + + +function clean() +{ + return 0 +} + +function errcho() { >&2 echo $@; } diff --git a/.github/script/seekdb_native/scripts/mysqltest_for_farm.sh b/.github/script/seekdb_native/scripts/mysqltest_for_farm.sh new file mode 100755 index 000000000..f2b9bbb02 --- /dev/null +++ b/.github/script/seekdb_native/scripts/mysqltest_for_farm.sh @@ -0,0 +1,718 @@ +#!/bin/sh +# 自 farm-jenkins/scripts/opensource/scripts/mysqltest_for_farm.sh 复制 + +set +x + +export HOME=$_CONDOR_JOB_IWD +export PATH=/bin:/usr/bin +export USER=$(whoami) + +HOST=`hostname -i 2>/dev/null || echo 127.0.0.1` +DOWNLOAD_DIR=$HOME/downloads +SLOT_ID=`echo $_CONDOR_SLOT | cut -c5-` + +function 
prepare_config { + if [[ "$MINI" == "1" ]] || ([[ "$MINI" == "-1" ]] && [[ -f $HOME/oceanbase/tools/deploy/enable_mini_mode ]]) + then + [[ -f $HOME/oceanbase/tools/deploy/enable_mini_mode ]] && MINI_SIZE=$(cat $HOME/oceanbase/tools/deploy/enable_mini_mode) + [[ $MINI_SIZE =~ ^[0-9]+G ]] || MINI_SIZE="8G" + MINI_CONFIG_ITEM="ObCfg.init_config['memory_limit']='$MINI_SIZE'" + fi + + cd $HOME/oceanbase/tools/deploy + if [ "$WITH_PROXY" ] && [ "$WITH_PROXY" != "0" ] + then + if [ "$CLUSTER_SPEC" == '2x1' ] + then + SPEC="[$HOST,proxy@$HOST]@zone1 [$HOST]@zone2" + else + SPEC="[$HOST,proxy@$HOST]@zone1" + fi + else + if [[ "$CLUSTER_SPEC" == '2x1' ]] || [[ "$SLAVE" == "1" ]] + then + SPEC="[$HOST]@zone1 [$HOST]@zone2" + else + SPEC="[$HOST]@zone1" + fi + fi + cat >config7.py < test.fifo 2>&1 + elif [[ "$SLAVE" == "1" ]] + then + cat test.fifo | tee result.out & ./hap.py $USER.obs1.mysqltest collect_all slices=$SLICES slice_idx=$SLICE_IDX $slb `echo $ARGV` 1> test.fifo 2>&1 + else + cat test.fifo | tee result.out & ./hap.py $USER.mysqltest collect_all slices=$SLICES slice_idx=$SLICE_IDX $slb `echo $ARGV` 1> test.fifo 2>&1 + fi + + # check if there is error + if [ "$?" == "0" ] + then + grep -E 'PASSED' $HOME/oceanbase/tools/deploy/result.out > /dev/null && ! 
grep -E "FAIL LST" $HOME/oceanbase/tools/deploy/result.out > /dev/null || return 1 + else + return 1 + fi +} + +function collect_log { + cd $HOME/oceanbase/tools/deploy || return 1 + if [ -d "collected_log" ] + then + mv mysql_test/var/log/* collected_log + mv $HOME/$USER.*/core[.-]* collected_log + mv collected_log collected_log_$SLICE_IDX || return 1 + tar cvfz $HOME/collected_log_$SLICE_IDX.tar.gz collected_log_$SLICE_IDX + fi +} + +export -f run_mysqltest + +function obd_prepare_obd { + sed -i 's/mirrors.aliyun.com/mirrors.cloud.aliyuncs.com/g' $HOME/oceanbase/deps/init/oceanbase.el7.x86_64.deps + if [[ -f $HOME/oceanbase/deps/init/dep_create.sh ]] + then + if [[ "$IS_CE" == "1" ]] + then + cd $HOME/oceanbase && ./build.sh init --ce || return 3 + else + cd $HOME/oceanbase && ./build.sh init || return 3 + fi + else + if grep 'dep_create.sh' $HOME/oceanbase/build.sh + then + cd $HOME/oceanbase/deps/3rd && bash dep_create.sh all || return 4 + else + cd $HOME/oceanbase && ./build.sh init || return 3 + fi + fi + + $obd devmode enable + $obd env set OBD_DEPLOY_BASE_DIR $HOME/oceanbase/tools/deploy + $obd --version +} + +export -f obd_prepare_obd + +function obd_prepare_global { + export LANG=en_US.UTF-8 + HOST=`hostname -i` + DOWNLOAD_DIR=$HOME/downloads + SLOT_ID=`echo $_CONDOR_SLOT | cut -c5-` + PORT_NUM=`expr 5000 + $SLOT_ID \* 100` + + # 根据传入的observer binary判断是否开源版 + if [[ `$HOME/observer -V 2>&1 | grep -E '(OceanBase CE|OceanBase_CE)'` ]] + then + COMPONENT="oceanbase-ce" + OBRPOXY_COMPENT="obproxy-ce" + export IS_CE=1 + else + COMPONENT="oceanbase" + OBRPOXY_COMPENT="obproxy" + export IS_CE=0 + fi + # 根据build.sh中的内容判断依赖安装路径 + if grep 'dep_create.sh' $HOME/oceanbase/build.sh + then + DEP_PATH=$HOME/oceanbase/deps/3rd + else + DEP_PATH=$HOME/oceanbase/rpm/.dep_create/var + fi + + if [[ -f "$HOME/oceanbase/tools/deploy/mysqltest_config.yaml" ]] + then + export WITH_MYSQLTEST_CONFIG_YAML=1 + else + export WITH_MYSQLTEST_CONFIG_YAML=0 + fi + + 
ob_name=obcluster$SLOT_ID + app_name=$ob_name.$USER.$HOST + + export obd=$DEP_PATH/usr/bin/obd + export OBD_HOME=$HOME + export OBD_INSTALL_PRE=$DEP_PATH + export DATA_PATH=$HOME/data + +} + +function obd_prepare_config { + + MINI_SIZE="10G" + if [[ "$MINI" == "1" ]] || ([[ "$MINI" == "-1" ]] && [[ -f $HOME/oceanbase/tools/deploy/enable_mini_mode ]]) + then + [[ -f $HOME/oceanbase/tools/deploy/enable_mini_mode ]] && MINI_SIZE=$(cat $HOME/oceanbase/tools/deploy/enable_mini_mode) + [[ $MINI_SIZE =~ ^[0-9]+G ]] || MINI_SIZE="8G" + fi + if [[ "$CLUSTER_SPEC" == '2x1' ]] || [[ "$SLAVE" == "1" ]] + then + mysql_port=$PORT_NUM && PORT_NUM=`expr $PORT_NUM + 1` + rpc_port1=$PORT_NUM && PORT_NUM=`expr $PORT_NUM + 1` + obshell_port1=$PORT_NUM && PORT_NUM=`expr $PORT_NUM + 1` + mysql_port2=$PORT_NUM && PORT_NUM=`expr $PORT_NUM + 1` + rpc_port2=$PORT_NUM && PORT_NUM=`expr $PORT_NUM + 1` + obshell_port2=$PORT_NUM && PORT_NUM=`expr $PORT_NUM + 1` + SERVERS=$(cat <<-EOF + servers: + - name: server1 + ip: 127.0.0.1 + - name: server2 + ip: 127.0.0.1 + server1: + mysql_port: $mysql_port + rpc_port: $rpc_port1 + obshell_port: $obshell_port1 + home_path: $DATA_PATH/observer1 + zone: zone1 + server2: + mysql_port: $mysql_port2 + rpc_port: $rpc_port2 + obshell_port: $obshell_port2 + home_path: $DATA_PATH/observer2 + zone: zone2 +EOF +) + else + mysql_port=$PORT_NUM && PORT_NUM=`expr $PORT_NUM + 1` + rpc_port=$PORT_NUM && PORT_NUM=`expr $PORT_NUM + 1` + obshell_port=$PORT_NUM && PORT_NUM=`expr $PORT_NUM + 1` + SERVERS=$(cat <<-EOF + servers: + - name: server1 + ip: 127.0.0.1 + server1: + mysql_port: $mysql_port + rpc_port: $rpc_port + obshell_port: $obshell_port + home_path: $DATA_PATH/observer1 + zone: zone1 +EOF +) + fi + export MYSQL_PORT=$mysql_port + proxy_conf="" + if [ "$WITH_PROXY" ] && [ "$WITH_PROXY" != "0" ] + then + listen_port=$PORT_NUM && PORT_NUM=`expr $PORT_NUM + 1` + prometheus_listen_port=$PORT_NUM && PORT_NUM=`expr $PORT_NUM + 1` + + proxy_conf=$(cat 
$HOME/oceanbase/tools/deploy/obd/obproxy.yaml.template) + if [[ -f $HOME/oceanbase/test/obproxy_test_config ]] + then + obproxy_switch=`sed '/^obproxy_switch=/!d;s/.*=//' $HOME/oceanbase/test/obproxy_test_config` + + # 当且仅当 obproxy 指定模式为wget时,影响mysqltest obproxy配置,否则mysqltest依旧沿用原有obproxy获取逻辑 + mkdir $HOME/bin + if [[ -f $HOME/obproxy ]] + then + cp $HOME/obproxy $HOME/bin + else + wget_url=`sed '/^wget_url=/!d;s/.*=//' $HOME/oceanbase/test/obproxy_test_config` + wget $wget_url -O $HOME/bin/obproxy + if [[ -f $HOME/bin/obproxy ]] + then + echo "obproxy 文件下载成功" + chmod +x $HOME/bin/obproxy + + else + # 下载失败,沿用原有逻辑 + echo "obproxy 文件下载失败" + return 1 + fi + fi + + echo "obrpxoy版本:" + echo `$HOME/bin/obproxy -V 2>&1` + + obproxy_version=$("$HOME"/bin/obproxy --version 2>&1 | grep -E 'obproxy \(OceanBase [\.0-9]+ \S+\)' | grep -Eo '\s[.0-9]+\s') + # obproxy 额外依赖了lib目录,然后需要额外创建 + mkdir -p $HOME/lib + $obd mirror create -n $OBRPOXY_COMPENT -p $HOME -t "latest" -V $obproxy_version + echo "执行结果: $?" 
+ + proxy_conf=$(echo "$proxy_conf" | sed '/package_hash: [0-9a-z]*/d') + proxy_conf="""$proxy_conf + tag: latest +""" + fi + proxy_conf=${proxy_conf//'{{%% COMPONENT %%}}'/$COMPONENT} + proxy_conf=${proxy_conf//'{{%% LISTEN_PORT %%}}'/$listen_port} + proxy_conf=${proxy_conf//'{{%% PROMETHEUS_LISTEN_PORT %%}}'/$prometheus_listen_port} + proxy_conf=${proxy_conf//'{{%% OBPORXY_HOME_PATH %%}}'/$DATA_PATH\/obproxy} + proxy_conf=${proxy_conf//'{{%% OBPROXY_CONFIG_SERVER_URL %%}}'/$proxy_cfg_url} + fi + + conf=$(cat $HOME/oceanbase/tools/deploy/obd/config.yaml.template) + # cgroup support + if [[ -f $HOME/oceanbase/tools/deploy/obd/.use_cgroup || $(echo " $ARGV " | grep " use-cgroup ") ]] + then + CGROUP_CONFIG="cgroup_dir: /sys/fs/cgroup/cpu/$USER" + fi + # MYSQLRTEST_ARGS contains changes for special run + proxy_conf="""$MYSQLRTEST_ARGS + $CGROUP_CONFIG +$proxy_conf""" + conf=${conf//'{{%% PROXY_CONF %%}}'/"$proxy_conf"} + conf=${conf//'{{%% DEPLOY_PATH %%}}'/"$HOME/oceanbase/tools/deploy"} + conf=${conf//'{{%% COMPONENT %%}}'/$COMPONENT} + conf=${conf//'{{%% SERVERS %%}}'/$SERVERS} + conf=${conf//'{{%% TAG %%}}'/'latest'} + conf=${conf//'{{%% MINI_SIZE %%}}'/$MINI_SIZE} + conf=${conf//'{{%% OBCONFIG_URL %%}}'/$cfg_url} + conf=${conf//'{{%% APPNAME %%}}'/$app_name} + conf=${conf//'{{%% EXTRA_PARAM %%}}'/$MYSQLRTEST_ARGS} + if [[ "$IS_CE" == "1" ]] + then + conf=$(echo "$conf" | sed 's/oceanbase:/oceanbase\-ce:/g' | sed 's/\- oceanbase$/- oceanbase-ce/g') + fi + echo "$conf" > $HOME/oceanbase/tools/deploy/config.yaml + cat $HOME/oceanbase/tools/deploy/config.yaml +} +function get_baseurl { + repo_file=$OBD_HOME/.obd/mirror/remote/taobao.repo + start=0 + end=$(cat $repo_file | wc -l ) + for line in $(grep -nE "^\[.*\]$" $repo_file) + do + num=$(echo "$line" | awk -F ":" '{print $1}') + [ "$find_section" == "1" ] && end=$num + is_target_section=$(echo $(echo "$line" | grep -E "\[\s*$repository\s*\]")) + [ "$is_target_section" != "" ] && start=$num && find_section='1' + 
done + repo_section=$(awk "NR>=$start && NR<=$end" $repo_file) + baseurl=$(echo "$repo_section" | grep baseurl | awk -F '=' '{print $2}') +} + +function get_version_and_release { + start=0 + end=$(cat $config_yaml | wc -l) + for line in $(grep -nE '^\S+:' $config_yaml) + do + num=$(echo "$line" | awk -F ":" '{print $1}') + [ "$find_obproxy" == "1" ] && end=$num + [ "$(echo "$line" | grep obproxy)" != "" ] && start=$num && find_obproxy='1' + done + odp_conf=$(awk "NR>=$start && NR<=$end" $config_yaml) + version=$(echo "$odp_conf" | grep version | awk '{print $2}') + release=$(echo "$odp_conf" | grep release | awk '{print $2}') + include_file=$DEPLOY_PATH/$(echo "$odp_conf"| grep 'include:'| awk '{print $2}') + if [[ -f $include_file ]] + then + _repo=$(echo $(grep '__mirror_repository_section_name:' $include_file | awk '{print $2}')) + [ "$_repo" != "" ] && repository=$_repo + version=$(echo $(grep 'version:' $include_file | awk '{print $2}')) + release=$(echo $(grep 'release:' $include_file | awk '{print $2}')) + fi +} + +function get_obproxy { + repository="taobao" + config_yaml=$HOME/oceanbase/tools/deploy/config.yaml + DEPLOY_PATH=$HOME/oceanbase/tools/deploy + obproxy_mirror_repository=$(echo $(grep '__mirror_repository_section_name' $config_yaml | awk -F':' '{print $2}')) + [ "$obproxy_mirror_repository" != "" ] && repository=$obproxy_mirror_repository + + get_version_and_release + get_baseurl + $obd mirror disable remote + OS_ARCH="$(uname -m)" + if [[ "$baseurl" != "" && "$version" != "" && "$release" != "" ]] + then + if [[ $OS_ARCH == "aarch64" ]] + then + pkg_name="obproxy-$version-$release.aarch64.rpm" + else + pkg_name="obproxy-$version-$release.x86_64.rpm" + fi + if [ "$(find $OBD_HOME/.obd/mirror/local -name $pkg_name)" == "" ] + then + download_dir=$OBD_HOME/.obd_download + mkdir -p $download_dir + wget $baseurl/obproxy/$pkg_name -O "$download_dir/$pkg_name" -o $download_dir/obproxy.down + $obd mirror clone "$download_dir/$pkg_name" && rm -rf 
$download_dir && return 0 + else + return 0 + fi + fi + echo "Download pkg failed. Try to get obproxy from repository $repository" + count=0 + interval=5 + retry_limit=100 + while (( $count < $retry_limit )) + do + $obd mirror disable remote + $obd mirror enable $repository + $obd mirror update && $obd mirror list $repository > /dev/null && return 0 + count=`expr $count + 1` + (( $count <= $retry_limit )) && sleep $interval + done + return 1 +} + +function obd_prepare_bin { + cd $HOME + mkdir -p $DOWNLOAD_DIR/{bin,etc,admin,lib} || return 1 + + # remove the logic to get obproxy again + # if [ "$WITH_PROXY" ] && [ "$WITH_PROXY" != "0" ] + # then + # get_obproxy || (echo "failed to get obdproxy" || return 1) + # fi + + if [ ! -x observer ] + then + return 1 + else + cp observer $DOWNLOAD_DIR/bin/ || return 2 + fi + + if [ -f $DEP_PATH/home/admin/oceanbase/bin/obshell ] + then + cp $DEP_PATH/home/admin/oceanbase/bin/obshell $DOWNLOAD_DIR/bin/ + chmod a+x $DOWNLOAD_DIR/bin/obshell + fi + + cd $HOME/oceanbase/tools/deploy + cp $DEP_PATH/u01/obclient/bin/obclient ./obclient && chmod 777 obclient + cp $DEP_PATH/u01/obclient/bin/mysqltest ./mysqltest && chmod 777 mysqltest + + if [ "$WITH_DEPS" ] && [ "$WITH_DEPS" != "0" ] + then + cd $HOME/oceanbase/tools/deploy && [[ -f copy.sh ]] && sh copy.sh + fi + if [[ -f "$HOME/oceanbase/tools/deploy/obd/.observer_obd_plugin_version" ]] + then + obs_version=$(cat $HOME/oceanbase/tools/deploy/obd/.observer_obd_plugin_version) + else + obs_version=`$DOWNLOAD_DIR/bin/observer -V 2>&1 | grep -E "observer \(OceanBase([ \_]CE)? 
([.0-9]+)\)" | grep -Eo '([.0-9]+)'` + fi + + mkdir $HOME/oceanbase/tools/deploy/admin + if [[ -d $HOME/oceanbase/src/share/inner_table/sys_package ]] + then + cp $HOME/oceanbase/src/share/inner_table/sys_package/*.sql $HOME/oceanbase/tools/deploy/admin/ + fi + + $obd mirror create -n $COMPONENT -p $DOWNLOAD_DIR -t latest -V $obs_version || return 2 + +} + +function obd_init_cluster { + retries=$reboot_retries + while (( $retries > 0 )) + do + if [[ "$retries" == "$reboot_retries" ]] + then + $obd cluster deploy $ob_name -c $HOME/oceanbase/tools/deploy/config.yaml -f && $obd cluster start $ob_name -f && $obd cluster display $ob_name + else + $obd cluster redeploy $ob_name -f + fi + retries=`expr $retries - 1` + ./obclient -h 127.1 -P $MYSQL_PORT -u root -A -e "alter system set_tp tp_no = 509, error_code = 4016, frequency = 1;" + ret=$? + if [[ $ret == 0 ]] + then + init_files=('init.sql' 'init_mini.sql' 'init_for_ce.sql') + for init_file in ${init_files[*]} + do + if [[ -f $HOME/oceanbase/tools/deploy/$init_file ]] + then + if ! 
grep "alter system set_tp tp_no = 509, error_code = 4016, frequency = 1;" $HOME/oceanbase/tools/deploy/$init_file + then + echo -e "\nalter system set_tp tp_no = 509, error_code = 4016, frequency = 1;" >> $HOME/oceanbase/tools/deploy/$init_file + fi + fi + done + fi + $obd test mysqltest $ob_name $SERVER_ARGS --mysqltest-bin=./mysqltest --obclient-bin=./obclient --init-only --init-sql-dir=$HOME/oceanbase/tools/deploy $INIT_FLIES --log-dir=$HOME/init_case $EXTRA_ARGS -v > ./init_sql_log && init_seccess=1 && break + done +} + +function is_case_selector_arg() +{ + local arg + arg=$1 + case_selector_patterns=('test-set=*' 'testset=*' 'suite=*' 'tag=*' 'regress_suite=*' 'all' 'psmall') + for p in ${case_selector_patterns[@]} + do + [[ "$(echo $arg | grep -Eo $p)" != "" ]] && return 0 + done + return 1 +} + +function obd_run_mysqltest { + obd_prepare_global + + cd $HOME/oceanbase/tools/deploy + if [[ "$SLB" != "" ]] && [[ "$EXECID" != "" ]] + then + SCHE_ARGS="--slb-host=$SLB --exec-id=$EXECID " + else + SCHE_ARGS="--slices=$SLICES --slice-idx=$SLICE_IDX " + fi + + + if [[ "$MINI" == "1" && -f core-test.init_mini.sql ]] + then + INIT_FLIES="--init-sql-files=core-test.init_mini.sql,init_user.sql|root@mysql|test" + [ -f init_user_oracle.sql ] && INIT_FLIES="${INIT_FLIES},init_user_oracle.sql|SYS@oracle|SYS" + else + if [[ "$IS_CE" == "1" && -f $HOME/oceanbase/.ce ]] + then + INIT_FLIES="--init-sql-files=init_mini.sql,init_user.sql|root@mysql|test " + fi + fi + CLUSTER_MODE="c" + if [[ "$SLAVE" == "1" ]] + then + SERVER_ARGS="--test-server=server2" + CLUSTER_MODE="slave" + else + SERVER_ARGS="" + fi + if [ "$WITH_PROXY" ] && [ "$WITH_PROXY" != "0" ] + then + CLUSTER_MODE="proxy" + fi + reboot_retries=2 + if [[ "$WITH_MYSQLTEST_CONFIG_YAML" == "1" ]] + then + EXTRA_ARGS="--test-mode=$CLUSTER_MODE " + else + EXTRA_ARGS="--cluster-mode=$CLUSTER_MODE " + fi + if [[ -f $HOME/oceanbase/tools/deploy/obd/.fast-reboot ]] + then + EXTRA_ARGS="${EXTRA_ARGS}--fast-reboot " + fi + + if 
[[ "$IS_CE" == "1" ]] + then + EXTRA_ARGS="${EXTRA_ARGS}--mode=mysql " + fi + + EXTRA_ARGS_WITHOUT_CASE=$EXTRA_ARGS + + if [[ "$ARGV" != "" ]] + then + for arg in $ARGV + do + [[ "${arg%%=*}" == "testset" ]] && arg="${arg/testset=/test-set=}" + [[ "${arg%%=*}" == "reboot-timeout" ]] && has_reboot_timeout=1 + [[ "${arg%%=*}" == "reboot-retries" ]] && reboot_retries=${arg#*=} + [[ "${arg%%=*}" == "disable-collect" ]] && disable_collect=1 && continue + [[ "${arg%%=*}" == "use-cgroup" ]] && continue + EXTRA_ARGS="${EXTRA_ARGS}--${arg} " + if ! is_case_selector_arg "${arg}" + then + EXTRA_ARGS_WITHOUT_CASE="${EXTRA_ARGS_WITHOUT_CASE}--${arg} " + fi + done + fi + + if [[ "$has_reboot_timeout" != "1" ]] + then + REBOOT_TIMEOUT="--reboot-timeout=600" + fi + if [[ "$disable_collect" != "1" ]] + then + COLLECT_ARG="--collect-all" + if [[ "$WITH_MYSQLTEST_CONFIG_YAML" == "0" ]] && [ "$WITH_PROXY" ] && [ "$WITH_PROXY" != "0" ] && [[ -f "$HOME/oceanbase/tools/deploy/obd/.collect_proxy_log" ]] + then + COLLECT_ARG="$COLLECT_ARG --collect-components=$COMPONENT,obproxy" + fi + fi + obd_init_cluster + if [[ "$init_seccess" != "1" ]] + then + cat ./init_sql_log + echo "Failed to init sql, see more log at ./collected_log/_test_init" + mkdir -p $HOME/collected_log/_test_init/ + [[ -d "$DATA_PATH/observer1/log" ]] && mkdir -p $HOME/collected_log/_test_init/observer1 && mv $DATA_PATH/observer1/log/* $HOME/collected_log/_test_init/observer1 + [[ -d "$DATA_PATH/observer2/log" ]] && mkdir -p $HOME/collected_log/_test_init/observer2 && mv $DATA_PATH/observer2/log/* $HOME/collected_log/_test_init/observer2 + [[ -d "$DATA_PATH/obproxy/log" ]] && mkdir -p $HOME/collected_log/_test_init/obproxy && mv $DATA_PATH/obproxy/log/* $HOME/collected_log/_test_init/obproxy + exit 1 + fi + mysqltest_cmd="$obd test mysqltest $ob_name $SERVER_ARGS --mysqltest-bin=./mysqltest --obclient-bin=./obclient $COLLECT_ARG --init-sql-dir=$HOME/oceanbase/tools/deploy --log-dir=./var/log $REBOOT_TIMEOUT $VERBOSE_ARG 
$EXTRA_ARGS" + $mysqltest_cmd $RUN_EXTRA_CHECK_CMD $INIT_FLIES $SCHE_ARGS 2>&1 | tee compare.out && ( exit ${PIPESTATUS[0]}) + ret=$? + if [[ $JOBNAME == 'mysqltest_opensource' ]] + then + submarker="_opensource" + else + submarker="" + fi + mv compare.out $HOME/mysqltest${submarker}_compare_output.$SLICE_IDX + echo "finish!" + return $ret +} + +function obd_prepare_env { + obd_prepare_global + obd_prepare_obd + obd_prepare_config + obd_prepare_bin +} + +function obd_collect_log { + + echo "collect log" + cd $HOME/ + mkdir -p collected_log + mkdir -p collected_log/obd_log + mkdir -p collected_log/mysqltest_log + mkdir -p collected_log/mysqltest_rec_log + find $DATA_PATH -name 'core[.-]*' | xargs -i cp {} collected_log + [[ "$OBD_HOME" != "" ]] && mv $OBD_HOME/.obd/log/* collected_log/obd_log/ + mv oceanbase/tools/deploy/var/log/* collected_log/mysqltest_log/ + mv oceanbase/tools/deploy/var/rec_log/* collected_log/mysqltest_rec_log/ + mv collected_log collected_log_$SLICE_IDX + tar zcvf collected_log_$SLICE_IDX.tar.gz collected_log_$SLICE_IDX +} + +function collect_obd_case_log { + + echo "collect obd case log" + cd $HOME/ + if [[ $JOBNAME == 'mysqltest_opensource' ]] + then + submarker="_opensource" + else + submarker="" + fi + [[ "$OBD_HOME" != "" ]] && cat $OBD_HOME/.obd/log/* | grep '\[INFO\]' > mysqltest_cases_log${submarker}_$SLICE_IDX.output + +} + + +export -f obd_prepare_global +export -f obd_prepare_config +export -f obd_run_mysqltest +export -f obd_init_cluster +export -f is_case_selector_arg + +function run { + set +x + if ([[ -f $HOME/oceanbase/rpm/oceanbase.deps ]] && [[ "$(grep 'ob-deploy' $HOME/oceanbase/rpm/oceanbase.deps )" != "" ]]) || + ([[ -f $HOME/oceanbase/deps/3rd/oceanbase.el7.x86_64.deps ]] && [[ "$(grep 'ob-deploy' $HOME/oceanbase/deps/3rd/oceanbase.el7.x86_64.deps )" != "" ]]) || + ([[ -f $HOME/oceanbase/deps/init/oceanbase.el7.x86_64.deps ]] && [[ "$(grep 'ob-deploy' $HOME/oceanbase/deps/init/oceanbase.el7.x86_64.deps )" != "" ]]) + then 
+ timeout=18000 + [[ "$SPECIAL_RUN" == "1" ]] && timeout=72000 + obd_prepare_env + prepare_result=$? + if [[ $prepare_result -ne 0 ]] + then + obd_collect_log + return $prepare_result + fi + timeout $timeout bash -c "obd_run_mysqltest" + test_ret=$? + error_log_ret=0 + if [[ -f $HOME/oceanbase/tools/deploy/error_log_filter.json && ( $BRANCH == 'master' || $BRANCH == "4_2_x_release" ) ]] + then + # notice the core + # python $HOME/common/analyse_the_observer_core.py collect -p ${DATA_PATH} -j ${JOBNAME}.${SLICE_IDX} -o $HOME + error_log_ret=0 + # collect_obd_case_log + fi + if [[ $test_ret -ne 0 || $error_log_ret -ne 0 ]] + then + obd_collect_log + fi + return `[[ $test_ret = 0 && $error_log_ret = 0 ]]` + else + prepare_env && + timeout 18000 bash -c "run_mysqltest" + test_ret=$? + error_log_ret=0 + if [[ -f $HOME/oceanbase/tools/deploy/error_log_filter.json && ( $BRANCH == 'master' || $BRANCH == "4_2_x_release" ) ]] + then + python $HOME/common/analyse_the_observer_log.py collect -p ${DATA_PATH}/observer1/log -p ${DATA_PATH}/observer2/log -i $GID -j ${JOBNAME}.${SLICE_IDX} -o $HOME -F $HOME/oceanbase/tools/deploy/error_log_filter.json + error_log_ret=0 + fi + if [[ $test_ret -ne 0 || $error_log_ret -ne 0 ]] + then + collect_log + fi + return `[[ $test_ret = 0 && $error_log_ret = 0 ]]` + fi + +} + +source $HOME/scripts/frame.sh && main \ No newline at end of file diff --git a/.github/workflows/seekdb-farm2.yml b/.github/workflows/seekdb-farm2.yml new file mode 100644 index 000000000..e5e490af9 --- /dev/null +++ b/.github/workflows/seekdb-farm2.yml @@ -0,0 +1,30 @@ +name: seekdb Farm2 + +on: + pull_request: + branches: [ master,develop ] + types: [opened, reopened, synchronize, ready_for_review] + paths-ignore: + - 'docs/**' + - '.github/ISSUE_TEMPLATE/**' + - '.github/pull_request_template.md' + - 'README.md' + - 'README_CN.md' + - 'CONTRIBUTING.md' + schedule: + - cron: '30 9 * * *' + +jobs: + seekdb: + if: ${{ github.repository_owner == 'oceanbase' && 
!github.event.pull_request.draft }} + name: seekdb + runs-on: ubuntu-latest + steps: + - name: Checkout workspace + uses: actions/checkout@v3 + - name: action by seekdb + uses: ./.github/script/ + id: seekdb + with: + pipeline_id: ${{ github.run_id }} + project: ${{ github.repository }} \ No newline at end of file diff --git a/.github/workflows/seekdb-native.yml b/.github/workflows/seekdb-native.yml new file mode 100644 index 000000000..19bf20e86 --- /dev/null +++ b/.github/workflows/seekdb-native.yml @@ -0,0 +1,170 @@ +# SeekDB CI - 执行下沉版(Native) +# 编译与测试在 self-hosted Runner 上直接执行,不经过 Farm2。 +# farm 脚本已从 farm-jenkins 复制到 .github/script/seekdb_native/scripts/,无需 clone。可选:FARM2_WORKER_IMAGE。 +name: SeekDB CI (Native) + +on: + pull_request: + branches: [master, develop] + types: [opened, reopened, synchronize, ready_for_review] + paths-ignore: + - 'docs/**' + - '.github/ISSUE_TEMPLATE/**' + - '.github/pull_request_template.md' + - 'README.md' + - 'README_CN.md' + - 'CONTRIBUTING.md' + schedule: + - cron: '30 9 * * *' + workflow_dispatch: + +env: + MYSQLTEST_SLICES: "4" + SEEKDB_TASK_DIR: ${{ github.workspace }}/seekdb_build/${{ github.run_id }} + +jobs: + seekdb-native: + if: ${{ github.repository_owner == 'oceanbase' && !github.event.pull_request.draft }} + name: seekdb-native + # 使用已注册的 Runner:seekdb-test-arc-runner(若该 Runner 的 label 不是此名,请改成实际 label) + runs-on: seekdb-test-arc-runner + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Prepare (jobargs, run_jobs) + run: | + chmod +x .github/script/seekdb_native/prepare.sh + export GITHUB_RUN_ID="${{ github.run_id }}" + export GITHUB_WORKSPACE="${{ github.workspace }}" + export MYSQLTEST_SLICES="${{ env.MYSQLTEST_SLICES }}" + export SEEKDB_TASK_DIR="${{ env.SEEKDB_TASK_DIR }}" + .github/script/seekdb_native/prepare.sh + + - name: Compile + id: compile + run: | + chmod +x .github/script/seekdb_native/compile.sh + export GITHUB_RUN_ID="${{ github.run_id }}" + export GITHUB_WORKSPACE="${{ 
github.workspace }}" + export SEEKDB_TASK_DIR="${{ env.SEEKDB_TASK_DIR }}" + export RELEASE_MODE="${{ vars.RELEASE_MODE || '' }}" + export FORWARDING_HOST="${{ vars.FORWARDING_HOST || '' }}" + if [[ -n "${{ vars.FARM2_WORKER_IMAGE || '' }}" ]]; then + docker run --rm \ + -v "${{ github.workspace }}:/workspace" \ + -w /workspace \ + -e GITHUB_RUN_ID="${{ github.run_id }}" \ + -e GITHUB_WORKSPACE="/workspace" \ + -e SEEKDB_TASK_DIR="/workspace/seekdb_build/${{ github.run_id }}" \ + -e RELEASE_MODE="${{ vars.RELEASE_MODE || '' }}" \ + -e FORWARDING_HOST="${{ vars.FORWARDING_HOST || '' }}" \ + "${{ vars.FARM2_WORKER_IMAGE }}" \ + /bin/bash -c '. /workspace/.github/script/seekdb_native/compile.sh' + else + .github/script/seekdb_native/compile.sh + fi + + - name: Record compile failure + if: steps.compile.outcome == 'failure' + run: echo '++compile++' >> "${{ env.SEEKDB_TASK_DIR }}/fail_cases.output" + + - name: Mysqltest slice 0 + if: success() + run: | + chmod +x .github/script/seekdb_native/mysqltest_slice.sh + export GITHUB_RUN_ID="${{ github.run_id }}" + export GITHUB_WORKSPACE="${{ github.workspace }}" + export SEEKDB_TASK_DIR="${{ env.SEEKDB_TASK_DIR }}" + export SLICE_IDX=0 + export SLICES="${{ env.MYSQLTEST_SLICES }}" + export BRANCH="${{ github.ref_name }}" + export FORWARDING_HOST="${{ vars.FORWARDING_HOST || '' }}" + if [[ -n "${{ vars.FARM2_WORKER_IMAGE || '' }}" ]]; then + docker run --rm \ + -v "${{ github.workspace }}:/workspace" \ + -w /workspace \ + -e GITHUB_RUN_ID="${{ github.run_id }}" \ + -e SEEKDB_TASK_DIR="/workspace/seekdb_build/${{ github.run_id }}" \ + -e SLICE_IDX=0 \ + -e SLICES="${{ env.MYSQLTEST_SLICES }}" \ + -e BRANCH="${{ github.ref_name }}" \ + -e FORWARDING_HOST="${{ vars.FORWARDING_HOST || '' }}" \ + "${{ vars.FARM2_WORKER_IMAGE }}" \ + /bin/bash -c '. 
/workspace/.github/script/seekdb_native/mysqltest_slice.sh' + else + .github/script/seekdb_native/mysqltest_slice.sh + fi + + - name: Mysqltest slice 1 + if: success() + run: | + export GITHUB_RUN_ID="${{ github.run_id }}" + export GITHUB_WORKSPACE="${{ github.workspace }}" + export SEEKDB_TASK_DIR="${{ env.SEEKDB_TASK_DIR }}" + export SLICE_IDX=1 + export SLICES="${{ env.MYSQLTEST_SLICES }}" + export BRANCH="${{ github.ref_name }}" + export FORWARDING_HOST="${{ vars.FORWARDING_HOST || '' }}" + if [[ -n "${{ vars.FARM2_WORKER_IMAGE || '' }}" ]]; then + docker run --rm -v "${{ github.workspace }}:/workspace" -w /workspace \ + -e GITHUB_RUN_ID="${{ github.run_id }}" -e SEEKDB_TASK_DIR="/workspace/seekdb_build/${{ github.run_id }}" \ + -e SLICE_IDX=1 -e SLICES="${{ env.MYSQLTEST_SLICES }}" -e BRANCH="${{ github.ref_name }}" \ + -e FORWARDING_HOST="${{ vars.FORWARDING_HOST || '' }}" \ + "${{ vars.FARM2_WORKER_IMAGE }}" \ + /bin/bash -c '. /workspace/.github/script/seekdb_native/mysqltest_slice.sh' + else + .github/script/seekdb_native/mysqltest_slice.sh + fi + + - name: Mysqltest slice 2 + if: success() + run: | + export GITHUB_RUN_ID="${{ github.run_id }}" GITHUB_WORKSPACE="${{ github.workspace }}" + export SEEKDB_TASK_DIR="${{ env.SEEKDB_TASK_DIR }}" SLICE_IDX=2 SLICES="${{ env.MYSQLTEST_SLICES }}" + export BRANCH="${{ github.ref_name }}" FORWARDING_HOST="${{ vars.FORWARDING_HOST || '' }}" + if [[ -n "${{ vars.FARM2_WORKER_IMAGE || '' }}" ]]; then + docker run --rm -v "${{ github.workspace }}:/workspace" -w /workspace \ + -e GITHUB_RUN_ID="${{ github.run_id }}" -e SEEKDB_TASK_DIR="/workspace/seekdb_build/${{ github.run_id }}" \ + -e SLICE_IDX=2 -e SLICES="${{ env.MYSQLTEST_SLICES }}" -e BRANCH="${{ github.ref_name }}" -e FORWARDING_HOST="${{ vars.FORWARDING_HOST || '' }}" \ + "${{ vars.FARM2_WORKER_IMAGE }}" /bin/bash -c '. 
/workspace/.github/script/seekdb_native/mysqltest_slice.sh' + else + .github/script/seekdb_native/mysqltest_slice.sh + fi + + - name: Mysqltest slice 3 + if: success() + run: | + export GITHUB_RUN_ID="${{ github.run_id }}" GITHUB_WORKSPACE="${{ github.workspace }}" + export SEEKDB_TASK_DIR="${{ env.SEEKDB_TASK_DIR }}" SLICE_IDX=3 SLICES="${{ env.MYSQLTEST_SLICES }}" + export BRANCH="${{ github.ref_name }}" FORWARDING_HOST="${{ vars.FORWARDING_HOST || '' }}" + if [[ -n "${{ vars.FARM2_WORKER_IMAGE || '' }}" ]]; then + docker run --rm -v "${{ github.workspace }}:/workspace" -w /workspace \ + -e GITHUB_RUN_ID="${{ github.run_id }}" -e SEEKDB_TASK_DIR="/workspace/seekdb_build/${{ github.run_id }}" \ + -e SLICE_IDX=3 -e SLICES="${{ env.MYSQLTEST_SLICES }}" -e BRANCH="${{ github.ref_name }}" -e FORWARDING_HOST="${{ vars.FORWARDING_HOST || '' }}" \ + "${{ vars.FARM2_WORKER_IMAGE }}" /bin/bash -c '. /workspace/.github/script/seekdb_native/mysqltest_slice.sh' + else + .github/script/seekdb_native/mysqltest_slice.sh + fi + + - name: Collect result + if: success() || failure() + run: | + chmod +x .github/script/seekdb_native/collect_result.sh + export GITHUB_RUN_ID="${{ github.run_id }}" + export GITHUB_WORKSPACE="${{ github.workspace }}" + export SEEKDB_TASK_DIR="${{ env.SEEKDB_TASK_DIR }}" + .github/script/seekdb_native/collect_result.sh "${{ github.workspace }}/seekdb_result.json" + + - name: Output result to log + run: | + echo "=== SeekDB native result ===" + cat seekdb_result.json + + - name: Upload SeekDB result as artifact + if: success() || failure() + uses: actions/upload-artifact@v4 + with: + name: seekdb-result-native + path: seekdb_result.json + retention-days: 30