diff --git a/.github/workflows/push-node-status-api.yaml b/.github/workflows/push-node-status-api.yaml index 765e28e2e12..6bbb8ba26e4 100644 --- a/.github/workflows/push-node-status-api.yaml +++ b/.github/workflows/push-node-status-api.yaml @@ -69,6 +69,6 @@ jobs: - name: BuildAndPushImageOnHarbor run: | - docker build -f ${{ env.WORKING_DIRECTORY }}/Dockerfile-pg . -t harbor.nymte.ch/nym/${{ env.IMAGE_NAME_AND_TAGS }} -t harbor.nymte.ch/nym/${{ env.CONTAINER_NAME }}:latest + docker build -f ${{ env.WORKING_DIRECTORY }}/Dockerfile . -t harbor.nymte.ch/nym/${{ env.IMAGE_NAME_AND_TAGS }} -t harbor.nymte.ch/nym/${{ env.CONTAINER_NAME }}:latest docker push harbor.nymte.ch/nym/${{ env.CONTAINER_NAME }} --all-tags diff --git a/Cargo.lock b/Cargo.lock index be1bab854c9..fd0e6783907 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6491,7 +6491,7 @@ dependencies = [ [[package]] name = "nym-node-status-agent" -version = "1.0.4" +version = "1.0.6" dependencies = [ "anyhow", "clap", @@ -6508,7 +6508,7 @@ dependencies = [ [[package]] name = "nym-node-status-api" -version = "3.3.2" +version = "4.0.3" dependencies = [ "ammonia", "anyhow", diff --git a/nym-node-status-api/nym-node-status-agent/Cargo.toml b/nym-node-status-api/nym-node-status-agent/Cargo.toml index 6c6cab2ca96..c214a734eed 100644 --- a/nym-node-status-api/nym-node-status-agent/Cargo.toml +++ b/nym-node-status-api/nym-node-status-agent/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "nym-node-status-agent" -version = "1.0.4" +version = "1.0.6" authors.workspace = true repository.workspace = true homepage.workspace = true diff --git a/nym-node-status-api/nym-node-status-agent/Dockerfile b/nym-node-status-api/nym-node-status-agent/Dockerfile index 192af6678d4..fdf8e4b540d 100644 --- a/nym-node-status-api/nym-node-status-agent/Dockerfile +++ b/nym-node-status-api/nym-node-status-agent/Dockerfile @@ -36,6 +36,9 @@ WORKDIR /nym COPY --from=builder /usr/src/nym/target/release/nym-node-status-agent ./ COPY --from=builder 
/usr/src/nym-vpn-client/nym-vpn-core/target/release/nym-gateway-probe ./ +COPY --from=builder /usr/src/nym/nym-node-status-api/nym-node-status-agent/entrypoint.sh ./ +RUN chmod +x /nym/entrypoint.sh +ENV SLEEP_TIME=5 ENV NODE_STATUS_AGENT_PROBE_PATH=/nym/nym-gateway-probe -ENTRYPOINT [ "/nym/nym-node-status-agent", "run-probe" ] +ENTRYPOINT [ "/nym/entrypoint.sh" ] diff --git a/nym-node-status-api/nym-node-status-agent/README.md b/nym-node-status-api/nym-node-status-agent/README.md new file mode 100644 index 00000000000..bf75649c253 --- /dev/null +++ b/nym-node-status-api/nym-node-status-agent/README.md @@ -0,0 +1,15 @@ +# Node Status Agent + +An agent to run tests and report results back to the Node Status API. + +Environment variables that can be set individually are: + +- `NYM_NODE_MNEMONICS` - mnemonic to get tickets for tests +- `NODE_STATUS_AGENT_SERVER_PORT` - Node Status API port +- `NODE_STATUS_AGENT_SERVER_ADDRESS` - Node Status API address + +Or use `NODE_STATUS_AGENT_ARGS` to pass your own arguments: + +``` +NODE_STATUS_AGENT_ARGS="run-probe --server localhost:8000 --mnemonic foo bar baz" +``` diff --git a/nym-node-status-api/nym-node-status-agent/entrypoint.sh b/nym-node-status-api/nym-node-status-agent/entrypoint.sh new file mode 100755 index 00000000000..648ce1652a0 --- /dev/null +++ b/nym-node-status-api/nym-node-status-agent/entrypoint.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +echo "Starting agent loop with sleep interval: ${SLEEP_TIME}s" + +# Trap SIGTERM to allow graceful shutdown +trap "echo 'Stopping...'; exit 0" SIGTERM + +DEFAULT_ARGS="run-probe --server \"${NODE_STATUS_AGENT_SERVER_ADDRESS}:${NODE_STATUS_AGENT_SERVER_PORT}\" --mnemonic \"${NYM_NODE_MNEMONICS}\"" +ARGS=${NODE_STATUS_AGENT_ARGS:-${DEFAULT_ARGS}} +COMMAND="/nym/nym-node-status-agent ${ARGS}" + +echo "default_args = '${DEFAULT_ARGS}'" +echo "args = '${ARGS}'" +echo "command = '${COMMAND}'" + +# Run probe in an infinite loop +while true; do + eval $COMMAND + sleep "$SLEEP_TIME" +done 
diff --git a/nym-node-status-api/nym-node-status-api/.env.example b/nym-node-status-api/nym-node-status-api/.env.example index 7625347a7c3..91b6f192cb8 100644 --- a/nym-node-status-api/nym-node-status-api/.env.example +++ b/nym-node-status-api/nym-node-status-api/.env.example @@ -1,11 +1,7 @@ # Example environment variables for nym-node-status-api -# Database configuration -# For SQLite: -# DATABASE_URL=sqlite://nym-node-status-api.sqlite - -# For PostgreSQL: -# DATABASE_URL=postgres://testuser:testpass@localhost:5433/nym_node_status_api_test +# Database configuration for PostgreSQL: +DATABASE_URL=postgres://testuser:testpass@localhost:5433/nym_node_status_api_test # Network configuration NETWORK_NAME=sandbox diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-117f96b2d0cec62858d065e700589f87d7d469fd301e831a1952641ec9375580.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-117f96b2d0cec62858d065e700589f87d7d469fd301e831a1952641ec9375580.json new file mode 100644 index 00000000000..2c2bdee3918 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-117f96b2d0cec62858d065e700589f87d7d469fd301e831a1952641ec9375580.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO gateways (\n gateway_identity_key,\n bonded,\n performance,\n self_described,\n last_updated_utc\n ) VALUES ($1, $2, $3, $4, $5)\n RETURNING id", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Varchar", + "Bool", + "Int4", + "Varchar", + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "117f96b2d0cec62858d065e700589f87d7d469fd301e831a1952641ec9375580" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-1327b5118f9144dddbcf8edb11f7dc549cf503409fd6dfedcdc02dbcd61d5454.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-1327b5118f9144dddbcf8edb11f7dc549cf503409fd6dfedcdc02dbcd61d5454.json new file mode 100644 index 
00000000000..c0373a1f5ce --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-1327b5118f9144dddbcf8edb11f7dc549cf503409fd6dfedcdc02dbcd61d5454.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n key as \"key!\",\n value_json as \"value_json!\",\n last_updated_utc as \"last_updated_utc!\"\n FROM summary", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "key!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "value_json!", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "last_updated_utc!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + true, + false + ] + }, + "hash": "1327b5118f9144dddbcf8edb11f7dc549cf503409fd6dfedcdc02dbcd61d5454" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-13c7178d57f28e11b47bc96d43ad14acd3c400ed9452db7095fac2a44ece8576.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-13c7178d57f28e11b47bc96d43ad14acd3c400ed9452db7095fac2a44ece8576.json new file mode 100644 index 00000000000..3d15c27947b --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-13c7178d57f28e11b47bc96d43ad14acd3c400ed9452db7095fac2a44ece8576.json @@ -0,0 +1,19 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO nym_node_daily_mixing_stats (\n node_id, date_utc,\n total_stake, packets_received,\n packets_sent, packets_dropped\n ) VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT(node_id, date_utc) DO UPDATE SET\n total_stake = excluded.total_stake,\n packets_received = nym_node_daily_mixing_stats.packets_received + excluded.packets_received,\n packets_sent = nym_node_daily_mixing_stats.packets_sent + excluded.packets_sent,\n packets_dropped = nym_node_daily_mixing_stats.packets_dropped + excluded.packets_dropped\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Varchar", + "Int8", + "Int4", + "Int4", + "Int4" + ] + }, + "nullable": [] + }, + "hash": 
"13c7178d57f28e11b47bc96d43ad14acd3c400ed9452db7095fac2a44ece8576" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-1c41eccf970cdc943ac2a343d03e6b229a2096d9fdac24d693005b57ed4333ca.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-1c41eccf970cdc943ac2a343d03e6b229a2096d9fdac24d693005b57ed4333ca.json new file mode 100644 index 00000000000..99864abd1f9 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-1c41eccf970cdc943ac2a343d03e6b229a2096d9fdac24d693005b57ed4333ca.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n total_stake\n FROM nym_nodes\n WHERE node_id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "total_stake", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int4" + ] + }, + "nullable": [ + false + ] + }, + "hash": "1c41eccf970cdc943ac2a343d03e6b229a2096d9fdac24d693005b57ed4333ca" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-227539374e7473f6f9642289c5b5d1bcd636315ab23537cb5f6d2f82a2bcb7bf.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-227539374e7473f6f9642289c5b5d1bcd636315ab23537cb5f6d2f82a2bcb7bf.json new file mode 100644 index 00000000000..63d48b08260 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-227539374e7473f6f9642289c5b5d1bcd636315ab23537cb5f6d2f82a2bcb7bf.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n node_id,\n bond_info as \"bond_info: serde_json::Value\"\n FROM\n nym_nodes\n WHERE\n bond_info IS NOT NULL\n AND\n self_described IS NOT NULL\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "node_id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "bond_info: serde_json::Value", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + true + ] + }, + "hash": "227539374e7473f6f9642289c5b5d1bcd636315ab23537cb5f6d2f82a2bcb7bf" +} diff --git 
a/nym-node-status-api/nym-node-status-api/.sqlx/query-25300e435780101fa207c8e26ef2f49ba5db84d63e89440bb494e8327fe73686.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-25300e435780101fa207c8e26ef2f49ba5db84d63e89440bb494e8327fe73686.json new file mode 100644 index 00000000000..45ec9220374 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-25300e435780101fa207c8e26ef2f49ba5db84d63e89440bb494e8327fe73686.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT gateway_identity_key\n FROM gateways\n WHERE bonded = true\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "gateway_identity_key", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "25300e435780101fa207c8e26ef2f49ba5db84d63e89440bb494e8327fe73686" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-283f49a65c7d70bf271702ff6a5c7ad6e68c81932d295ff18ed198c54706a57c.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-283f49a65c7d70bf271702ff6a5c7ad6e68c81932d295ff18ed198c54706a57c.json new file mode 100644 index 00000000000..af701d00586 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-283f49a65c7d70bf271702ff6a5c7ad6e68c81932d295ff18ed198c54706a57c.json @@ -0,0 +1,86 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n node_id,\n ed25519_identity_pubkey,\n total_stake,\n ip_addresses as \"ip_addresses!: serde_json::Value\",\n mix_port,\n x25519_sphinx_pubkey,\n node_role as \"node_role: serde_json::Value\",\n supported_roles as \"supported_roles: serde_json::Value\",\n entry as \"entry: serde_json::Value\",\n performance,\n self_described as \"self_described: serde_json::Value\",\n bond_info as \"bond_info: serde_json::Value\"\n FROM\n nym_nodes\n WHERE\n self_described IS NOT NULL\n AND\n bond_info IS NOT NULL\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "node_id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": 
"ed25519_identity_pubkey", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "total_stake", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "ip_addresses!: serde_json::Value", + "type_info": "Jsonb" + }, + { + "ordinal": 4, + "name": "mix_port", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "x25519_sphinx_pubkey", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "node_role: serde_json::Value", + "type_info": "Jsonb" + }, + { + "ordinal": 7, + "name": "supported_roles: serde_json::Value", + "type_info": "Jsonb" + }, + { + "ordinal": 8, + "name": "entry: serde_json::Value", + "type_info": "Jsonb" + }, + { + "ordinal": 9, + "name": "performance", + "type_info": "Varchar" + }, + { + "ordinal": 10, + "name": "self_described: serde_json::Value", + "type_info": "Jsonb" + }, + { + "ordinal": 11, + "name": "bond_info: serde_json::Value", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + true, + false, + true, + true + ] + }, + "hash": "283f49a65c7d70bf271702ff6a5c7ad6e68c81932d295ff18ed198c54706a57c" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-32e1c6b00d97dbd4987062f46017512f33caff1be023c82d914514f17112c061.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-32e1c6b00d97dbd4987062f46017512f33caff1be023c82d914514f17112c061.json new file mode 100644 index 00000000000..5f7696aee8a --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-32e1c6b00d97dbd4987062f46017512f33caff1be023c82d914514f17112c061.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE testruns SET status = $1 WHERE gateway_id = $2 AND status = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Int4", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "32e1c6b00d97dbd4987062f46017512f33caff1be023c82d914514f17112c061" +} diff --git 
a/nym-node-status-api/nym-node-status-api/.sqlx/query-38439a6c33bf21b90032659797105b6af747c3ae6f48bc41e5ec509a2d87abcc.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-38439a6c33bf21b90032659797105b6af747c3ae6f48bc41e5ec509a2d87abcc.json new file mode 100644 index 00000000000..ac921484b91 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-38439a6c33bf21b90032659797105b6af747c3ae6f48bc41e5ec509a2d87abcc.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH oldest_queued AS (\n SELECT id\n FROM testruns\n WHERE status = $1\n ORDER BY created_utc asc\n LIMIT 1\n FOR UPDATE SKIP LOCKED\n )\n UPDATE testruns\n SET\n status = $3,\n last_assigned_utc = $2\n FROM oldest_queued\n WHERE testruns.id = oldest_queued.id\n RETURNING\n testruns.id,\n testruns.gateway_id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "gateway_id", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int4", + "Int8", + "Int4" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "38439a6c33bf21b90032659797105b6af747c3ae6f48bc41e5ec509a2d87abcc" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-3c4d989cdb23fcd626ce258846d167237f0baa96cf24fcff1c0fcaa6dedafbb3.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-3c4d989cdb23fcd626ce258846d167237f0baa96cf24fcff1c0fcaa6dedafbb3.json new file mode 100644 index 00000000000..dd3ecf54a58 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-3c4d989cdb23fcd626ce258846d167237f0baa96cf24fcff1c0fcaa6dedafbb3.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE testruns SET status = $1 WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "3c4d989cdb23fcd626ce258846d167237f0baa96cf24fcff1c0fcaa6dedafbb3" +} diff --git 
a/nym-node-status-api/nym-node-status-api/.sqlx/query-4fca38abbb416d9457c34a8ba4faf481a837eda4f3e1bee1d430a4eb102a5b3d.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-4a664274579ea845f1db258dafedaf253b20c182174fc20062c98a49ffe4782a.json similarity index 85% rename from nym-node-status-api/nym-node-status-api/.sqlx/query-4fca38abbb416d9457c34a8ba4faf481a837eda4f3e1bee1d430a4eb102a5b3d.json rename to nym-node-status-api/nym-node-status-api/.sqlx/query-4a664274579ea845f1db258dafedaf253b20c182174fc20062c98a49ffe4782a.json index ee07d52cc20..125e94a8212 100644 --- a/nym-node-status-api/nym-node-status-api/.sqlx/query-4fca38abbb416d9457c34a8ba4faf481a837eda4f3e1bee1d430a4eb102a5b3d.json +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-4a664274579ea845f1db258dafedaf253b20c182174fc20062c98a49ffe4782a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "INSERT INTO gateway_session_stats\n (gateway_identity_key, node_id, day,\n unique_active_clients, session_started, users_hashes,\n vpn_sessions, mixnet_sessions, unknown_sessions)\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)\n ON CONFLICT DO NOTHING", + "query": "INSERT INTO gateway_session_stats\n (gateway_identity_key, node_id, day,\n unique_active_clients, session_started, users_hashes,\n vpn_sessions, mixnet_sessions, unknown_sessions)\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)\n ON CONFLICT DO NOTHING;", "describe": { "columns": [], "parameters": { @@ -18,5 +18,5 @@ }, "nullable": [] }, - "hash": "4fca38abbb416d9457c34a8ba4faf481a837eda4f3e1bee1d430a4eb102a5b3d" + "hash": "4a664274579ea845f1db258dafedaf253b20c182174fc20062c98a49ffe4782a" } diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-4b671c10fe1ce968dd4cddb139fe13df44dbeee268dbb3a4ccfa403053b28707.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-4b671c10fe1ce968dd4cddb139fe13df44dbeee268dbb3a4ccfa403053b28707.json new file mode 100644 index 00000000000..a4ee44c1b4e --- /dev/null +++ 
b/nym-node-status-api/nym-node-status-api/.sqlx/query-4b671c10fe1ce968dd4cddb139fe13df44dbeee268dbb3a4ccfa403053b28707.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO summary\n (key, value_json, last_updated_utc)\n VALUES ($1, $2, $3)\n ON CONFLICT(key) DO UPDATE SET\n value_json=excluded.value_json,\n last_updated_utc=excluded.last_updated_utc;", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "4b671c10fe1ce968dd4cddb139fe13df44dbeee268dbb3a4ccfa403053b28707" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-4c55e15e47237149282bd8c0e3d861fc752db87fd57d7ff41709a7aa469b1aa5.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-4c55e15e47237149282bd8c0e3d861fc752db87fd57d7ff41709a7aa469b1aa5.json new file mode 100644 index 00000000000..8eae32c6ddf --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-4c55e15e47237149282bd8c0e3d861fc752db87fd57d7ff41709a7aa469b1aa5.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO summary_history\n (date, timestamp_utc, value_json)\n VALUES ($1, $2, $3)\n ON CONFLICT(date) DO UPDATE SET\n timestamp_utc=excluded.timestamp_utc,\n value_json=excluded.value_json;", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Int8", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "4c55e15e47237149282bd8c0e3d861fc752db87fd57d7ff41709a7aa469b1aa5" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-528f96815c255353230ecbed2b7615b01fe87b1f88819d12f109f3556484772b.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-528f96815c255353230ecbed2b7615b01fe87b1f88819d12f109f3556484772b.json new file mode 100644 index 00000000000..d1ce13640ff --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-528f96815c255353230ecbed2b7615b01fe87b1f88819d12f109f3556484772b.json @@ -0,0 +1,15 @@ +{ + "db_name": 
"PostgreSQL", + "query": "UPDATE gateways SET last_probe_log = $1 WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "528f96815c255353230ecbed2b7615b01fe87b1f88819d12f109f3556484772b" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-56854f703321ff8d8f30628c7e5322024ea01b778ab55efa9c7c6b219ef36308.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-56854f703321ff8d8f30628c7e5322024ea01b778ab55efa9c7c6b219ef36308.json new file mode 100644 index 00000000000..4525f5cd77d --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-56854f703321ff8d8f30628c7e5322024ea01b778ab55efa9c7c6b219ef36308.json @@ -0,0 +1,44 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n nd.node_id,\n moniker,\n website,\n security_contact,\n details\n FROM\n nym_node_descriptions nd\n INNER JOIN\n nym_nodes nn on nd.node_id = nn.node_id\n WHERE\n bond_info IS NOT NULL\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "node_id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "moniker", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "website", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "security_contact", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "details", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + true, + true, + true, + true + ] + }, + "hash": "56854f703321ff8d8f30628c7e5322024ea01b778ab55efa9c7c6b219ef36308" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-57111a1b630945c8bea377bfd6bc1285d938f4c88ee5c277878d262400db5fb1.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-57111a1b630945c8bea377bfd6bc1285d938f4c88ee5c277878d262400db5fb1.json new file mode 100644 index 00000000000..30a5b433d8b --- /dev/null +++ 
b/nym-node-status-api/nym-node-status-api/.sqlx/query-57111a1b630945c8bea377bfd6bc1285d938f4c88ee5c277878d262400db5fb1.json @@ -0,0 +1,19 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO gateways\n (gateway_identity_key, bonded,\n self_described, explorer_pretty_bond,\n last_updated_utc, performance)\n VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT(gateway_identity_key) DO UPDATE SET\n bonded=excluded.bonded,\n self_described=excluded.self_described,\n explorer_pretty_bond=excluded.explorer_pretty_bond,\n last_updated_utc=excluded.last_updated_utc,\n performance = excluded.performance;", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Bool", + "Varchar", + "Varchar", + "Int8", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "57111a1b630945c8bea377bfd6bc1285d938f4c88ee5c277878d262400db5fb1" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-32cf68dd34346fcac5cd0cd585845a9df296fdaebc5920dd1c12398e2967cffb.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-5a5c764d89e2f19eec4dc994c83e4aedc4b62634ae22ccdfc218fb3c5ce58560.json similarity index 52% rename from nym-node-status-api/nym-node-status-api/.sqlx/query-32cf68dd34346fcac5cd0cd585845a9df296fdaebc5920dd1c12398e2967cffb.json rename to nym-node-status-api/nym-node-status-api/.sqlx/query-5a5c764d89e2f19eec4dc994c83e4aedc4b62634ae22ccdfc218fb3c5ce58560.json index 4f61646962d..e582834c255 100644 --- a/nym-node-status-api/nym-node-status-api/.sqlx/query-32cf68dd34346fcac5cd0cd585845a9df296fdaebc5920dd1c12398e2967cffb.json +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-5a5c764d89e2f19eec4dc994c83e4aedc4b62634ae22ccdfc218fb3c5ce58560.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n date_utc as \"date_utc!\",\n SUM(total_stake) as \"total_stake!: i64\",\n SUM(packets_received) as \"total_packets_received!: i64\",\n SUM(packets_sent) as \"total_packets_sent!: i64\",\n SUM(packets_dropped) as \"total_packets_dropped!: i64\"\n 
FROM (\n SELECT\n date_utc,\n n.total_stake,\n n.packets_received,\n n.packets_sent,\n n.packets_dropped\n FROM nym_node_daily_mixing_stats n\n )\n GROUP BY date_utc\n ORDER BY date_utc ASC\n ", + "query": "\n SELECT\n date_utc as \"date_utc!\",\n SUM(total_stake)::bigint as \"total_stake!: i64\",\n SUM(packets_received)::bigint as \"total_packets_received!: i64\",\n SUM(packets_sent)::bigint as \"total_packets_sent!: i64\",\n SUM(packets_dropped)::bigint as \"total_packets_dropped!: i64\"\n FROM nym_node_daily_mixing_stats\n GROUP BY date_utc\n ORDER BY date_utc ASC\n ", "describe": { "columns": [ { @@ -11,7 +11,7 @@ { "ordinal": 1, "name": "total_stake!: i64", - "type_info": "Numeric" + "type_info": "Int8" }, { "ordinal": 2, @@ -40,5 +40,5 @@ null ] }, - "hash": "32cf68dd34346fcac5cd0cd585845a9df296fdaebc5920dd1c12398e2967cffb" + "hash": "5a5c764d89e2f19eec4dc994c83e4aedc4b62634ae22ccdfc218fb3c5ce58560" } diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-7600823da7ce80b8ffda933608603a2752e28df775d1af8fd943a5fc8d7dc00d.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-7600823da7ce80b8ffda933608603a2752e28df775d1af8fd943a5fc8d7dc00d.json new file mode 100644 index 00000000000..bdea8047add --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-7600823da7ce80b8ffda933608603a2752e28df775d1af8fd943a5fc8d7dc00d.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n id as \"id!\",\n date as \"date!\",\n timestamp_utc as \"timestamp_utc!\",\n value_json as \"value_json!\"\n FROM summary_history\n ORDER BY date DESC\n LIMIT 30", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "date!", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "timestamp_utc!", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "value_json!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + 
false, + true + ] + }, + "hash": "7600823da7ce80b8ffda933608603a2752e28df775d1af8fd943a5fc8d7dc00d" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-7b1c9b42347e83c396c51cfbd03558d124f3d4f0209de661832560001cddb0eb.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-7b1c9b42347e83c396c51cfbd03558d124f3d4f0209de661832560001cddb0eb.json new file mode 100644 index 00000000000..0432ded7a8e --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-7b1c9b42347e83c396c51cfbd03558d124f3d4f0209de661832560001cddb0eb.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n gateway_identity_key\n FROM\n gateways\n WHERE\n id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "gateway_identity_key", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Int4" + ] + }, + "nullable": [ + false + ] + }, + "hash": "7b1c9b42347e83c396c51cfbd03558d124f3d4f0209de661832560001cddb0eb" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-7e68a05cd72b223ba363aa497097dac4aa2f08d12ed1ce590f7ab9caafc9574d.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-7e68a05cd72b223ba363aa497097dac4aa2f08d12ed1ce590f7ab9caafc9574d.json new file mode 100644 index 00000000000..44f9e3bf862 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-7e68a05cd72b223ba363aa497097dac4aa2f08d12ed1ce590f7ab9caafc9574d.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n id,\n gateway_identity_key,\n self_described,\n explorer_pretty_bond\n FROM gateways\n WHERE gateway_identity_key = $1\n AND bonded = true\n ORDER BY gateway_identity_key\n LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "gateway_identity_key", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "self_described", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "explorer_pretty_bond", + "type_info": 
"Varchar" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false, + false, + false, + true + ] + }, + "hash": "7e68a05cd72b223ba363aa497097dac4aa2f08d12ed1ce590f7ab9caafc9574d" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-82bf46daa4f64c93a90e3c7ee505e4d9eb9d0a4fe272663628fafada8e97213e.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-82bf46daa4f64c93a90e3c7ee505e4d9eb9d0a4fe272663628fafada8e97213e.json new file mode 100644 index 00000000000..25922e6d43e --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-82bf46daa4f64c93a90e3c7ee505e4d9eb9d0a4fe272663628fafada8e97213e.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE\n testruns\n SET\n status = $1\n WHERE\n status = $2\n AND\n last_assigned_utc < $3\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Int4", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "82bf46daa4f64c93a90e3c7ee505e4d9eb9d0a4fe272663628fafada8e97213e" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-88a4554c2857288c314768c56648a5f1811d2053582380ca602335a122cef8db.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-88a4554c2857288c314768c56648a5f1811d2053582380ca602335a122cef8db.json new file mode 100644 index 00000000000..174d5286677 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-88a4554c2857288c314768c56648a5f1811d2053582380ca602335a122cef8db.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO testruns (\n id,\n gateway_id,\n status,\n created_utc,\n last_assigned_utc,\n ip_address,\n log\n ) VALUES ($1, $2, $3, $4, $5, $6, $7)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Int4", + "Int4", + "Int8", + "Int8", + "Varchar", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "88a4554c2857288c314768c56648a5f1811d2053582380ca602335a122cef8db" +} diff --git 
a/nym-node-status-api/nym-node-status-api/.sqlx/query-8fff43e94fe05f596eb311059b27202a3a30c5d0080f1b5cfadea3be04b4bf1c.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-8fff43e94fe05f596eb311059b27202a3a30c5d0080f1b5cfadea3be04b4bf1c.json new file mode 100644 index 00000000000..36058e8a74b --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-8fff43e94fe05f596eb311059b27202a3a30c5d0080f1b5cfadea3be04b4bf1c.json @@ -0,0 +1,19 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO nym_node_descriptions (\n node_id, moniker, website, security_contact, details, last_updated_utc\n ) VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT (node_id) DO UPDATE SET\n moniker = excluded.moniker,\n website = excluded.website,\n security_contact = excluded.security_contact,\n details = excluded.details,\n last_updated_utc = excluded.last_updated_utc\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "8fff43e94fe05f596eb311059b27202a3a30c5d0080f1b5cfadea3be04b4bf1c" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-a7945636d7c4f536167325061d7fd70dcd7b1a8f81acf37889e9bb21c9430274.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-a7945636d7c4f536167325061d7fd70dcd7b1a8f81acf37889e9bb21c9430274.json new file mode 100644 index 00000000000..a1063ce8223 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-a7945636d7c4f536167325061d7fd70dcd7b1a8f81acf37889e9bb21c9430274.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM gateway_session_stats WHERE day <= $1", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Date" + ] + }, + "nullable": [] + }, + "hash": "a7945636d7c4f536167325061d7fd70dcd7b1a8f81acf37889e9bb21c9430274" +} diff --git 
a/nym-node-status-api/nym-node-status-api/.sqlx/query-a7c17b091bba4ad65d3a7e4f62958cf3f5894b394051d724622cf660977d7e39.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-a7c17b091bba4ad65d3a7e4f62958cf3f5894b394051d724622cf660977d7e39.json new file mode 100644 index 00000000000..aa27853218d --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-a7c17b091bba4ad65d3a7e4f62958cf3f5894b394051d724622cf660977d7e39.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n COALESCE(packets_received, 0) as \"packets_received!: _\",\n COALESCE(packets_sent, 0) as \"packets_sent!: _\",\n COALESCE(packets_dropped, 0) as \"packets_dropped!: _\"\n FROM nym_nodes_packet_stats_raw\n WHERE node_id = $1\n ORDER BY timestamp_utc DESC\n LIMIT 1 OFFSET 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "packets_received!: _", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "packets_sent!: _", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "packets_dropped!: _", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int4" + ] + }, + "nullable": [ + null, + null, + null + ] + }, + "hash": "a7c17b091bba4ad65d3a7e4f62958cf3f5894b394051d724622cf660977d7e39" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-abeab3c52e9f0fd5730feb2df7547fcf2896fbc89f16599420b8e72e2e9e70e5.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-abeab3c52e9f0fd5730feb2df7547fcf2896fbc89f16599420b8e72e2e9e70e5.json new file mode 100644 index 00000000000..2ea0c800727 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-abeab3c52e9f0fd5730feb2df7547fcf2896fbc89f16599420b8e72e2e9e70e5.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO nym_nodes_packet_stats_raw (\n node_id, timestamp_utc, packets_received, packets_sent, packets_dropped\n ) VALUES ($1, $2, $3, $4, $5)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Int4", + 
"Int4", + "Int4", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "abeab3c52e9f0fd5730feb2df7547fcf2896fbc89f16599420b8e72e2e9e70e5" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-b010fb91828f7e4f0b72bdfe3b58b2abb437cccdb6ebd2e1087cc822ed737b0e.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-b010fb91828f7e4f0b72bdfe3b58b2abb437cccdb6ebd2e1087cc822ed737b0e.json new file mode 100644 index 00000000000..5379205f8d1 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-b010fb91828f7e4f0b72bdfe3b58b2abb437cccdb6ebd2e1087cc822ed737b0e.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO nym_nodes\n (node_id, ed25519_identity_pubkey,\n total_stake,\n ip_addresses, mix_port,\n x25519_sphinx_pubkey, node_role,\n supported_roles, entry,\n self_described,\n bond_info,\n performance, last_updated_utc\n )\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)\n ON CONFLICT(node_id) DO UPDATE SET\n ed25519_identity_pubkey=excluded.ed25519_identity_pubkey,\n ip_addresses=excluded.ip_addresses,\n mix_port=excluded.mix_port,\n x25519_sphinx_pubkey=excluded.x25519_sphinx_pubkey,\n node_role=excluded.node_role,\n supported_roles=excluded.supported_roles,\n entry=excluded.entry,\n self_described=excluded.self_described,\n bond_info=excluded.bond_info,\n performance=excluded.performance,\n last_updated_utc=excluded.last_updated_utc\n ;", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Varchar", + "Int8", + "Jsonb", + "Int4", + "Varchar", + "Jsonb", + "Jsonb", + "Jsonb", + "Jsonb", + "Jsonb", + "Varchar", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "b010fb91828f7e4f0b72bdfe3b58b2abb437cccdb6ebd2e1087cc822ed737b0e" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-b0c588db5f7be90c6d4cb1b55c3a8ed4e9a64884ac5619e0abdecb04f2d13a74.json 
b/nym-node-status-api/nym-node-status-api/.sqlx/query-b0c588db5f7be90c6d4cb1b55c3a8ed4e9a64884ac5619e0abdecb04f2d13a74.json new file mode 100644 index 00000000000..677fc961c03 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-b0c588db5f7be90c6d4cb1b55c3a8ed4e9a64884ac5619e0abdecb04f2d13a74.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n id,\n gateway_identity_key\n FROM gateways\n WHERE id = $1\n LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "gateway_identity_key", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Int4" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "b0c588db5f7be90c6d4cb1b55c3a8ed4e9a64884ac5619e0abdecb04f2d13a74" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-b68796d1d8d2384b30f1aace06269682c4ae96f774261f5c298264d3c12e5b67.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-b68796d1d8d2384b30f1aace06269682c4ae96f774261f5c298264d3c12e5b67.json new file mode 100644 index 00000000000..141b3da9774 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-b68796d1d8d2384b30f1aace06269682c4ae96f774261f5c298264d3c12e5b67.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE nym_nodes\n SET\n self_described = NULL,\n bond_info = NULL", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "b68796d1d8d2384b30f1aace06269682c4ae96f774261f5c298264d3c12e5b67" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-b76c563214408ac4b236a5e7cec1162fe63e394e2dac5be39156bbe39143cb66.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-b76c563214408ac4b236a5e7cec1162fe63e394e2dac5be39156bbe39143cb66.json new file mode 100644 index 00000000000..cba56580f23 --- /dev/null +++ 
b/nym-node-status-api/nym-node-status-api/.sqlx/query-b76c563214408ac4b236a5e7cec1162fe63e394e2dac5be39156bbe39143cb66.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT id FROM gateways WHERE gateway_identity_key = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "b76c563214408ac4b236a5e7cec1162fe63e394e2dac5be39156bbe39143cb66" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-c09be83be9d5a679a08ab528c6305fddae499f26b55c0f8388392b27b179514b.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-c09be83be9d5a679a08ab528c6305fddae499f26b55c0f8388392b27b179514b.json new file mode 100644 index 00000000000..f0c673de319 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-c09be83be9d5a679a08ab528c6305fddae499f26b55c0f8388392b27b179514b.json @@ -0,0 +1,92 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n gw.gateway_identity_key as \"gateway_identity_key!\",\n gw.bonded as \"bonded: bool\",\n gw.performance as \"performance!\",\n gw.self_described as \"self_described?\",\n gw.explorer_pretty_bond as \"explorer_pretty_bond?\",\n gw.last_probe_result as \"last_probe_result?\",\n gw.last_probe_log as \"last_probe_log?\",\n gw.last_testrun_utc as \"last_testrun_utc?\",\n gw.last_updated_utc as \"last_updated_utc!\",\n COALESCE(gd.moniker, 'NA') as \"moniker!\",\n COALESCE(gd.website, 'NA') as \"website!\",\n COALESCE(gd.security_contact, 'NA') as \"security_contact!\",\n COALESCE(gd.details, 'NA') as \"details!\"\n FROM gateways gw\n LEFT JOIN gateway_description gd\n ON gw.gateway_identity_key = gd.gateway_identity_key\n ORDER BY gw.gateway_identity_key", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "gateway_identity_key!", + "type_info": "Varchar" + }, + { + "ordinal": 1, + "name": "bonded: bool", + "type_info": "Bool" + }, + { + "ordinal": 2, + 
"name": "performance!", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "self_described?", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "explorer_pretty_bond?", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "last_probe_result?", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "last_probe_log?", + "type_info": "Varchar" + }, + { + "ordinal": 7, + "name": "last_testrun_utc?", + "type_info": "Int8" + }, + { + "ordinal": 8, + "name": "last_updated_utc!", + "type_info": "Int8" + }, + { + "ordinal": 9, + "name": "moniker!", + "type_info": "Varchar" + }, + { + "ordinal": 10, + "name": "website!", + "type_info": "Varchar" + }, + { + "ordinal": 11, + "name": "security_contact!", + "type_info": "Varchar" + }, + { + "ordinal": 12, + "name": "details!", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + true, + true, + true, + true, + false, + null, + null, + null, + null + ] + }, + "hash": "c09be83be9d5a679a08ab528c6305fddae499f26b55c0f8388392b27b179514b" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-c48d04fc3de59dd484f0a63d40336ced54e08785f77e9ef85f3157d004ec85dc.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-c48d04fc3de59dd484f0a63d40336ced54e08785f77e9ef85f3157d004ec85dc.json new file mode 100644 index 00000000000..7954f28f312 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-c48d04fc3de59dd484f0a63d40336ced54e08785f77e9ef85f3157d004ec85dc.json @@ -0,0 +1,86 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n node_id,\n ed25519_identity_pubkey,\n total_stake,\n ip_addresses as \"ip_addresses!: serde_json::Value\",\n mix_port,\n x25519_sphinx_pubkey,\n node_role as \"node_role: serde_json::Value\",\n supported_roles as \"supported_roles: serde_json::Value\",\n entry as \"entry: serde_json::Value\",\n performance,\n self_described as \"self_described: serde_json::Value\",\n bond_info as 
\"bond_info: serde_json::Value\"\n FROM\n nym_nodes\n ORDER BY\n node_id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "node_id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "ed25519_identity_pubkey", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "total_stake", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "ip_addresses!: serde_json::Value", + "type_info": "Jsonb" + }, + { + "ordinal": 4, + "name": "mix_port", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "x25519_sphinx_pubkey", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "node_role: serde_json::Value", + "type_info": "Jsonb" + }, + { + "ordinal": 7, + "name": "supported_roles: serde_json::Value", + "type_info": "Jsonb" + }, + { + "ordinal": 8, + "name": "entry: serde_json::Value", + "type_info": "Jsonb" + }, + { + "ordinal": 9, + "name": "performance", + "type_info": "Varchar" + }, + { + "ordinal": 10, + "name": "self_described: serde_json::Value", + "type_info": "Jsonb" + }, + { + "ordinal": 11, + "name": "bond_info: serde_json::Value", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + true, + false, + true, + true + ] + }, + "hash": "c48d04fc3de59dd484f0a63d40336ced54e08785f77e9ef85f3157d004ec85dc" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-c7656b2b1b4328415772ce69d0568bd5438d6c8496ca9cbdcfb70bb5375b345e.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-c7656b2b1b4328415772ce69d0568bd5438d6c8496ca9cbdcfb70bb5375b345e.json new file mode 100644 index 00000000000..5cf589c5d70 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-c7656b2b1b4328415772ce69d0568bd5438d6c8496ca9cbdcfb70bb5375b345e.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n node_id,\n self_described as \"self_described: serde_json::Value\"\n FROM\n nym_nodes\n WHERE\n self_described 
IS NOT NULL\n ORDER BY\n node_id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "node_id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "self_described: serde_json::Value", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + true + ] + }, + "hash": "c7656b2b1b4328415772ce69d0568bd5438d6c8496ca9cbdcfb70bb5375b345e" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-d41cafc76cb49c03df7452c405a4e2e5e3951c41dc35c20261c1d959c0d6403f.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-d41cafc76cb49c03df7452c405a4e2e5e3951c41dc35c20261c1d959c0d6403f.json new file mode 100644 index 00000000000..7eafc8e67d8 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-d41cafc76cb49c03df7452c405a4e2e5e3951c41dc35c20261c1d959c0d6403f.json @@ -0,0 +1,59 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n id as \"id!\",\n gateway_id as \"gateway_id!\",\n status as \"status!\",\n created_utc as \"created_utc!\",\n ip_address as \"ip_address!\",\n log as \"log!\",\n last_assigned_utc\n FROM testruns\n WHERE\n id = $1\n AND\n status = $2\n ORDER BY created_utc\n LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id!", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "gateway_id!", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "status!", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "created_utc!", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "ip_address!", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "log!", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "last_assigned_utc", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int4", + "Int4" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true + ] + }, + "hash": "d41cafc76cb49c03df7452c405a4e2e5e3951c41dc35c20261c1d959c0d6403f" +} diff --git 
a/nym-node-status-api/nym-node-status-api/.sqlx/query-d5a22865b11127c53eaa0efb74fa94df734bb29a5ea1eac12f51a55e32d40836.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-d5a22865b11127c53eaa0efb74fa94df734bb29a5ea1eac12f51a55e32d40836.json new file mode 100644 index 00000000000..d970b4e3ee3 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-d5a22865b11127c53eaa0efb74fa94df734bb29a5ea1eac12f51a55e32d40836.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n COUNT(id) as \"count: i64\"\n FROM testruns\n WHERE\n status = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "count: i64", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int4" + ] + }, + "nullable": [ + null + ] + }, + "hash": "d5a22865b11127c53eaa0efb74fa94df734bb29a5ea1eac12f51a55e32d40836" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-d79d8a7f1f35613e426ef7706a2ffb6dcd721a40318496779e447cf2207665b9.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-d79d8a7f1f35613e426ef7706a2ffb6dcd721a40318496779e447cf2207665b9.json new file mode 100644 index 00000000000..5c7ad92e0f6 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-d79d8a7f1f35613e426ef7706a2ffb6dcd721a40318496779e447cf2207665b9.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE gateways SET last_testrun_utc = $1, last_updated_utc = $2 WHERE id = $3", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "d79d8a7f1f35613e426ef7706a2ffb6dcd721a40318496779e447cf2207665b9" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-ddf003e7e13653388f487e3adfc1aad0a285e29c797f99ec00bcccb063f76b64.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-ddf003e7e13653388f487e3adfc1aad0a285e29c797f99ec00bcccb063f76b64.json new file mode 100644 index 00000000000..59962eb2ed0 --- /dev/null +++ 
b/nym-node-status-api/nym-node-status-api/.sqlx/query-ddf003e7e13653388f487e3adfc1aad0a285e29c797f99ec00bcccb063f76b64.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n id,\n gateway_id,\n status,\n created_utc,\n ip_address,\n log,\n last_assigned_utc\n FROM testruns\n WHERE id = $1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "gateway_id", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "status", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "created_utc", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "ip_address", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "log", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "last_assigned_utc", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int4" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true + ] + }, + "hash": "ddf003e7e13653388f487e3adfc1aad0a285e29c797f99ec00bcccb063f76b64" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-de79ab87b392b9dd087820dfd2d72a8ee5522891233c7d26b8670ee84e641774.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-de79ab87b392b9dd087820dfd2d72a8ee5522891233c7d26b8670ee84e641774.json new file mode 100644 index 00000000000..b85772f486d --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-de79ab87b392b9dd087820dfd2d72a8ee5522891233c7d26b8670ee84e641774.json @@ -0,0 +1,19 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO gateway_description (\n gateway_identity_key,\n moniker,\n website,\n security_contact,\n details,\n last_updated_utc\n ) VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT (gateway_identity_key) DO UPDATE SET\n moniker = excluded.moniker,\n website = excluded.website,\n security_contact = excluded.security_contact,\n details = excluded.details,\n last_updated_utc = excluded.last_updated_utc\n ", + "describe": { + 
"columns": [], + "parameters": { + "Left": [ + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Varchar", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "de79ab87b392b9dd087820dfd2d72a8ee5522891233c7d26b8670ee84e641774" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-dfa035c968534926736adf0e5359cde3f6f6689a80299b5b6c1d7bd048965e6e.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-dfa035c968534926736adf0e5359cde3f6f6689a80299b5b6c1d7bd048965e6e.json new file mode 100644 index 00000000000..2887c3bea6f --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-dfa035c968534926736adf0e5359cde3f6f6689a80299b5b6c1d7bd048965e6e.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE gateways SET last_probe_result = $1 WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "dfa035c968534926736adf0e5359cde3f6f6689a80299b5b6c1d7bd048965e6e" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-f343df183767af9815847cb94ccbd484010a7346de03f1e0959a09a964344de8.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-f343df183767af9815847cb94ccbd484010a7346de03f1e0959a09a964344de8.json new file mode 100644 index 00000000000..b6dd6ac8a93 --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-f343df183767af9815847cb94ccbd484010a7346de03f1e0959a09a964344de8.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT\n id,\n gateway_id,\n status,\n created_utc,\n ip_address,\n log,\n last_assigned_utc\n FROM testruns\n WHERE gateway_id = $1 AND status != 2\n ORDER BY id DESC\n LIMIT 1", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "gateway_id", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "status", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "created_utc", + "type_info": "Int8" + }, + { + 
"ordinal": 4, + "name": "ip_address", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "log", + "type_info": "Varchar" + }, + { + "ordinal": 6, + "name": "last_assigned_utc", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int4" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true + ] + }, + "hash": "f343df183767af9815847cb94ccbd484010a7346de03f1e0959a09a964344de8" +} diff --git a/nym-node-status-api/nym-node-status-api/.sqlx/query-f7e3fa31d68c028bf39cc95389f29f8758ec922dd2e7ea064a1e537e580c9ee5.json b/nym-node-status-api/nym-node-status-api/.sqlx/query-f7e3fa31d68c028bf39cc95389f29f8758ec922dd2e7ea064a1e537e580c9ee5.json new file mode 100644 index 00000000000..bfa4d81356c --- /dev/null +++ b/nym-node-status-api/nym-node-status-api/.sqlx/query-f7e3fa31d68c028bf39cc95389f29f8758ec922dd2e7ea064a1e537e580c9ee5.json @@ -0,0 +1,12 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE\n gateways\n SET\n bonded = false\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [] + }, + "nullable": [] + }, + "hash": "f7e3fa31d68c028bf39cc95389f29f8758ec922dd2e7ea064a1e537e580c9ee5" +} diff --git a/nym-node-status-api/nym-node-status-api/Cargo.toml b/nym-node-status-api/nym-node-status-api/Cargo.toml index 841ffd1e7dc..38669cb475f 100644 --- a/nym-node-status-api/nym-node-status-api/Cargo.toml +++ b/nym-node-status-api/nym-node-status-api/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "nym-node-status-api" -version = "3.3.2" +version = "4.0.3" authors.workspace = true repository.workspace = true homepage.workspace = true @@ -49,7 +49,7 @@ serde_json = { workspace = true } serde_json_path = { workspace = true } strum = { workspace = true } strum_macros = { workspace = true } -sqlx = { workspace = true, features = ["runtime-tokio-rustls", "time"] } +sqlx = { workspace = true, features = ["runtime-tokio-rustls", "time", "postgres"] } thiserror = { workspace = true } time = { workspace = true, features = 
["formatting"] } tokio = { workspace = true, features = ["rt-multi-thread"] } @@ -64,11 +64,6 @@ utoipauto = { workspace = true } nym-node-metrics = { path = "../../nym-node/nym-node-metrics" } -[features] -default = ["pg"] -sqlite = ["sqlx/sqlite"] -pg = ["sqlx/postgres"] - [build-dependencies] anyhow = { workspace = true } tokio = { workspace = true, features = ["macros"] } @@ -76,6 +71,7 @@ sqlx = { workspace = true, features = [ "runtime-tokio-rustls", "macros", "migrate", + "postgres", ] } [dev-dependencies] diff --git a/nym-node-status-api/nym-node-status-api/Dockerfile-pg b/nym-node-status-api/nym-node-status-api/Dockerfile similarity index 96% rename from nym-node-status-api/nym-node-status-api/Dockerfile-pg rename to nym-node-status-api/nym-node-status-api/Dockerfile index 4a6c7b24c59..0c31f97e719 100644 --- a/nym-node-status-api/nym-node-status-api/Dockerfile-pg +++ b/nym-node-status-api/nym-node-status-api/Dockerfile @@ -4,7 +4,7 @@ FROM harbor.nymte.ch/dockerhub/rust:latest AS builder COPY ./ /usr/src/nym WORKDIR /usr/src/nym/nym-node-status-api/nym-node-status-api/ -RUN cargo build --release --features pg +RUN cargo build --release #------------------------------------------------------------------- diff --git a/nym-node-status-api/nym-node-status-api/Dockerfile-sqlite b/nym-node-status-api/nym-node-status-api/Dockerfile-sqlite deleted file mode 100644 index c37afedb087..00000000000 --- a/nym-node-status-api/nym-node-status-api/Dockerfile-sqlite +++ /dev/null @@ -1,37 +0,0 @@ -# this will only work with VPN, otherwise remove the harbor part -FROM harbor.nymte.ch/dockerhub/rust:latest AS builder - -COPY ./ /usr/src/nym -WORKDIR /usr/src/nym/nym-node-status-api/nym-node-status-api/ - -RUN cargo build --release --features sqlite --no-default-features - - -#------------------------------------------------------------------- -# The following environment variables are required at runtime: -# -# EXPLORER_API -# NYXD -# NYM_API -# DATABASE_URL -# -# And 
optionally: -# -# NYM_NODE_STATUS_API_NYM_HTTP_CACHE_TTL -# NYM_NODE_STATUS_API_HTTP_PORT -# NYM_API_CLIENT_TIMEOUT -# EXPLORER_CLIENT_TIMEOUT -# NODE_STATUS_API_MONITOR_REFRESH_INTERVAL -# NODE_STATUS_API_TESTRUN_REFRESH_INTERVAL -# -# see https://github.com/nymtech/nym/blob/develop/nym-node-status-api/src/cli.rs for details -#------------------------------------------------------------------- - -FROM harbor.nymte.ch/dockerhub/ubuntu:24.04 - -RUN apt-get update && apt-get install -y ca-certificates - -WORKDIR /nym - -COPY --from=builder /usr/src/nym/target/release/nym-node-status-api ./ -ENTRYPOINT [ "/nym/nym-node-status-api" ] diff --git a/nym-node-status-api/nym-node-status-api/Makefile b/nym-node-status-api/nym-node-status-api/Makefile index 5f2881c1b44..410e39661e4 100644 --- a/nym-node-status-api/nym-node-status-api/Makefile +++ b/nym-node-status-api/nym-node-status-api/Makefile @@ -52,46 +52,28 @@ test-db-migrate: ## Run database migrations against PostgreSQL .PHONY: test-db-prepare test-db-prepare: ## Run sqlx prepare for compile-time query verification @echo "Running sqlx prepare for PostgreSQL..." - DATABASE_URL="$(TEST_DATABASE_URL)" cargo sqlx prepare -- --features pg + DATABASE_URL="$(TEST_DATABASE_URL)" cargo sqlx prepare # --- Build and Test Targets --- .PHONY: test-db-run -test-db-run: ## Run tests with PostgreSQL feature +test-db-run: ## Run tests @echo "Running tests with PostgreSQL..." - DATABASE_URL="$(TEST_DATABASE_URL)" cargo test --features pg --no-default-features + DATABASE_URL="$(TEST_DATABASE_URL)" cargo test -.PHONY: build-pg -build-pg: ## Build with PostgreSQL feature - @echo "Building with PostgreSQL feature..." - cargo build --features pg --no-default-features +.PHONY: build +build: ## Build + @echo "Building..." + cargo build -.PHONY: build-sqlite -build-sqlite: ## Build with SQLite feature (default) - @echo "Building with SQLite feature..." 
- cargo build --features sqlite --no-default-features - -.PHONY: check-pg -check-pg: ## Check code with PostgreSQL feature - @echo "Checking code with PostgreSQL feature..." - cargo check --features pg --no-default-features - -.PHONY: check-sqlite -check-sqlite: ## Check code with SQLite feature - @echo "Checking code with SQLite feature..." - cargo check --features sqlite --no-default-features +.PHONY: check +check: ## Check code + @echo "Checking code..." + cargo check .PHONY: clippy -clippy: clippy-pg clippy-sqlite - -.PHONY: clippy-pg -clippy-pg: ## Run clippy with PostgreSQL feature - @echo "Running clippy with PostgreSQL feature..." - cargo clippy --features pg --no-default-features -- -D warnings - -.PHONY: clippy-sqlite -clippy-sqlite: ## Run clippy with SQLite feature (default) - @echo "Running clippy with SQLite feature..." - cargo clippy --features sqlite --no-default-features -- -D warnings +clippy: ## Run clippy + @echo "Running clippy..." + cargo clippy -- -D warnings # --- Cleanup Targets --- .PHONY: clean @@ -106,7 +88,7 @@ clean-db: test-db-down ## Stop database and clean volumes # --- Utility Targets --- .PHONY: sqlx-cli sqlx-cli: ## Install sqlx-cli if not already installed - @command -v sqlx >/dev/null 2>&1 || cargo install sqlx-cli --features postgres,sqlite + @command -v sqlx >/dev/null 2>&1 || cargo install sqlx-cli --features postgres .PHONY: psql psql: ## Connect to the running PostgreSQL database with psql diff --git a/nym-node-status-api/nym-node-status-api/README_PG.md b/nym-node-status-api/nym-node-status-api/README.md similarity index 62% rename from nym-node-status-api/nym-node-status-api/README_PG.md rename to nym-node-status-api/nym-node-status-api/README.md index 8a6d2c3f0b4..330bc7ae706 100644 --- a/nym-node-status-api/nym-node-status-api/README_PG.md +++ b/nym-node-status-api/nym-node-status-api/README.md @@ -1,6 +1,12 @@ -# PostgreSQL Support for nym-node-status-api +# Node Status API -This project now supports both SQLite 
(default) and PostgreSQL databases. +This is a standalone service that can be run by anyone to monitor the status of nodes in the Nym network. + +## Well-known instances + +Below is a list of well-known public instances: + +- https://node-status.nym.com (Nym Technologies SA) ## Quick Start with PostgreSQL @@ -29,7 +35,7 @@ make prepare-pg make build-pg # Or manually: -cargo build --features pg --no-default-features +cargo build ``` ### 4. Run with PostgreSQL @@ -40,35 +46,17 @@ make dev-db # In another terminal, run the application DATABASE_URL=postgres://testuser:testpass@localhost:5433/nym_node_status_api_test \ -cargo run --features pg --no-default-features +cargo run ``` -## Database Features - -- `sqlite` (default): Uses SQLite database -- `pg`: Uses PostgreSQL database - -Only one database feature can be active at a time. - -## Migration Differences - -SQLite migrations are in `migrations/`, PostgreSQL migrations are in `migrations_pg/`. - -Key differences: -- **AUTOINCREMENT** → **SERIAL** -- **INTEGER CHECK (0,1)** → **BOOLEAN** -- **REAL** → **DOUBLE PRECISION** -- No table recreation needed for constraint changes in PostgreSQL - ## Makefile Targets ```bash -make help # Show all available targets -make prepare-pg # Setup PostgreSQL and prepare SQLx cache +make help # Show all available targets +make prepare # Setup PostgreSQL and prepare SQLx cache make dev-db # Start PostgreSQL for development make test-db # Run tests with PostgreSQL -make build-pg # Build with PostgreSQL -make build-sqlite # Build with SQLite +make build # Build with PostgreSQL make psql # Connect to running PostgreSQL make clean # Clean build artifacts make clean-db # Stop database and clean volumes @@ -81,9 +69,6 @@ See `.env.example` for all configuration options. 
Key variable: ```bash # For PostgreSQL: DATABASE_URL=postgres://testuser:testpass@localhost:5433/nym_node_status_api_test - -# For SQLite: -DATABASE_URL=sqlite://nym-node-status-api.sqlite ``` ## Troubleshooting diff --git a/nym-node-status-api/nym-node-status-api/build.rs b/nym-node-status-api/nym-node-status-api/build.rs index f2fe5127071..dfcee5c27a3 100644 --- a/nym-node-status-api/nym-node-status-api/build.rs +++ b/nym-node-status-api/nym-node-status-api/build.rs @@ -4,10 +4,8 @@ use anyhow::Result; /// cargo clean -p nym-node-status-api #[tokio::main(flavor = "current_thread")] async fn main() -> Result<()> { - #[cfg(feature = "pg")] if let Ok(database_url) = std::env::var("DATABASE_URL") { println!("cargo::rustc-env=DATABASE_URL={database_url}"); } - Ok(()) } diff --git a/nym-node-status-api/nym-node-status-api/launch_node_status_api.sh b/nym-node-status-api/nym-node-status-api/launch_node_status_api.sh index b5409280e5a..17b8eef7d1a 100755 --- a/nym-node-status-api/nym-node-status-api/launch_node_status_api.sh +++ b/nym-node-status-api/nym-node-status-api/launch_node_status_api.sh @@ -5,8 +5,7 @@ set -e user_rust_log_preference=$RUST_LOG export ENVIRONMENT=${ENVIRONMENT:-"mainnet"} export NYM_API_CLIENT_TIMEOUT=60 -export NODE_STATUS_API_TESTRUN_REFRESH_INTERVAL=120 - +export DATABASE_URL="postgres://testuser:testpass@localhost:5433/nym_node_status_api_test" # public counterpart of the agent's private key. # For TESTING only. 
NOT used in any other environment export NODE_STATUS_API_AGENT_KEY_LIST="H4z8kx5Kkf5JMQHhxaW1MwYndjKCDHC7HsVhHTFfBZ4J" @@ -23,7 +22,7 @@ function run_bare() { echo "RUST_LOG=${RUST_LOG}" # --conection-url is provided in build.rs - cargo run --package nym-node-status-api --features pg --no-default-features + cargo run --package nym-node-status-api } function run_docker() { diff --git a/nym-node-status-api/nym-node-status-api/migrations/000_init.sql b/nym-node-status-api/nym-node-status-api/migrations/000_init.sql deleted file mode 100644 index 4f9fd7da600..00000000000 --- a/nym-node-status-api/nym-node-status-api/migrations/000_init.sql +++ /dev/null @@ -1,112 +0,0 @@ -CREATE TABLE gateways -( - id INTEGER PRIMARY KEY AUTOINCREMENT, - gateway_identity_key VARCHAR NOT NULL UNIQUE, - self_described VARCHAR NOT NULL, - explorer_pretty_bond VARCHAR, - last_probe_result VARCHAR, - last_probe_log VARCHAR, - config_score INTEGER NOT NULL DEFAULT (0), - config_score_successes REAL NOT NULL DEFAULT (0), - config_score_samples REAL NOT NULL DEFAULT (0), - routing_score INTEGER NOT NULL DEFAULT (0), - routing_score_successes REAL NOT NULL DEFAULT (0), - routing_score_samples REAL NOT NULL DEFAULT (0), - test_run_samples REAL NOT NULL DEFAULT (0), - last_testrun_utc INTEGER, - last_updated_utc INTEGER NOT NULL, - bonded INTEGER CHECK (bonded in (0, 1)) NOT NULL DEFAULT 0, - blacklisted INTEGER CHECK (bonded in (0, 1)) NOT NULL DEFAULT 0, - performance INTEGER NOT NULL DEFAULT 0 -); - -CREATE INDEX idx_gateway_description_gateway_identity_key ON gateways (gateway_identity_key); - - -CREATE TABLE mixnodes ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - identity_key VARCHAR NOT NULL UNIQUE, - mix_id INTEGER NOT NULL UNIQUE, - bonded INTEGER CHECK (bonded in (0, 1)) NOT NULL DEFAULT 0, - total_stake INTEGER NOT NULL, - host VARCHAR NOT NULL, - http_api_port INTEGER NOT NULL, - blacklisted INTEGER CHECK (blacklisted in (0, 1)) NOT NULL DEFAULT 0, - full_details VARCHAR, - self_described 
VARCHAR, - last_updated_utc INTEGER NOT NULL - , is_dp_delegatee INTEGER CHECK (is_dp_delegatee IN (0, 1)) NOT NULL DEFAULT 0); -CREATE INDEX idx_mixnodes_mix_id ON mixnodes (mix_id); -CREATE INDEX idx_mixnodes_identity_key ON mixnodes (identity_key); - -CREATE TABLE - mixnode_description ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - mix_id INTEGER UNIQUE NOT NULL, - moniker VARCHAR, - website VARCHAR, - security_contact VARCHAR, - details VARCHAR, - last_updated_utc INTEGER NOT NULL, - FOREIGN KEY (mix_id) REFERENCES mixnodes (mix_id) - ); - --- Indexes for description table -CREATE INDEX idx_mixnode_description_mix_id ON mixnode_description (mix_id); - - -CREATE TABLE summary -( - key VARCHAR PRIMARY KEY, - value_json VARCHAR, - last_updated_utc INTEGER NOT NULL -); - - -CREATE TABLE summary_history -( - id INTEGER PRIMARY KEY AUTOINCREMENT, - date VARCHAR UNIQUE NOT NULL, - timestamp_utc INTEGER NOT NULL, - value_json VARCHAR -); -CREATE INDEX idx_summary_history_timestamp_utc ON summary_history (timestamp_utc); -CREATE INDEX idx_summary_history_date ON summary_history (date); - - -CREATE TABLE gateway_description ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - gateway_identity_key VARCHAR UNIQUE NOT NULL, - moniker VARCHAR, - website VARCHAR, - security_contact VARCHAR, - details VARCHAR, - last_updated_utc INTEGER NOT NULL, - FOREIGN KEY (gateway_identity_key) REFERENCES gateways (gateway_identity_key) - ); - - -CREATE TABLE - mixnode_daily_stats ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - mix_id INTEGER NOT NULL, - total_stake BIGINT NOT NULL, - date_utc VARCHAR NOT NULL, - packets_received INTEGER DEFAULT 0, - packets_sent INTEGER DEFAULT 0, - packets_dropped INTEGER DEFAULT 0, - FOREIGN KEY (mix_id) REFERENCES mixnodes (mix_id), - UNIQUE (mix_id, date_utc) -- This constraint automatically creates an index - ); - - -CREATE TABLE testruns -( - id INTEGER PRIMARY KEY AUTOINCREMENT, - gateway_id INTEGER NOT NULL, - status INTEGER NOT NULL, -- 0=pending, 
1=in-progress, 2=complete - timestamp_utc INTEGER NOT NULL, - ip_address VARCHAR NOT NULL, - log VARCHAR NOT NULL, - FOREIGN KEY (gateway_id) REFERENCES gateways (id) -); diff --git a/nym-node-status-api/nym-node-status-api/migrations/001_last_assigned_utc.sql b/nym-node-status-api/nym-node-status-api/migrations/001_last_assigned_utc.sql deleted file mode 100644 index 2a801763e40..00000000000 --- a/nym-node-status-api/nym-node-status-api/migrations/001_last_assigned_utc.sql +++ /dev/null @@ -1,5 +0,0 @@ -ALTER TABLE testruns -RENAME COLUMN timestamp_utc TO created_utc; - -ALTER TABLE testruns -ADD COLUMN last_assigned_utc INTEGER; diff --git a/nym-node-status-api/nym-node-status-api/migrations/002_session_stats.sql b/nym-node-status-api/nym-node-status-api/migrations/002_session_stats.sql deleted file mode 100644 index d29fe986a33..00000000000 --- a/nym-node-status-api/nym-node-status-api/migrations/002_session_stats.sql +++ /dev/null @@ -1,17 +0,0 @@ - -CREATE TABLE gateway_session_stats ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - gateway_identity_key VARCHAR NOT NULL, - node_id INTEGER NOT NULL, - day DATE NOT NULL, - unique_active_clients INTEGER NOT NULL, - session_started INTEGER NOT NULL, - users_hashes VARCHAR, - vpn_sessions VARCHAR, - mixnet_sessions VARCHAR, - unknown_sessions VARCHAR, - UNIQUE (node_id, day) -- This constraint automatically creates an index - ); -CREATE INDEX idx_gateway_session_stats_identity_key ON gateway_session_stats (gateway_identity_key); -CREATE INDEX idx_gateway_session_stats_day ON gateway_session_stats (day); - diff --git a/nym-node-status-api/nym-node-status-api/migrations/003_scraper_tables.sql b/nym-node-status-api/nym-node-status-api/migrations/003_scraper_tables.sql deleted file mode 100644 index 9d9b5842b16..00000000000 --- a/nym-node-status-api/nym-node-status-api/migrations/003_scraper_tables.sql +++ /dev/null @@ -1,11 +0,0 @@ -CREATE TABLE mixnode_packet_stats_raw ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - mix_id 
INTEGER NOT NULL, - timestamp_utc INTEGER NOT NULL, - packets_received INTEGER, - packets_sent INTEGER, - packets_dropped INTEGER, - FOREIGN KEY (mix_id) REFERENCES mixnodes (mix_id) - ); - -CREATE INDEX idx_mixnode_packet_stats_raw_mix_id_timestamp_utc ON mixnode_packet_stats_raw (mix_id, timestamp_utc); \ No newline at end of file diff --git a/nym-node-status-api/nym-node-status-api/migrations/004_obsolete_fields.sql b/nym-node-status-api/nym-node-status-api/migrations/004_obsolete_fields.sql deleted file mode 100644 index f275ff0c337..00000000000 --- a/nym-node-status-api/nym-node-status-api/migrations/004_obsolete_fields.sql +++ /dev/null @@ -1,54 +0,0 @@ -ALTER TABLE mixnodes DROP COLUMN blacklisted; -ALTER TABLE gateways DROP COLUMN blacklisted; - -CREATE TABLE nym_nodes ( - node_id INTEGER PRIMARY KEY, - ed25519_identity_pubkey VARCHAR NOT NULL UNIQUE, - total_stake INTEGER NOT NULL, - ip_addresses TEXT NOT NULL, - mix_port INTEGER NOT NULL, - x25519_sphinx_pubkey VARCHAR NOT NULL UNIQUE, - node_role TEXT NOT NULL, - supported_roles TEXT NOT NULL, - performance VARCHAR NOT NULL, - entry TEXT, - last_updated_utc INTEGER NOT NULL -); - -CREATE INDEX idx_nym_nodes_node_id ON nym_nodes (node_id); -CREATE INDEX idx_nym_nodes_ed25519_identity_pubkey ON nym_nodes (ed25519_identity_pubkey); - -CREATE TABLE nym_node_descriptions ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - node_id INTEGER UNIQUE NOT NULL, - moniker VARCHAR, - website VARCHAR, - security_contact VARCHAR, - details VARCHAR, - last_updated_utc INTEGER NOT NULL, - FOREIGN KEY (node_id) REFERENCES nym_nodes (node_id) -); - -CREATE TABLE nym_nodes_packet_stats_raw ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - node_id INTEGER NOT NULL, - timestamp_utc INTEGER NOT NULL, - packets_received INTEGER, - packets_sent INTEGER, - packets_dropped INTEGER, - FOREIGN KEY (node_id) REFERENCES nym_nodes (node_id) -); - -CREATE INDEX idx_nym_nodes_packet_stats_raw_node_id_timestamp_utc ON nym_nodes_packet_stats_raw 
(node_id, timestamp_utc); - -CREATE TABLE nym_node_daily_mixing_stats ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - node_id INTEGER NOT NULL, - total_stake BIGINT NOT NULL, - date_utc VARCHAR NOT NULL, - packets_received INTEGER DEFAULT 0, - packets_sent INTEGER DEFAULT 0, - packets_dropped INTEGER DEFAULT 0, - FOREIGN KEY (node_id) REFERENCES nym_nodes (node_id), - UNIQUE (node_id, date_utc) -- This constraint automatically creates an index -); diff --git a/nym-node-status-api/nym-node-status-api/migrations/005_node_self_described.sql b/nym-node-status-api/nym-node-status-api/migrations/005_node_self_described.sql deleted file mode 100644 index 8ba7102df11..00000000000 --- a/nym-node-status-api/nym-node-status-api/migrations/005_node_self_described.sql +++ /dev/null @@ -1,61 +0,0 @@ -ALTER TABLE nym_nodes ADD COLUMN self_described TEXT; -ALTER TABLE nym_nodes ADD COLUMN bond_info TEXT; - --- # Why recreate tables? --- I need DELETE with CASCADE functionality, but ALTER TABLE doesn't support --- adding constraints (which CASCADE is). So I recreate tables with proper --- constraints and fill them with existing data. - --- To avoid invalidating existing FK references, temporarily disable FK enforcement. 
-PRAGMA foreign_keys=off; - -DROP INDEX IF EXISTS idx_nym_nodes_packet_stats_raw_node_id_timestamp_utc; - -ALTER TABLE nym_node_descriptions RENAME TO _nym_node_descriptions_old; -ALTER TABLE nym_nodes_packet_stats_raw RENAME TO _nym_nodes_packet_stats_raw_old; -ALTER TABLE nym_node_daily_mixing_stats RENAME TO _nym_node_daily_mixing_stats_old; - -CREATE TABLE nym_node_descriptions ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - node_id INTEGER UNIQUE NOT NULL, - moniker VARCHAR, - website VARCHAR, - security_contact VARCHAR, - details VARCHAR, - last_updated_utc INTEGER NOT NULL, - FOREIGN KEY (node_id) REFERENCES nym_nodes (node_id) ON DELETE CASCADE -); - -CREATE TABLE nym_nodes_packet_stats_raw ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - node_id INTEGER NOT NULL, - timestamp_utc INTEGER NOT NULL, - packets_received INTEGER, - packets_sent INTEGER, - packets_dropped INTEGER, - FOREIGN KEY (node_id) REFERENCES nym_nodes (node_id) ON DELETE CASCADE -); - -CREATE INDEX idx_nym_nodes_packet_stats_raw_node_id_timestamp_utc ON nym_nodes_packet_stats_raw (node_id, timestamp_utc); - -CREATE TABLE nym_node_daily_mixing_stats ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - node_id INTEGER NOT NULL, - total_stake BIGINT NOT NULL, - date_utc VARCHAR NOT NULL, - packets_received INTEGER DEFAULT 0, - packets_sent INTEGER DEFAULT 0, - packets_dropped INTEGER DEFAULT 0, - FOREIGN KEY (node_id) REFERENCES nym_nodes (node_id) ON DELETE CASCADE, - UNIQUE (node_id, date_utc) -- This constraint automatically creates an index -); - -INSERT INTO nym_node_descriptions SELECT * FROM _nym_node_descriptions_old; -INSERT INTO nym_nodes_packet_stats_raw SELECT * FROM _nym_nodes_packet_stats_raw_old; -INSERT INTO nym_node_daily_mixing_stats SELECT * FROM _nym_node_daily_mixing_stats_old; - -DROP TABLE _nym_node_descriptions_old; -DROP TABLE _nym_nodes_packet_stats_raw_old; -DROP TABLE _nym_node_daily_mixing_stats_old; - -PRAGMA foreign_keys=on; diff --git 
a/nym-node-status-api/nym-node-status-api/migrations/006_remove_unique_constraint.sql b/nym-node-status-api/nym-node-status-api/migrations/006_remove_unique_constraint.sql deleted file mode 100644 index 1d747b33b63..00000000000 --- a/nym-node-status-api/nym-node-status-api/migrations/006_remove_unique_constraint.sql +++ /dev/null @@ -1,68 +0,0 @@ --- Removing UNIQUE constraint on nym_nodes --- https://www.sqlite.org/lang_altertable.html - --- To avoid invalidating existing FK references, temporarily disable FK enforcement. -PRAGMA foreign_keys=off; - -CREATE TABLE nym_nodes_new ( - node_id INTEGER PRIMARY KEY, - ed25519_identity_pubkey VARCHAR NOT NULL, - total_stake INTEGER NOT NULL, - ip_addresses TEXT NOT NULL, -- JSON serialized - mix_port INTEGER NOT NULL, - x25519_sphinx_pubkey VARCHAR NOT NULL, - node_role TEXT NOT NULL, -- JSON serialized - supported_roles TEXT NOT NULL, -- JSON serialized - performance VARCHAR NOT NULL, - entry TEXT, -- JSON serialized - self_described TEXT, -- JSON serialized - bond_info TEXT, -- JSON serialized - last_updated_utc INTEGER NOT NULL -); - --- columns are misaligned because old nym_nodes has 2 subsequently added columns --- which come at the end of schema definition. 
--- To correctly insert values into corresponding columns, named columns are required -INSERT INTO nym_nodes_new ( - node_id, - ed25519_identity_pubkey, - total_stake, - ip_addresses, - mix_port, - x25519_sphinx_pubkey, - node_role, - supported_roles, - performance, - entry, - self_described, - bond_info, - last_updated_utc -) -SELECT - existing.node_id, - existing.ed25519_identity_pubkey, - existing.total_stake, - existing.ip_addresses, - existing.mix_port, - existing.x25519_sphinx_pubkey, - existing.node_role, - existing.supported_roles, - existing.performance, - existing.entry, - existing.self_described, - existing.bond_info, - existing.last_updated_utc -FROM nym_nodes as existing; - -DROP INDEX IF EXISTS idx_nym_nodes_node_id; -DROP INDEX IF EXISTS idx_nym_nodes_ed25519_identity_pubkey; - -DROP TABLE nym_nodes; - -ALTER TABLE nym_nodes_new RENAME TO nym_nodes; - -CREATE INDEX idx_nym_nodes_node_id ON nym_nodes (node_id); -CREATE INDEX idx_nym_nodes_ed25519_identity_pubkey ON nym_nodes (ed25519_identity_pubkey); - - -PRAGMA foreign_keys=on; diff --git a/nym-node-status-api/nym-node-status-api/migrations/007_date_fix.sql b/nym-node-status-api/nym-node-status-api/migrations/007_date_fix.sql deleted file mode 100644 index 4db114e9af3..00000000000 --- a/nym-node-status-api/nym-node-status-api/migrations/007_date_fix.sql +++ /dev/null @@ -1,113 +0,0 @@ --- for a couple of days after migrating chrono -> time, we stored dates as --- 2025-June-DD instead of 2025-06-DD. This migration fixes those entries. --- --- Because of a UNIQUE constraint on (node_id, date_utc), we can't just rename in-place. --- - merge (add) node stats back to the original table where conflict (node_id, date_utc) would exist --- - delete invalid records from original table (those stats were merged into correct rows above) --- - insert rows that did not have a conflicting (node_Id, date_utc) combo --- Conflicts affect only the date which has both kinds of entries, --- e.g. 
2025-06-05 and 2025-June-05 (date when this change was deployed) --- --- This applies to both affected tables. - --- ---------------------------------------- --- mixnode_daily_stats --- ---------------------------------------- - --- First, copy over rows with invalid date to a temp table (in the correct date format) -CREATE TEMP TABLE tmp_mix AS -SELECT - mix_id, - REPLACE(date_utc,'June','06') AS new_date, - SUM(total_stake) AS total_stake_sum, - SUM(packets_received) AS packets_received_sum, - SUM(packets_sent) AS packets_sent_sum, - SUM(packets_dropped) AS packets_dropped_sum -FROM mixnode_daily_stats -WHERE date_utc LIKE '%June%' -GROUP BY mix_id, new_date; - -UPDATE mixnode_daily_stats AS m -SET - total_stake = m.total_stake, - packets_received = m.packets_received + (SELECT packets_received_sum FROM tmp_mix WHERE mix_id = m.mix_id AND new_date = m.date_utc), - packets_sent = m.packets_sent + (SELECT packets_sent_sum FROM tmp_mix WHERE mix_id = m.mix_id AND new_date = m.date_utc), - packets_dropped = m.packets_dropped + (SELECT packets_dropped_sum FROM tmp_mix WHERE mix_id = m.mix_id AND new_date = m.date_utc) -WHERE EXISTS ( - SELECT 1 FROM tmp_mix - WHERE mix_id = m.mix_id - AND new_date = m.date_utc -); - -DELETE FROM mixnode_daily_stats - WHERE date_utc LIKE '%June%'; - -INSERT INTO mixnode_daily_stats - (mix_id, date_utc, total_stake, packets_received, packets_sent, packets_dropped) -SELECT - mix_id, - new_date, - total_stake_sum, - packets_received_sum, - packets_sent_sum, - packets_dropped_sum -FROM tmp_mix AS t --- only those whose new_date did _not_ already exist -WHERE NOT EXISTS ( - SELECT 1 FROM mixnode_daily_stats AS m - WHERE m.mix_id = t.mix_id - AND m.date_utc = t.new_date -); - -DROP TABLE tmp_mix; - - --- ---------------------------------------- --- nym_node_daily_mixing_stats --- ---------------------------------------- - -CREATE TEMP TABLE tmp_nym_node_stats AS -SELECT - node_id, - REPLACE(date_utc,'June','06') AS new_date, - 
SUM(total_stake) AS total_stake_sum, - SUM(packets_received) AS packets_received_sum, - SUM(packets_sent) AS packets_sent_sum, - SUM(packets_dropped) AS packets_dropped_sum -FROM nym_node_daily_mixing_stats -WHERE date_utc LIKE '%June%' -GROUP BY node_id, new_date; - -UPDATE nym_node_daily_mixing_stats AS m -SET - total_stake = m.total_stake, - packets_received = m.packets_received + (SELECT packets_received_sum FROM tmp_nym_node_stats WHERE node_id = m.node_id AND new_date = m.date_utc), - packets_sent = m.packets_sent + (SELECT packets_sent_sum FROM tmp_nym_node_stats WHERE node_id = m.node_id AND new_date = m.date_utc), - packets_dropped = m.packets_dropped + (SELECT packets_dropped_sum FROM tmp_nym_node_stats WHERE node_id = m.node_id AND new_date = m.date_utc) -WHERE EXISTS ( - SELECT 1 FROM tmp_nym_node_stats - WHERE node_id = m.node_id - AND new_date = m.date_utc -); - -DELETE FROM nym_node_daily_mixing_stats - WHERE date_utc LIKE '%June%'; - -INSERT INTO nym_node_daily_mixing_stats - (node_id, date_utc, total_stake, packets_received, packets_sent, packets_dropped) -SELECT - node_id, - new_date, - total_stake_sum, - packets_received_sum, - packets_sent_sum, - packets_dropped_sum -FROM tmp_nym_node_stats AS t -WHERE NOT EXISTS ( - SELECT 1 FROM nym_node_daily_mixing_stats AS m - WHERE m.node_id = t.node_id - AND m.date_utc = t.new_date -); - -DROP TABLE tmp_nym_node_stats; - diff --git a/nym-node-status-api/nym-node-status-api/migrations/008_performance_indexes.sql b/nym-node-status-api/nym-node-status-api/migrations/008_performance_indexes.sql deleted file mode 100644 index 235b50e171a..00000000000 --- a/nym-node-status-api/nym-node-status-api/migrations/008_performance_indexes.sql +++ /dev/null @@ -1,16 +0,0 @@ --- Add partial indexes for NOT NULL filtering to improve performance of /explorer/v3/nodes endpoint - --- Index for queries filtering on self_described IS NOT NULL -CREATE INDEX IF NOT EXISTS idx_nym_nodes_self_described_not_null -ON 
nym_nodes(node_id) -WHERE self_described IS NOT NULL; - --- Index for queries filtering on bond_info IS NOT NULL -CREATE INDEX IF NOT EXISTS idx_nym_nodes_bond_info_not_null -ON nym_nodes(node_id) -WHERE bond_info IS NOT NULL; - --- Composite index for queries filtering on both bond_info AND self_described -CREATE INDEX IF NOT EXISTS idx_nym_nodes_bond_self_described -ON nym_nodes(node_id) -WHERE bond_info IS NOT NULL AND self_described IS NOT NULL; \ No newline at end of file diff --git a/nym-node-status-api/nym-node-status-api/src/db/mod.rs b/nym-node-status-api/nym-node-status-api/src/db/mod.rs index 964193455ea..ed3e72fa892 100644 --- a/nym-node-status-api/nym-node-status-api/src/db/mod.rs +++ b/nym-node-status-api/nym-node-status-api/src/db/mod.rs @@ -3,42 +3,16 @@ use std::{str::FromStr, time::Duration}; pub(crate) mod models; pub(crate) mod queries; -pub(crate) mod query_wrapper; #[cfg(test)] mod tests; -// Re-export the query wrapper functions for easier access -pub(crate) use query_wrapper::query; -#[allow(unused_imports)] -pub(crate) use query_wrapper::query_as; - -#[cfg(feature = "sqlite")] -use sqlx::{ - migrate::Migrator, - sqlite::{SqliteAutoVacuum, SqliteConnectOptions, SqliteSynchronous}, - ConnectOptions, SqlitePool, -}; - -#[cfg(feature = "pg")] use sqlx::{migrate::Migrator, postgres::PgConnectOptions, ConnectOptions, PgPool}; -#[cfg(feature = "sqlite")] -static MIGRATOR: Migrator = sqlx::migrate!("./migrations"); - -#[cfg(feature = "pg")] static MIGRATOR: Migrator = sqlx::migrate!("./migrations_pg"); -#[cfg(feature = "sqlite")] -pub(crate) type DbPool = SqlitePool; - -#[cfg(feature = "pg")] pub(crate) type DbPool = PgPool; -#[cfg(feature = "sqlite")] -pub(crate) type DbConnection = sqlx::pool::PoolConnection; - -#[cfg(feature = "pg")] pub(crate) type DbConnection = sqlx::pool::PoolConnection; pub(crate) struct Storage { @@ -46,30 +20,6 @@ pub(crate) struct Storage { } impl Storage { - #[cfg(feature = "sqlite")] - pub async fn init(connection_url: 
String, busy_timeout: Duration) -> Result { - let connect_options = SqliteConnectOptions::from_str(&connection_url)? - .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal) - .busy_timeout(busy_timeout) - .synchronous(SqliteSynchronous::Normal) - .auto_vacuum(SqliteAutoVacuum::Incremental) - .foreign_keys(true) - .create_if_missing(true) - .disable_statement_logging(); - - let pool = sqlx::SqlitePool::connect_with(connect_options) - .await - .map_err(|err| anyhow!("Failed to connect to {}: {}", &connection_url, err))?; - - MIGRATOR.run(&pool).await?; - - // aftering setting pragma, check whether it was set successfully - Self::assert_busy_timeout(pool.clone(), busy_timeout.as_secs() as i64).await?; - - Ok(Storage { pool }) - } - - #[cfg(feature = "pg")] pub async fn init(connection_url: String, _busy_timeout: Duration) -> Result { use std::env; let mut connect_options = @@ -86,7 +36,11 @@ impl Storage { .await .map_err(|err| anyhow!("Failed to connect to {}: {}", &connection_url, err))?; - MIGRATOR.run(&pool).await?; + if env::var("SKIP_MIGRATIONS").unwrap_or_default() != "true" { + MIGRATOR.run(&pool).await?; + } else { + tracing::warn!("Skipping migrations"); + } Ok(Storage { pool }) } @@ -95,28 +49,4 @@ impl Storage { pub fn pool_owned(&self) -> DbPool { self.pool.clone() } - - #[cfg(feature = "sqlite")] - async fn assert_busy_timeout(pool: DbPool, expected_busy_timeout_s: i64) -> Result<()> { - let mut conn = pool.acquire().await?; - // Sqlite stores this value as miliseconds - // https://www.sqlite.org/pragma.html#pragma_busy_timeout - let busy_timeout_db = sqlx::query!("PRAGMA busy_timeout;") - .fetch_one(conn.as_mut()) - .await?; - - let actual_busy_timeout_ms = busy_timeout_db.timeout.unwrap_or(0); - tracing::info!("PRAGMA busy_timeout={}ms", actual_busy_timeout_ms); - let expected_busy_timeout_ms = expected_busy_timeout_s * 1000; - - if expected_busy_timeout_ms != actual_busy_timeout_ms { - anyhow::bail!( - "PRAGMA busy_timeout expected: {}ms, actual: {}ms", 
- expected_busy_timeout_ms, - actual_busy_timeout_ms - ); - } - - Ok(()) - } } diff --git a/nym-node-status-api/nym-node-status-api/src/db/queries/gateways.rs b/nym-node-status-api/nym-node-status-api/src/db/queries/gateways.rs index 4b0dc2c0f94..10235d9957d 100644 --- a/nym-node-status-api/nym-node-status-api/src/db/queries/gateways.rs +++ b/nym-node-status-api/nym-node-status-api/src/db/queries/gateways.rs @@ -9,26 +9,25 @@ use crate::{ node_scraper::helpers::NodeDescriptionResponse, }; use futures_util::TryStreamExt; -use sqlx::Row; use tracing::error; pub(crate) async fn select_gateway_identity( conn: &mut DbConnection, gateway_pk: i32, ) -> anyhow::Result { - let record = crate::db::query( + let record = sqlx::query!( r#"SELECT gateway_identity_key FROM gateways WHERE - id = ?"#, + id = $1"#, + gateway_pk ) - .bind(gateway_pk) .fetch_one(conn.as_mut()) .await?; - Ok(record.try_get("gateway_identity_key")?) + Ok(record.gateway_identity_key) } pub(crate) async fn update_bonded_gateways( @@ -37,7 +36,7 @@ pub(crate) async fn update_bonded_gateways( ) -> anyhow::Result<()> { let mut tx = pool.begin().await?; - crate::db::query( + sqlx::query!( r#"UPDATE gateways SET @@ -48,25 +47,25 @@ pub(crate) async fn update_bonded_gateways( .await?; for record in gateways { - crate::db::query( + sqlx::query!( "INSERT INTO gateways (gateway_identity_key, bonded, self_described, explorer_pretty_bond, last_updated_utc, performance) - VALUES (?, ?, ?, ?, ?, ?) 
+ VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT(gateway_identity_key) DO UPDATE SET bonded=excluded.bonded, self_described=excluded.self_described, explorer_pretty_bond=excluded.explorer_pretty_bond, last_updated_utc=excluded.last_updated_utc, performance = excluded.performance;", + record.identity_key, + record.bonded, + record.self_described, + record.explorer_pretty_bond, + record.last_updated_utc, + record.performance as i32 ) - .bind(record.identity_key) - .bind(record.bonded) - .bind(record.self_described) - .bind(record.explorer_pretty_bond) - .bind(record.last_updated_utc) - .bind(record.performance as i32) .execute(&mut *tx) .await?; } @@ -78,21 +77,22 @@ pub(crate) async fn update_bonded_gateways( pub(crate) async fn get_all_gateways(pool: &DbPool) -> anyhow::Result> { let mut conn = pool.acquire().await?; - let items = crate::db::query_as::( + let items = sqlx::query_as!( + GatewayDto, r#"SELECT - gw.gateway_identity_key, - gw.bonded, - gw.performance, - gw.self_described, - gw.explorer_pretty_bond, - gw.last_probe_result, - gw.last_probe_log, - gw.last_testrun_utc, - gw.last_updated_utc, - COALESCE(gd.moniker, 'NA') as moniker, - COALESCE(gd.website, 'NA') as website, - COALESCE(gd.security_contact, 'NA') as security_contact, - COALESCE(gd.details, 'NA') as details + gw.gateway_identity_key as "gateway_identity_key!", + gw.bonded as "bonded: bool", + gw.performance as "performance!", + gw.self_described as "self_described?", + gw.explorer_pretty_bond as "explorer_pretty_bond?", + gw.last_probe_result as "last_probe_result?", + gw.last_probe_log as "last_probe_log?", + gw.last_testrun_utc as "last_testrun_utc?", + gw.last_updated_utc as "last_updated_utc!", + COALESCE(gd.moniker, 'NA') as "moniker!", + COALESCE(gd.website, 'NA') as "website!", + COALESCE(gd.security_contact, 'NA') as "security_contact!", + COALESCE(gd.details, 'NA') as "details!" 
FROM gateways gw LEFT JOIN gateway_description gd ON gw.gateway_identity_key = gd.gateway_identity_key @@ -113,17 +113,17 @@ pub(crate) async fn get_all_gateways(pool: &DbPool) -> anyhow::Result anyhow::Result> { let mut conn = pool.acquire().await?; - let items = crate::db::query( + let items = sqlx::query!( r#" SELECT gateway_identity_key FROM gateways WHERE bonded = true - "#, + "# ) .fetch_all(&mut *conn) .await? .into_iter() - .map(|record| record.try_get::("gateway_identity_key").unwrap()) + .map(|record| record.gateway_identity_key) .collect::>(); Ok(items) @@ -131,11 +131,11 @@ pub(crate) async fn get_bonded_gateway_id_keys(pool: &DbPool) -> anyhow::Result< pub(crate) async fn insert_gateway_description( conn: &mut DbConnection, - identity_key: String, - description: NodeDescriptionResponse, + identity_key: &str, + description: &NodeDescriptionResponse, timestamp: i64, ) -> anyhow::Result<()> { - crate::db::query( + sqlx::query!( r#" INSERT INTO gateway_description ( gateway_identity_key, @@ -144,7 +144,7 @@ pub(crate) async fn insert_gateway_description( security_contact, details, last_updated_utc - ) VALUES (?, ?, ?, ?, ?, ?) 
+ ) VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (gateway_identity_key) DO UPDATE SET moniker = excluded.moniker, website = excluded.website, @@ -152,13 +152,13 @@ pub(crate) async fn insert_gateway_description( details = excluded.details, last_updated_utc = excluded.last_updated_utc "#, + identity_key, + description.moniker, + description.website, + description.security_contact, + description.details, + timestamp, ) - .bind(identity_key) - .bind(description.moniker) - .bind(description.website) - .bind(description.security_contact) - .bind(description.details) - .bind(timestamp) .execute(conn.as_mut()) .await .map(drop) @@ -170,36 +170,38 @@ pub(crate) async fn get_or_create_gateway( gateway_identity_key: &str, ) -> anyhow::Result { // Try to find existing gateway - let existing = crate::db::query("SELECT id FROM gateways WHERE gateway_identity_key = ?") - .bind(gateway_identity_key.to_string()) - .fetch_optional(conn.as_mut()) - .await?; + let existing = sqlx::query_scalar!( + "SELECT id FROM gateways WHERE gateway_identity_key = $1", + gateway_identity_key.to_string() + ) + .fetch_optional(conn.as_mut()) + .await?; if let Some(row) = existing { - return Ok(row.try_get("id")?); + return Ok(row); } // Create new gateway tracing::info!("Creating new gateway record for {}", gateway_identity_key); let now = crate::utils::now_utc().unix_timestamp(); - let result = crate::db::query( + let result: i32 = sqlx::query_scalar!( r#"INSERT INTO gateways ( - gateway_identity_key, - bonded, - performance, - self_described, + gateway_identity_key, + bonded, + performance, + self_described, last_updated_utc - ) VALUES (?, ?, ?, ?, ?) 
+ ) VALUES ($1, $2, $3, $4, $5) RETURNING id"#, + gateway_identity_key.to_string(), + true, // Assume bonded since being tested + 0, // Initial performance + "null", + now ) - .bind(gateway_identity_key.to_string()) - .bind(true) // Assume bonded since being tested - .bind(0) // Initial performance - .bind("null") - .bind(now) .fetch_one(conn.as_mut()) .await?; - Ok(result.try_get("id")?) + Ok(result) } diff --git a/nym-node-status-api/nym-node-status-api/src/db/queries/gateways_stats.rs b/nym-node-status-api/nym-node-status-api/src/db/queries/gateways_stats.rs index bf90bff06bf..7e82caaed7d 100644 --- a/nym-node-status-api/nym-node-status-api/src/db/queries/gateways_stats.rs +++ b/nym-node-status-api/nym-node-status-api/src/db/queries/gateways_stats.rs @@ -6,38 +6,6 @@ use futures_util::TryStreamExt; use time::Date; use tracing::error; -#[cfg(feature = "sqlite")] -pub(crate) async fn insert_session_records( - pool: &DbPool, - records: Vec, -) -> anyhow::Result<()> { - let mut tx = pool.begin().await?; - for record in records { - sqlx::query!( - "INSERT OR IGNORE INTO gateway_session_stats - (gateway_identity_key, node_id, day, - unique_active_clients, session_started, users_hashes, - vpn_sessions, mixnet_sessions, unknown_sessions) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", - record.gateway_identity_key, - record.node_id, - record.day, - record.unique_active_clients, - record.session_started, - record.users_hashes, - record.vpn_sessions, - record.mixnet_sessions, - record.unknown_sessions, - ) - .execute(&mut *tx) - .await?; - } - tx.commit().await?; - - Ok(()) -} - -#[cfg(feature = "pg")] pub(crate) async fn insert_session_records( pool: &DbPool, records: Vec, @@ -50,7 +18,7 @@ pub(crate) async fn insert_session_records( unique_active_clients, session_started, users_hashes, vpn_sessions, mixnet_sessions, unknown_sessions) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) - ON CONFLICT DO NOTHING", + ON CONFLICT DO NOTHING;", record.gateway_identity_key, record.node_id, 
record.day, @@ -101,8 +69,7 @@ pub(crate) async fn get_sessions_stats(pool: &DbPool) -> anyhow::Result anyhow::Result<()> { let mut conn = pool.acquire().await?; - crate::db::query("DELETE FROM gateway_session_stats WHERE day <= ?") - .bind(cut_off) + sqlx::query!("DELETE FROM gateway_session_stats WHERE day <= $1", cut_off) .execute(&mut *conn) .await?; Ok(()) diff --git a/nym-node-status-api/nym-node-status-api/src/db/queries/misc.rs b/nym-node-status-api/nym-node-status-api/src/db/queries/misc.rs index 248b556c1d5..6e4c5d18b81 100644 --- a/nym-node-status-api/nym-node-status-api/src/db/queries/misc.rs +++ b/nym-node-status-api/nym-node-status-api/src/db/queries/misc.rs @@ -6,8 +6,8 @@ use crate::db::{models::NetworkSummary, DbPool}; /// `daily_summary` pub(crate) async fn insert_summaries( pool: &DbPool, - summaries: Vec<(String, usize)>, - summary: NetworkSummary, + summaries: &Vec<(&str, usize)>, + summary: &NetworkSummary, last_updated: UtcDateTime, ) -> anyhow::Result<()> { insert_summary(pool, summaries, last_updated).await?; @@ -19,7 +19,7 @@ pub(crate) async fn insert_summaries( async fn insert_summary( pool: &DbPool, - summaries: Vec<(String, usize)>, + summaries: &Vec<(&str, usize)>, last_updated: UtcDateTime, ) -> anyhow::Result<()> { let timestamp = last_updated.unix_timestamp(); @@ -27,17 +27,17 @@ async fn insert_summary( for (kind, value) in summaries { let value = value.to_string(); - crate::db::query( + sqlx::query!( "INSERT INTO summary (key, value_json, last_updated_utc) - VALUES (?, ?, ?) 
+ VALUES ($1, $2, $3) ON CONFLICT(key) DO UPDATE SET value_json=excluded.value_json, last_updated_utc=excluded.last_updated_utc;", + kind, + value, + timestamp ) - .bind(kind.clone()) - .bind(value) - .bind(timestamp) .execute(&mut *tx) .await .map_err(|err| { @@ -60,7 +60,7 @@ async fn insert_summary( /// This is not aggregate data, it's a set of latest data points async fn insert_summary_history( pool: &DbPool, - summary: NetworkSummary, + summary: &NetworkSummary, last_updated: UtcDateTime, ) -> anyhow::Result<()> { let mut conn = pool.acquire().await?; @@ -70,17 +70,17 @@ async fn insert_summary_history( let date = datetime_to_only_date_str(last_updated); - crate::db::query( + sqlx::query!( "INSERT INTO summary_history (date, timestamp_utc, value_json) - VALUES (?, ?, ?) + VALUES ($1, $2, $3) ON CONFLICT(date) DO UPDATE SET timestamp_utc=excluded.timestamp_utc, value_json=excluded.value_json;", + date, + timestamp, + value_json ) - .bind(date) - .bind(timestamp) - .bind(value_json) .execute(&mut *conn) .await?; diff --git a/nym-node-status-api/nym-node-status-api/src/db/queries/nym_nodes.rs b/nym-node-status-api/nym-node-status-api/src/db/queries/nym_nodes.rs index 4144e791a73..b74c09367d6 100644 --- a/nym-node-status-api/nym-node-status-api/src/db/queries/nym_nodes.rs +++ b/nym-node-status-api/nym-node-status-api/src/db/queries/nym_nodes.rs @@ -1,19 +1,19 @@ +use crate::db::models::NymNodeDescriptionDeHelper; use futures_util::TryStreamExt; use nym_node_requests::api::v1::node::models::NodeDescription; use nym_validator_client::{ client::{NodeId, NymNodeDetails}, models::NymNodeDescription, }; -use sqlx::Row; use std::collections::HashMap; use tracing::{instrument, warn}; -use crate::db::models::NymNodeDescriptionDeHelper; +use crate::db::DbConnection; use crate::http::models::DailyStats; use crate::{ db::{ models::{NymNodeDto, NymNodeInsertRecord}, - DbConnection, DbPool, + DbPool, }, node_scraper::helpers::NodeDescriptionResponse, }; @@ -21,20 +21,21 @@ use 
crate::{ pub(crate) async fn get_all_nym_nodes(pool: &DbPool) -> anyhow::Result> { let mut conn = pool.acquire().await?; - crate::db::query_as::( + sqlx::query_as!( + NymNodeDto, r#"SELECT node_id, ed25519_identity_pubkey, total_stake, - ip_addresses, + ip_addresses as "ip_addresses!: serde_json::Value", mix_port, x25519_sphinx_pubkey, - node_role, - supported_roles, - entry, + node_role as "node_role: serde_json::Value", + supported_roles as "supported_roles: serde_json::Value", + entry as "entry: serde_json::Value", performance, - self_described, - bond_info + self_described as "self_described: serde_json::Value", + bond_info as "bond_info: serde_json::Value" FROM nym_nodes ORDER BY @@ -57,20 +58,21 @@ pub(crate) async fn get_described_bonded_nym_nodes( ) -> anyhow::Result> { let mut conn = pool.acquire().await?; - crate::db::query_as::( + sqlx::query_as!( + NymNodeDto, r#"SELECT node_id, ed25519_identity_pubkey, total_stake, - ip_addresses, + ip_addresses as "ip_addresses!: serde_json::Value", mix_port, x25519_sphinx_pubkey, - node_role, - supported_roles, - entry, + node_role as "node_role: serde_json::Value", + supported_roles as "supported_roles: serde_json::Value", + entry as "entry: serde_json::Value", performance, - self_described, - bond_info + self_described as "self_described: serde_json::Value", + bond_info as "bond_info: serde_json::Value" FROM nym_nodes WHERE @@ -92,7 +94,7 @@ pub(crate) async fn update_nym_nodes( ) -> anyhow::Result { let mut tx = pool.begin().await?; - crate::db::query( + sqlx::query!( "UPDATE nym_nodes SET self_described = NULL, @@ -104,7 +106,7 @@ pub(crate) async fn update_nym_nodes( let inserted = node_records.len(); for record in node_records { // https://www.sqlite.org/lang_upsert.html - crate::db::query( + sqlx::query!( "INSERT INTO nym_nodes (node_id, ed25519_identity_pubkey, total_stake, @@ -115,7 +117,7 @@ pub(crate) async fn update_nym_nodes( bond_info, performance, last_updated_utc ) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, 
?, ?, ?, ?) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) ON CONFLICT(node_id) DO UPDATE SET ed25519_identity_pubkey=excluded.ed25519_identity_pubkey, ip_addresses=excluded.ip_addresses, @@ -129,20 +131,20 @@ pub(crate) async fn update_nym_nodes( performance=excluded.performance, last_updated_utc=excluded.last_updated_utc ;", + record.node_id, + record.ed25519_identity_pubkey, + record.total_stake, + record.ip_addresses, + record.mix_port, + record.x25519_sphinx_pubkey, + record.node_role, + record.supported_roles, + record.entry, + record.self_described, + record.bond_info, + record.performance, + record.last_updated_utc as i32, ) - .bind(record.node_id) - .bind(record.ed25519_identity_pubkey) - .bind(record.total_stake) - .bind(record.ip_addresses) - .bind(record.mix_port) - .bind(record.x25519_sphinx_pubkey) - .bind(record.node_role) - .bind(record.supported_roles) - .bind(record.entry) - .bind(record.self_described) - .bind(record.bond_info) - .bind(record.performance) - .bind(record.last_updated_utc) .execute(&mut *tx) .await .map_err(|e| anyhow::anyhow!("Failed to INSERT node_id={}: {}", record.node_id, e))?; @@ -150,10 +152,6 @@ pub(crate) async fn update_nym_nodes( tx.commit().await?; - tracing::debug!( - "Successfully inserted/updated {} nym_nodes records", - inserted - ); Ok(inserted) } @@ -162,10 +160,10 @@ pub(crate) async fn get_described_node_bond_info( ) -> anyhow::Result> { let mut conn = pool.acquire().await?; - crate::db::query( + sqlx::query!( r#"SELECT node_id, - bond_info + bond_info as "bond_info: serde_json::Value" FROM nym_nodes WHERE @@ -180,11 +178,11 @@ pub(crate) async fn get_described_node_bond_info( records .into_iter() .filter_map(|record| { - let node_id: i32 = record.try_get("node_id").ok()?; - let bond_info: serde_json::Value = record.try_get("bond_info").ok()?; - serde_json::from_value::(bond_info) - .ok() - .map(|res| (node_id as i64 as NodeId, res)) + record + .bond_info + // only return details for nodes 
which have details stored + .and_then(|bond_info| serde_json::from_value::(bond_info).ok()) + .map(|res| (record.node_id as NodeId, res)) }) .collect::>() }) @@ -196,10 +194,10 @@ pub(crate) async fn get_node_self_description( ) -> anyhow::Result> { let mut conn = pool.acquire().await?; - crate::db::query( + sqlx::query!( r#"SELECT node_id, - self_described + self_described as "self_described: serde_json::Value" FROM nym_nodes WHERE @@ -214,14 +212,18 @@ pub(crate) async fn get_node_self_description( records .into_iter() .filter_map(|record| { - let node_id: i32 = record.try_get("node_id").ok()?; - let self_described: serde_json::Value = record.try_get("self_described").ok()?; - - let val = serde_json::from_value::(self_described) - .inspect_err(|err| { - warn!("malformed description data for node {node_id}: {err}") - }); - val.ok().map(|res| (node_id as NodeId, res.into())) + let node_id = record.node_id; + record + .self_described + // only return details for nodes which have details stored + .and_then(|description| { + serde_json::from_value::(description) + .inspect_err(|err| { + warn!("malformed description data for node {node_id}: {err}") + }) + .ok() + }) + .map(|res| (record.node_id as NodeId, res.into())) }) .collect::>() }) @@ -233,7 +235,7 @@ pub(crate) async fn get_bonded_node_description( ) -> anyhow::Result> { let mut conn = pool.acquire().await?; - crate::db::query( + sqlx::query!( r#"SELECT nd.node_id, moniker, @@ -254,15 +256,14 @@ pub(crate) async fn get_bonded_node_description( records .into_iter() .map(|elem| { - let node_id: i32 = elem.try_get("node_id").unwrap_or(0); - let node_id: NodeId = node_id.try_into().unwrap_or_default(); + let node_id: NodeId = elem.node_id.try_into().unwrap_or_default(); ( node_id, NodeDescription { - moniker: elem.try_get("moniker").unwrap_or_default(), - website: elem.try_get("website").unwrap_or_default(), - security_contact: elem.try_get("security_contact").unwrap_or_default(), - details: 
elem.try_get("details").unwrap_or_default(), + moniker: elem.moniker.unwrap_or_default(), + website: elem.website.unwrap_or_default(), + security_contact: elem.security_contact.unwrap_or_default(), + details: elem.details.unwrap_or_default(), }, ) }) @@ -273,15 +274,15 @@ pub(crate) async fn get_bonded_node_description( pub(crate) async fn insert_nym_node_description( conn: &mut DbConnection, - node_id: i64, - description: NodeDescriptionResponse, + node_id: &i64, + description: &NodeDescriptionResponse, timestamp: i64, ) -> anyhow::Result<()> { - crate::db::query( + sqlx::query!( r#" INSERT INTO nym_node_descriptions ( node_id, moniker, website, security_contact, details, last_updated_utc - ) VALUES (?, ?, ?, ?, ?, ?) + ) VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (node_id) DO UPDATE SET moniker = excluded.moniker, website = excluded.website, @@ -289,13 +290,13 @@ pub(crate) async fn insert_nym_node_description( details = excluded.details, last_updated_utc = excluded.last_updated_utc "#, + *node_id as i32, + description.moniker, + description.website, + description.security_contact, + description.details, + timestamp as i32, ) - .bind(node_id) - .bind(description.moniker) - .bind(description.website) - .bind(description.security_contact) - .bind(description.details) - .bind(timestamp) .execute(conn.as_mut()) .await .map(drop) @@ -309,25 +310,16 @@ pub(crate) async fn get_daily_stats(pool: &DbPool) -> anyhow::Result>() + .fetch_all(&mut *conn) .await?; Ok(items) diff --git a/nym-node-status-api/nym-node-status-api/src/db/queries/packet_stats.rs b/nym-node-status-api/nym-node-status-api/src/db/queries/packet_stats.rs index 5862a723d4f..b496e8b8ac3 100644 --- a/nym-node-status-api/nym-node-status-api/src/db/queries/packet_stats.rs +++ b/nym-node-status-api/nym-node-status-api/src/db/queries/packet_stats.rs @@ -59,38 +59,7 @@ pub(crate) async fn batch_store_packet_stats( .map_err(|err| anyhow::anyhow!("Failed to commit: {err}")) } -#[cfg(feature = "sqlite")] 
-pub(crate) async fn insert_node_packet_stats_uncommitted( - tx: &mut Transaction<'static, sqlx::Sqlite>, - node_kind: &ScrapeNodeKind, - stats: &NodeStats, - timestamp_utc: i64, -) -> Result<()> { - match node_kind { - ScrapeNodeKind::MixingNymNode { node_id } - | ScrapeNodeKind::EntryExitNymNode { node_id, .. } => { - sqlx::query( - r#" - INSERT INTO nym_nodes_packet_stats_raw ( - node_id, timestamp_utc, packets_received, packets_sent, packets_dropped - ) VALUES (?, ?, ?, ?, ?) - "#, - ) - .bind(node_id) - .bind(timestamp_utc) - .bind(stats.packets_received) - .bind(stats.packets_sent) - .bind(stats.packets_dropped) - .execute(tx.as_mut()) - .await?; - } - } - - Ok(()) -} - -#[cfg(feature = "pg")] -pub(crate) async fn insert_node_packet_stats_uncommitted( +async fn insert_node_packet_stats_uncommitted( tx: &mut Transaction<'static, sqlx::Postgres>, node_kind: &ScrapeNodeKind, stats: &NodeStats, @@ -99,18 +68,18 @@ pub(crate) async fn insert_node_packet_stats_uncommitted( match node_kind { ScrapeNodeKind::MixingNymNode { node_id } | ScrapeNodeKind::EntryExitNymNode { node_id, .. 
} => { - sqlx::query( + sqlx::query!( r#" INSERT INTO nym_nodes_packet_stats_raw ( node_id, timestamp_utc, packets_received, packets_sent, packets_dropped ) VALUES ($1, $2, $3, $4, $5) "#, + *node_id as i32, + timestamp_utc as i32, + stats.packets_received, + stats.packets_sent, + stats.packets_dropped, ) - .bind(node_id) - .bind(timestamp_utc) - .bind(stats.packets_received) - .bind(stats.packets_sent) - .bind(stats.packets_dropped) .execute(tx.as_mut()) .await?; } @@ -119,38 +88,6 @@ pub(crate) async fn insert_node_packet_stats_uncommitted( Ok(()) } -#[cfg(feature = "sqlite")] -pub(crate) async fn get_raw_node_stats( - tx: &mut Transaction<'static, sqlx::Sqlite>, - node_kind: &ScrapeNodeKind, -) -> Result> { - let packets = match node_kind { - // if no packets are found, it's fine to assume 0 because that's also - // SQL default value if none provided - ScrapeNodeKind::MixingNymNode { node_id } - | ScrapeNodeKind::EntryExitNymNode { node_id, .. } => { - sqlx::query_as::<_, NodeStats>( - r#" - SELECT - COALESCE(packets_received, 0) as packets_received, - COALESCE(packets_sent, 0) as packets_sent, - COALESCE(packets_dropped, 0) as packets_dropped - FROM nym_nodes_packet_stats_raw - WHERE node_id = ? - ORDER BY timestamp_utc DESC - LIMIT 1 OFFSET 1 - "#, - ) - .bind(node_id) - .fetch_optional(tx.as_mut()) - .await? - } - }; - - Ok(packets) -} - -#[cfg(feature = "pg")] pub(crate) async fn get_raw_node_stats( tx: &mut Transaction<'static, sqlx::Postgres>, node_kind: &ScrapeNodeKind, @@ -160,19 +97,20 @@ pub(crate) async fn get_raw_node_stats( // SQL default value if none provided ScrapeNodeKind::MixingNymNode { node_id } | ScrapeNodeKind::EntryExitNymNode { node_id, .. 
} => { - sqlx::query_as::<_, NodeStats>( + sqlx::query_as!( + NodeStats, r#" SELECT - COALESCE(packets_received, 0) as packets_received, - COALESCE(packets_sent, 0) as packets_sent, - COALESCE(packets_dropped, 0) as packets_dropped + COALESCE(packets_received, 0) as "packets_received!: _", + COALESCE(packets_sent, 0) as "packets_sent!: _", + COALESCE(packets_dropped, 0) as "packets_dropped!: _" FROM nym_nodes_packet_stats_raw WHERE node_id = $1 ORDER BY timestamp_utc DESC LIMIT 1 OFFSET 1 "#, + *node_id as i32, ) - .bind(node_id) .fetch_optional(tx.as_mut()) .await? } @@ -181,57 +119,6 @@ pub(crate) async fn get_raw_node_stats( Ok(packets) } -#[cfg(feature = "sqlite")] -pub(crate) async fn insert_daily_node_stats_uncommitted( - tx: &mut Transaction<'static, sqlx::Sqlite>, - node_kind: &ScrapeNodeKind, - date_utc: &str, - packets: NodeStats, -) -> Result<()> { - match node_kind { - ScrapeNodeKind::MixingNymNode { node_id } - | ScrapeNodeKind::EntryExitNymNode { node_id, .. } => { - let total_stake = sqlx::query_scalar::<_, i64>( - r#" - SELECT - total_stake - FROM nym_nodes - WHERE node_id = ? - "#, - ) - .bind(node_id) - .fetch_one(tx.as_mut()) - .await?; - - sqlx::query( - r#" - INSERT INTO nym_node_daily_mixing_stats ( - node_id, date_utc, - total_stake, packets_received, - packets_sent, packets_dropped - ) VALUES (?, ?, ?, ?, ?, ?) 
- ON CONFLICT(node_id, date_utc) DO UPDATE SET - total_stake = excluded.total_stake, - packets_received = nym_node_daily_mixing_stats.packets_received + excluded.packets_received, - packets_sent = nym_node_daily_mixing_stats.packets_sent + excluded.packets_sent, - packets_dropped = nym_node_daily_mixing_stats.packets_dropped + excluded.packets_dropped - "#, - ) - .bind(node_id) - .bind(date_utc) - .bind(total_stake) - .bind(packets.packets_received) - .bind(packets.packets_sent) - .bind(packets.packets_dropped) - .execute(tx.as_mut()) - .await?; - } - } - - Ok(()) -} - -#[cfg(feature = "pg")] pub(crate) async fn insert_daily_node_stats_uncommitted( tx: &mut Transaction<'static, sqlx::Postgres>, node_kind: &ScrapeNodeKind, @@ -241,19 +128,19 @@ pub(crate) async fn insert_daily_node_stats_uncommitted( match node_kind { ScrapeNodeKind::MixingNymNode { node_id } | ScrapeNodeKind::EntryExitNymNode { node_id, .. } => { - let total_stake = sqlx::query_scalar::<_, i64>( + let total_stake = sqlx::query_scalar!( r#" SELECT total_stake FROM nym_nodes WHERE node_id = $1 "#, + *node_id as i32 ) - .bind(node_id) .fetch_one(tx.as_mut()) .await?; - sqlx::query( + sqlx::query!( r#" INSERT INTO nym_node_daily_mixing_stats ( node_id, date_utc, @@ -266,13 +153,13 @@ pub(crate) async fn insert_daily_node_stats_uncommitted( packets_sent = nym_node_daily_mixing_stats.packets_sent + excluded.packets_sent, packets_dropped = nym_node_daily_mixing_stats.packets_dropped + excluded.packets_dropped "#, + *node_id as i32, + date_utc, + total_stake, + packets.packets_received, + packets.packets_sent, + packets.packets_dropped, ) - .bind(node_id) - .bind(date_utc) - .bind(total_stake) - .bind(packets.packets_received) - .bind(packets.packets_sent) - .bind(packets.packets_dropped) .execute(tx.as_mut()) .await?; } diff --git a/nym-node-status-api/nym-node-status-api/src/db/queries/scraper.rs b/nym-node-status-api/nym-node-status-api/src/db/queries/scraper.rs index d72d0c870da..ce8a5e7981c 100644 --- 
a/nym-node-status-api/nym-node-status-api/src/db/queries/scraper.rs +++ b/nym-node-status-api/nym-node-status-api/src/db/queries/scraper.rs @@ -67,8 +67,8 @@ pub(crate) async fn get_nodes_for_scraping(pool: &DbPool) -> Result Result<()> { let timestamp = now_utc().unix_timestamp(); let mut conn = pool.acquire().await?; @@ -81,7 +81,7 @@ pub(crate) async fn insert_scraped_node_description( node_id, identity_key, } => { - insert_nym_node_description(&mut conn, node_id, description.clone(), timestamp).await?; + insert_nym_node_description(&mut conn, node_id, description, timestamp).await?; // for historic reasons (/gateways API), store this info into gateways table as well insert_gateway_description(&mut conn, identity_key, description, timestamp).await?; } diff --git a/nym-node-status-api/nym-node-status-api/src/db/queries/summary.rs b/nym-node-status-api/nym-node-status-api/src/db/queries/summary.rs index d0efb9a406b..120c8acece2 100644 --- a/nym-node-status-api/nym-node-status-api/src/db/queries/summary.rs +++ b/nym-node-status-api/nym-node-status-api/src/db/queries/summary.rs @@ -23,12 +23,13 @@ use crate::{ pub(crate) async fn get_summary_history(pool: &DbPool) -> anyhow::Result> { let mut conn = pool.acquire().await?; - let items = crate::db::query_as::( + let items = sqlx::query_as!( + SummaryHistoryDto, r#"SELECT - id, - date, - timestamp_utc, - value_json + id as "id!", + date as "date!", + timestamp_utc as "timestamp_utc!", + value_json as "value_json!" FROM summary_history ORDER BY date DESC LIMIT 30"#, @@ -50,12 +51,13 @@ pub(crate) async fn get_summary_history(pool: &DbPool) -> anyhow::Result anyhow::Result> { let mut conn = pool.acquire().await?; - Ok(crate::db::query_as::( + Ok(sqlx::query_as!( + SummaryDto, r#"SELECT - key, - value_json, - last_updated_utc - FROM summary"#, + key as "key!", + value_json as "value_json!", + last_updated_utc as "last_updated_utc!" 
+ FROM summary"# ) .fetch(&mut *conn) .try_collect::>() diff --git a/nym-node-status-api/nym-node-status-api/src/db/queries/testruns.rs b/nym-node-status-api/nym-node-status-api/src/db/queries/testruns.rs index 80dafc070d7..1f8aafed52e 100644 --- a/nym-node-status-api/nym-node-status-api/src/db/queries/testruns.rs +++ b/nym-node-status-api/nym-node-status-api/src/db/queries/testruns.rs @@ -3,50 +3,52 @@ use crate::db::DbConnection; use crate::db::DbPool; use crate::http::models::TestrunAssignment; use crate::utils::now_utc; -use sqlx::Row; use time::Duration; -pub(crate) async fn count_testruns_in_progress(conn: &mut DbConnection) -> anyhow::Result { - #[cfg(feature = "sqlite")] - let sql = "SELECT COUNT(id) FROM testruns WHERE status = ?"; - - #[cfg(feature = "pg")] - let sql = "SELECT COUNT(id) FROM testruns WHERE status = $1"; - - let count: i64 = sqlx::query_scalar(sql) - .bind(TestRunStatus::InProgress as i32) - .fetch_one(conn.as_mut()) - .await?; - - Ok(count) +pub(crate) async fn count_testruns_in_progress( + conn: &mut DbConnection, +) -> anyhow::Result> { + sqlx::query_scalar!( + r#"SELECT + COUNT(id) as "count: i64" + FROM testruns + WHERE + status = $1 + "#, + TestRunStatus::InProgress as i64, + ) + .fetch_one(conn.as_mut()) + .await + .map_err(anyhow::Error::from) } pub(crate) async fn get_in_progress_testrun_by_id( conn: &mut DbConnection, testrun_id: i32, ) -> anyhow::Result { - crate::db::query_as::( + sqlx::query_as!( + TestRunDto, r#"SELECT - id, - gateway_id, - status, - created_utc, - ip_address, - log, + id as "id!", + gateway_id as "gateway_id!", + status as "status!", + created_utc as "created_utc!", + ip_address as "ip_address!", + log as "log!", last_assigned_utc FROM testruns WHERE - id = ? + id = $1 AND - status = ? 
+ status = $2 ORDER BY created_utc LIMIT 1"#, + testrun_id, + TestRunStatus::InProgress as i64, ) - .bind(testrun_id) - .bind(TestRunStatus::InProgress as i32) .fetch_one(conn.as_mut()) .await - .map_err(|e| anyhow::anyhow!("Failed to retrieve in-progress testrun {testrun_id}: {e}")) + .map_err(|e| anyhow::anyhow!("Couldn't retrieve testrun {testrun_id}: {e}")) } pub(crate) async fn update_testruns_assigned_before( @@ -57,20 +59,20 @@ pub(crate) async fn update_testruns_assigned_before( let previous_run = now_utc() - max_age; let cutoff_timestamp = previous_run.unix_timestamp(); - let res = crate::db::query( + let res = sqlx::query!( r#"UPDATE testruns SET - status = ? + status = $1 WHERE - status = ? + status = $2 AND - last_assigned_utc < ? + last_assigned_utc < $3 "#, + TestRunStatus::Queued as i64, + TestRunStatus::InProgress as i64, + cutoff_timestamp ) - .bind(TestRunStatus::Queued as i32) - .bind(TestRunStatus::InProgress as i32) - .bind(cutoff_timestamp) .execute(conn.as_mut()) .await?; @@ -91,47 +93,51 @@ pub(crate) async fn assign_oldest_testrun( ) -> anyhow::Result> { let now = now_utc().unix_timestamp(); // find & mark as "In progress" in the same transaction to avoid race conditions - let returning = crate::db::query( - r#"UPDATE testruns - SET - status = ?, - last_assigned_utc = ? - WHERE id = - ( + // lock the row to avoid two threads reading the same value + let returning = sqlx::query!( + r#" + WITH oldest_queued AS ( SELECT id FROM testruns - WHERE status = ? 
+ WHERE status = $1 ORDER BY created_utc asc LIMIT 1 + FOR UPDATE SKIP LOCKED ) + UPDATE testruns + SET + status = $3, + last_assigned_utc = $2 + FROM oldest_queued + WHERE testruns.id = oldest_queued.id RETURNING - id, - gateway_id + testruns.id, + testruns.gateway_id "#, + TestRunStatus::Queued as i32, + now, + TestRunStatus::InProgress as i32, ) - .bind(TestRunStatus::InProgress as i32) - .bind(now) - .bind(TestRunStatus::Queued as i32) .fetch_optional(conn.as_mut()) .await?; if let Some(testrun) = returning { - let gw_identity = crate::db::query( + let gw_identity = sqlx::query!( r#" SELECT id, gateway_identity_key FROM gateways - WHERE id = ? + WHERE id = $1 LIMIT 1"#, + testrun.gateway_id ) - .bind(testrun.try_get::("gateway_id")?) .fetch_one(conn.as_mut()) .await?; Ok(Some(TestrunAssignment { - testrun_id: testrun.try_get("id")?, - gateway_identity_key: gw_identity.try_get("gateway_identity_key")?, + testrun_id: testrun.id, + gateway_identity_key: gw_identity.gateway_identity_key, assigned_at_utc: now, })) } else { @@ -145,11 +151,13 @@ pub(crate) async fn update_testrun_status( status: TestRunStatus, ) -> anyhow::Result<()> { let status = status as i32; - crate::db::query("UPDATE testruns SET status = ? WHERE id = ?") - .bind(status) - .bind(testrun_id) - .execute(conn.as_mut()) - .await?; + sqlx::query!( + "UPDATE testruns SET status = $1 WHERE id = $2", + status, + testrun_id, + ) + .execute(conn.as_mut()) + .await?; Ok(()) } @@ -157,41 +165,33 @@ pub(crate) async fn update_testrun_status( pub(crate) async fn update_gateway_last_probe_log( conn: &mut DbConnection, gateway_pk: i32, - log: String, + log: &str, ) -> anyhow::Result<()> { - crate::db::query("UPDATE gateways SET last_probe_log = ? 
WHERE id = ?") - .bind(log) - .bind(gateway_pk) - .execute(conn.as_mut()) - .await - .map(drop) - .map_err(|e| { - anyhow::anyhow!( - "Failed to update probe log for gateway {}: {}", - gateway_pk, - e - ) - }) + sqlx::query!( + "UPDATE gateways SET last_probe_log = $1 WHERE id = $2", + log, + gateway_pk, + ) + .execute(conn.as_mut()) + .await + .map(drop) + .map_err(From::from) } pub(crate) async fn update_gateway_last_probe_result( conn: &mut DbConnection, gateway_pk: i32, - result: String, + result: &str, ) -> anyhow::Result<()> { - crate::db::query("UPDATE gateways SET last_probe_result = ? WHERE id = ?") - .bind(result) - .bind(gateway_pk) - .execute(conn.as_mut()) - .await - .map(drop) - .map_err(|e| { - anyhow::anyhow!( - "Failed to update probe result for gateway {}: {}", - gateway_pk, - e - ) - }) + sqlx::query!( + "UPDATE gateways SET last_probe_result = $1 WHERE id = $2", + result, + gateway_pk, + ) + .execute(conn.as_mut()) + .await + .map(drop) + .map_err(From::from) } pub(crate) async fn update_gateway_score( @@ -199,21 +199,24 @@ pub(crate) async fn update_gateway_score( gateway_pk: i32, ) -> anyhow::Result<()> { let now = now_utc().unix_timestamp(); - crate::db::query("UPDATE gateways SET last_testrun_utc = ?, last_updated_utc = ? 
WHERE id = ?") - .bind(now) - .bind(now) - .bind(gateway_pk) - .execute(conn.as_mut()) - .await - .map(drop) - .map_err(From::from) + sqlx::query!( + "UPDATE gateways SET last_testrun_utc = $1, last_updated_utc = $2 WHERE id = $3", + now, + now, + gateway_pk, + ) + .execute(conn.as_mut()) + .await + .map(drop) + .map_err(From::from) } pub(crate) async fn get_testrun_by_id( conn: &mut DbConnection, testrun_id: i32, ) -> anyhow::Result { - crate::db::query_as::( + sqlx::query_as!( + TestRunDto, r#"SELECT id, gateway_id, @@ -223,9 +226,9 @@ pub(crate) async fn get_testrun_by_id( log, last_assigned_utc FROM testruns - WHERE id = ?"#, + WHERE id = $1"#, + testrun_id ) - .bind(testrun_id) .fetch_one(conn.as_mut()) .await .map_err(|e| anyhow::anyhow!("Testrun {} not found: {}", testrun_id, e)) @@ -239,24 +242,24 @@ pub(crate) async fn insert_external_testrun( ) -> anyhow::Result<()> { let now = crate::utils::now_utc().unix_timestamp(); - crate::db::query( + sqlx::query!( r#"INSERT INTO testruns ( - id, - gateway_id, - status, - created_utc, - last_assigned_utc, - ip_address, + id, + gateway_id, + status, + created_utc, + last_assigned_utc, + ip_address, log - ) VALUES (?, ?, ?, ?, ?, ?, ?)"#, - ) - .bind(testrun_id) - .bind(gateway_id) - .bind(TestRunStatus::InProgress as i32) - .bind(now) - .bind(assigned_at_utc) - .bind("external") // Marker for external origin - .bind("") // Empty initial log + ) VALUES ($1, $2, $3, $4, $5, $6, $7)"#, + testrun_id, + gateway_id, + TestRunStatus::InProgress as i32, + now, + assigned_at_utc, + "external", // Marker for external origin + "" + ) // Empty initial log .execute(conn.as_mut()) .await?; @@ -274,12 +277,14 @@ pub(crate) async fn update_testrun_status_by_gateway( status: TestRunStatus, ) -> anyhow::Result<()> { let status = status as i32; - crate::db::query("UPDATE testruns SET status = ? WHERE gateway_id = ? 
AND status = ?") - .bind(status) - .bind(gateway_id) - .bind(TestRunStatus::InProgress as i32) - .execute(conn.as_mut()) - .await?; + sqlx::query!( + "UPDATE testruns SET status = $1 WHERE gateway_id = $2 AND status = $3", + status, + gateway_id, + TestRunStatus::InProgress as i32 + ) + .execute(conn.as_mut()) + .await?; Ok(()) } diff --git a/nym-node-status-api/nym-node-status-api/src/db/query_wrapper.rs b/nym-node-status-api/nym-node-status-api/src/db/query_wrapper.rs deleted file mode 100644 index e061d654a1f..00000000000 --- a/nym-node-status-api/nym-node-status-api/src/db/query_wrapper.rs +++ /dev/null @@ -1,251 +0,0 @@ -use sqlx::Database; - -/// Converts SQLite-style ? placeholders to PostgreSQL $N format -#[cfg(feature = "pg")] -fn convert_placeholders(query: &str) -> String { - let mut result = String::with_capacity(query.len() + 10); - let mut placeholder_count = 0; - let mut chars = query.chars(); - let mut in_string: Option = None; - let mut escape_next = false; - - #[allow(clippy::while_let_on_iterator)] - while let Some(ch) = chars.next() { - if escape_next { - result.push(ch); - escape_next = false; - continue; - } - - if let Some(quote_char) = in_string { - result.push(ch); - if ch == quote_char { - in_string = None; - } else if ch == '\\' { - escape_next = true; - } - continue; - } - - match ch { - '\\' => { - result.push(ch); - escape_next = true; - } - '\'' | '"' => { - result.push(ch); - in_string = Some(ch); - } - '?' 
=> { - placeholder_count += 1; - result.push_str(&format!("${placeholder_count}")); - } - _ => { - result.push(ch); - } - } - } - - result -} - -/// Creates a query that automatically handles placeholder conversion -#[cfg(feature = "sqlite")] -pub fn query( - sql: &str, -) -> sqlx::query::Query<'_, sqlx::Sqlite, ::Arguments<'_>> { - sqlx::query(sql) -} - -#[cfg(feature = "pg")] -pub fn query( - sql: &str, -) -> sqlx::query::Query<'static, sqlx::Postgres, ::Arguments<'static>> { - let converted = convert_placeholders(sql); - sqlx::query(Box::leak(converted.into_boxed_str())) -} - -/// Creates a query_as that automatically handles placeholder conversion -#[cfg(feature = "sqlite")] -pub fn query_as( - sql: &str, -) -> sqlx::query::QueryAs<'_, sqlx::Sqlite, O, ::Arguments<'_>> -where - O: for<'r> sqlx::FromRow<'r, ::Row>, -{ - sqlx::query_as(sql) -} - -#[cfg(feature = "pg")] -pub fn query_as( - sql: &str, -) -> sqlx::query::QueryAs< - 'static, - sqlx::Postgres, - O, - ::Arguments<'static>, -> -where - O: for<'r> sqlx::FromRow<'r, ::Row>, -{ - let converted = convert_placeholders(sql); - sqlx::query_as(Box::leak(converted.into_boxed_str())) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - #[cfg(feature = "pg")] - fn test_convert_placeholders() { - // Basic conversion - assert_eq!( - convert_placeholders(r"SELECT * FROM table WHERE id = ?"), - r"SELECT * FROM table WHERE id = $1" - ); - - // Multiple placeholders - assert_eq!( - convert_placeholders(r"INSERT INTO table (a, b, c) VALUES (?, ?, ?)"), - r"INSERT INTO table (a, b, c) VALUES ($1, $2, $3)" - ); - - // Placeholder inside string literal should be ignored - assert_eq!( - convert_placeholders(r"SELECT * FROM table WHERE name = 'test?' AND id = ?"), - r"SELECT * FROM table WHERE name = 'test?' AND id = $1" - ); - - // Update statement - assert_eq!( - convert_placeholders(r"UPDATE table SET a = ?, b = ? 
WHERE id = ?"), - r"UPDATE table SET a = $1, b = $2 WHERE id = $3" - ); - - // Test with 10 placeholders (like in update_mixnodes) - assert_eq!( - convert_placeholders(r"INSERT INTO t VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"), - r"INSERT INTO t VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)" - ); - - // No placeholders - assert_eq!( - convert_placeholders(r"SELECT * FROM table"), - r"SELECT * FROM table" - ); - - // Placeholder at the beginning - assert_eq!(convert_placeholders(r"? AND ?"), r"$1 AND $2"); - - // Placeholder at the end - assert_eq!( - convert_placeholders(r"SELECT * FROM table WHERE id = ?"), - r"SELECT * FROM table WHERE id = $1" - ); - - // Adjacent placeholders - assert_eq!( - convert_placeholders(r"VALUES(?,? ,?)"), - r"VALUES($1,$2 ,$3)" - ); - - // Escaped single quote - assert_eq!( - convert_placeholders(r"SELECT * FROM foo WHERE bar = 'it\'s a test' AND baz = ?"), - r"SELECT * FROM foo WHERE bar = 'it\'s a test' AND baz = $1" - ); - - // Double quotes - assert_eq!( - convert_placeholders(r#"SELECT * FROM "table" WHERE "column" = ? AND name = "test?""#), - r#"SELECT * FROM "table" WHERE "column" = $1 AND name = "test?""# - ); - - // Mixed quotes - assert_eq!( - convert_placeholders( - r#"SELECT * FROM table WHERE a = 'single?' AND b = "double?" AND c = ?"# - ), - r#"SELECT * FROM table WHERE a = 'single?' AND b = "double?" AND c = $1"# - ); - - // Escaped backslash before quote - assert_eq!( - convert_placeholders(r"SELECT * FROM table WHERE path = 'C:\\?' AND id = ?"), - r"SELECT * FROM table WHERE path = 'C:\\?' 
AND id = $1" - ); - - // Multiple escaped quotes - assert_eq!( - convert_placeholders( - r#"INSERT INTO table (msg) VALUES ('it\'s "complex" test') WHERE id = ?"# - ), - r#"INSERT INTO table (msg) VALUES ('it\'s "complex" test') WHERE id = $1"# - ); - - // Very long query with many placeholders - let long_query = r"INSERT INTO very_long_table_name (col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11, col12, col13, col14, col15) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; - let expected = r"INSERT INTO very_long_table_name (col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11, col12, col13, col14, col15) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)"; - assert_eq!(convert_placeholders(long_query), expected); - - // Query with comments (question marks in comments are also converted) - assert_eq!( - convert_placeholders( - r"-- This is a comment with ? - SELECT * FROM table WHERE id = ? -- another comment ?" - ), - r"-- This is a comment with $1 - SELECT * FROM table WHERE id = $2 -- another comment $3" - ); - - // Multiline strings - assert_eq!( - convert_placeholders( - r"SELECT * FROM table - WHERE description = 'This is a - multiline string with ?' - AND id = ?" - ), - r"SELECT * FROM table - WHERE description = 'This is a - multiline string with ?' - AND id = $1" - ); - - // Complex nested quotes - assert_eq!( - convert_placeholders( - r#"SELECT json_extract(data, '$.items[?(@.name=="test?")]') FROM table WHERE id = ?"# - ), - r#"SELECT json_extract(data, '$.items[?(@.name=="test?")]') FROM table WHERE id = $1"# - ); - - // Empty string - assert_eq!(convert_placeholders(""), ""); - - // Only placeholders - assert_eq!(convert_placeholders("???"), "$1$2$3"); - - // Unicode in strings - assert_eq!( - convert_placeholders(r"SELECT * FROM table WHERE name = '测试?' AND id = ?"), - r"SELECT * FROM table WHERE name = '测试?' 
AND id = $1" - ); - - // Test case with backslash at end of string - assert_eq!( - convert_placeholders(r"SELECT * FROM table WHERE path LIKE '%\\' AND id = ?"), - r"SELECT * FROM table WHERE path LIKE '%\\' AND id = $1" - ); - - // Mismatched quotes - assert_eq!( - convert_placeholders(r#"SELECT * FROM foo WHERE bar = "'" AND baz = ?"#), - r#"SELECT * FROM foo WHERE bar = "'" AND baz = $1"# - ); - - // Unmatched quote - assert_eq!(convert_placeholders(r"SELECT 'oops?"), r"SELECT 'oops?"); - } -} diff --git a/nym-node-status-api/nym-node-status-api/src/http/api/testruns.rs b/nym-node-status-api/nym-node-status-api/src/http/api/testruns.rs index 3292de1ad8e..dd0c25db9d1 100644 --- a/nym-node-status-api/nym-node-status-api/src/http/api/testruns.rs +++ b/nym-node-status-api/nym-node-status-api/src/http/api/testruns.rs @@ -49,7 +49,8 @@ async fn request_testrun( let active_testruns = db::queries::testruns::count_testruns_in_progress(&mut conn) .await - .map_err(HttpError::internal_with_logging)?; + .map_err(HttpError::internal_with_logging)? 
+ .unwrap_or_default(); if active_testruns >= state.agent_max_count() { tracing::warn!( "{}/{} testruns in progress, rejecting", @@ -147,7 +148,7 @@ async fn submit_testrun( queries::testruns::update_gateway_last_probe_log( &mut conn, assigned_testrun.gateway_id, - submitted_result.payload.probe_result.clone(), + &submitted_result.payload.probe_result.clone(), ) .await .map_err(HttpError::internal_with_logging)?; @@ -155,7 +156,7 @@ async fn submit_testrun( queries::testruns::update_gateway_last_probe_result( &mut conn, assigned_testrun.gateway_id, - result, + &result, ) .await .map_err(HttpError::internal_with_logging)?; @@ -300,13 +301,13 @@ async fn process_testrun_submission_by_gateway( queries::testruns::update_gateway_last_probe_log( conn, gateway_id, - payload.probe_result.clone(), + &payload.probe_result.clone(), ) .await .map_err(HttpError::internal_with_logging)?; let result = get_result_from_log(&payload.probe_result); - queries::testruns::update_gateway_last_probe_result(conn, gateway_id, result) + queries::testruns::update_gateway_last_probe_result(conn, gateway_id, &result) .await .map_err(HttpError::internal_with_logging)?; diff --git a/nym-node-status-api/nym-node-status-api/src/http/models.rs b/nym-node-status-api/nym-node-status-api/src/http/models.rs index 9ff469eee2d..3f6c2e8cdd5 100644 --- a/nym-node-status-api/nym-node-status-api/src/http/models.rs +++ b/nym-node-status-api/nym-node-status-api/src/http/models.rs @@ -11,6 +11,7 @@ use nym_validator_client::{ nym_nodes::{BasicEntryInformation, NodeRole}, }; use serde::{Deserialize, Serialize}; +use strum_macros::EnumString; use tracing::{error, instrument}; use utoipa::ToSchema; @@ -40,11 +41,54 @@ pub struct BuildInformation { pub commit_sha: String, } +#[derive(Debug, Clone, Deserialize, Serialize, ToSchema, EnumString)] +#[serde(rename_all = "snake_case")] +#[strum(serialize_all = "snake_case")] +pub enum AsnKind { + Residential, + Other, +} + +#[derive(Debug, Clone, Deserialize, Serialize, 
ToSchema)] +pub struct Asn { + pub asn: String, + pub name: String, + pub domain: String, + pub route: String, + pub kind: AsnKind, +} + #[derive(Debug, Clone, Deserialize, Serialize, ToSchema)] pub struct Location { pub two_letter_iso_country_code: String, pub latitude: f64, pub longitude: f64, + + pub city: String, + pub region: String, + pub org: String, + pub postal: String, + pub timezone: String, + + pub asn: Option, +} + +#[derive(Debug, Clone, Deserialize, Serialize, ToSchema, EnumString)] +#[serde(rename_all = "snake_case")] +#[strum(serialize_all = "snake_case")] +pub enum ScoreValue { + Offline, + Low, + Medium, + High, +} + +#[derive(Debug, Clone, Deserialize, Serialize, ToSchema)] +pub struct DVpnGatewayPerformance { + last_updated_utc: String, + score: ScoreValue, + load: ScoreValue, + uptime_percentage_last_24_hours: f32, } #[derive(Debug, Clone, Deserialize, Serialize, ToSchema)] @@ -60,9 +104,15 @@ pub struct DVpnGateway { pub mix_port: u16, pub role: NodeRole, pub entry: Option, + // The performance data here originates from the nym-api, and is effectively mixnet performance // at the time of writing this pub performance: String, + + // Node performance information needed by the NymVPN UI / Explorer to show more information + // about the node in a user-friendly way + pub performance_v2: Option, + pub build_information: BinaryBuildInformationOwned, } @@ -155,6 +205,10 @@ pub mod wg_outcome_versions { #[derive(Debug, Clone, Deserialize, Serialize, ToSchema)] pub struct ProbeOutcomeV1 { pub can_register: bool, + pub can_handshake: Option, + pub can_resolve_dns: Option, + pub ping_hosts_performance: Option, + pub ping_ips_performance: Option, pub can_handshake_v4: bool, pub can_resolve_dns_v4: bool, @@ -181,6 +235,7 @@ impl DVpnGateway { pub(crate) fn new(gateway: Gateway, skimmed_node: &SkimmedNode) -> anyhow::Result { let location = gateway .explorer_pretty_bond + .clone() .ok_or_else(|| anyhow::anyhow!("Missing explorer_pretty_bond")) 
.and_then(|value| { serde_json::from_value::(value).map_err(From::from) @@ -189,20 +244,53 @@ impl DVpnGateway { let self_described: NymNodeDataDeHelper = gateway .self_described + .clone() .ok_or_else(|| anyhow::anyhow!("Missing self_described")) .and_then(|value| { serde_json::from_value::(value).map_err(From::from) })?; - let last_probe_result = match gateway.last_probe_result { - Some(value) => { - let parsed = - serde_json::from_value::(value).inspect_err(|err| { + let last_updated_utc = gateway.last_testrun_utc.clone().unwrap_or_default(); + let performance = to_percent(gateway.performance); + let network_monitor_performance_mixnet_mode = gateway.performance as f32 / 100f32; + + tracing::info!("🌈 gateway probe result: {:?}", gateway.last_probe_result); + + let (last_probe_result, performance_v2) = match gateway.last_probe_result { + Some(ref value) => { + let mut parsed = serde_json::from_value::(value.clone()) + .inspect_err(|err| { error!("Failed to deserialize probe result: {err}"); })?; - Some(parsed) + + parsed.outcome.wg = parsed.outcome.wg.clone().map(|mut wg| { + if wg.can_handshake.is_none() { + wg.can_handshake = Some(wg.can_handshake_v4); + } + if wg.can_resolve_dns.is_none() { + wg.can_resolve_dns = Some(wg.can_resolve_dns_v4); + } + if wg.ping_hosts_performance.is_none() { + wg.ping_hosts_performance = Some(wg.ping_hosts_performance_v4); + } + if wg.ping_ips_performance.is_none() { + wg.ping_ips_performance = Some(wg.ping_ips_performance_v4); + } + wg + }); + + tracing::info!("🌈 gateway probe parsed: {:?}", parsed); + let performance_v2 = DVpnGatewayPerformance { + last_updated_utc: last_updated_utc.to_string(), + load: calculate_load(&parsed), + score: calculate_score(&gateway, &parsed), + + // the network monitor's measure is a good proxy for node uptime, it can be improved in the future + uptime_percentage_last_24_hours: network_monitor_performance_mixnet_mode, + }; + (Some(parsed), Some(performance_v2)) } - None => None, + None => (None, 
None), }; Ok(Self { @@ -214,21 +302,113 @@ impl DVpnGateway { latitude: location.location.latitude, longitude: location.location.longitude, two_letter_iso_country_code: location.two_letter_iso_country_code, + org: location.org, + city: location.city, + region: location.region, + postal: location.postal, + timezone: location.timezone, + asn: location.asn.map(|a| { + let kind = if a.kind.eq_ignore_ascii_case("isp") { + // we consider anything that is "ISP" from ipinfo to be residential + AsnKind::Residential + } else { + // everything else is considered "other" + AsnKind::Other + }; + Asn { + asn: a.asn, + domain: a.domain, + kind, + name: a.name, + route: a.route, + } + }), }, last_probe: last_probe_result.map(|res| DirectoryGwProbe { - last_updated_utc: gateway.last_testrun_utc.unwrap_or_default(), + last_updated_utc: last_updated_utc.to_string(), outcome: res.outcome, }), ip_addresses: skimmed_node.ip_addresses.clone(), mix_port: skimmed_node.mix_port, role: skimmed_node.role.clone(), entry: skimmed_node.entry.clone(), - performance: to_percent(gateway.performance), + performance, + performance_v2, build_information: self_described.build_information, }) } } +/// calculates a visual score for the gateway +fn calculate_score(gateway: &Gateway, probe_outcome: &LastProbeResult) -> ScoreValue { + let mixnet_performance = gateway.performance as f64 / 100.0; + let ping_ips_performance = probe_outcome + .outcome + .wg + .clone() + .map(|p| { + let ping_ips_performance = p.ping_ips_performance_v4 as f64; + + let duration = p.download_duration_sec_v4 as f64; + let file_size_mb = if p.downloaded_file_v4.contains("1Mb") { + 1024.0 + } else if p.downloaded_file_v4.contains("10Mb") { + 10240.0 + } else if p.downloaded_file_v4.contains("100Mb") { + 102400.0 + } else { + 1.0 + }; + let speed_mbps = file_size_mb / duration; + + let file_download_score = if speed_mbps > 100.0 { + 1.0 + } else if speed_mbps > 50.0 { + 0.75 + } else if speed_mbps > 20.0 { + 0.5 + } else if speed_mbps 
> 10.0 { + 0.25 + } else { + 0.1 + }; + + // combine the scores + file_download_score * ping_ips_performance + }) + .unwrap_or(0f64); + + let score = mixnet_performance * ping_ips_performance; + + if score > 0.75 { + ScoreValue::High + } else if score > 0.5 { + ScoreValue::Medium + } else if score > 0.1 { + ScoreValue::Low + } else { + ScoreValue::Offline + } +} + +/// calculates a visual load score for the gateway +fn calculate_load(probe_outcome: &LastProbeResult) -> ScoreValue { + let score = probe_outcome + .outcome + .wg + .clone() + .map(|p| p.ping_ips_performance_v4 as f64) + .unwrap_or(0f64); + + if score > 0.8 { + ScoreValue::Low + } else if score > 0.4 { + ScoreValue::Medium + } else { + ScoreValue::High + } +} + fn to_percent(performance: u8) -> String { let fraction = performance as f32 / 100.0; format!("{fraction:.2}") @@ -341,6 +521,12 @@ mod test { two_letter_iso_country_code: "US".to_string(), latitude: 40.7128, longitude: -74.0060, + org: "Nym".to_string(), + city: "Genève".to_string(), + region: "Geneva".to_string(), + postal: "1200".to_string(), + timezone: "Europe/Zurich".to_string(), + asn: None, }; assert_eq!(location.two_letter_iso_country_code, "US"); @@ -355,18 +541,36 @@ mod test { two_letter_iso_country_code: "XX".to_string(), latitude: 90.0, longitude: 0.0, + org: "Nym".to_string(), + city: "Genève".to_string(), + region: "Geneva".to_string(), + postal: "1200".to_string(), + timezone: "Europe/Zurich".to_string(), + asn: None, }; let south_pole = Location { two_letter_iso_country_code: "AQ".to_string(), latitude: -90.0, longitude: 0.0, + org: "Nym".to_string(), + city: "Genève".to_string(), + region: "Geneva".to_string(), + postal: "1200".to_string(), + timezone: "Europe/Zurich".to_string(), + asn: None, }; let date_line = Location { two_letter_iso_country_code: "FJ".to_string(), latitude: -17.0, longitude: 180.0, + org: "Nym".to_string(), + city: "Genève".to_string(), + region: "Geneva".to_string(), + postal: "1200".to_string(), + 
timezone: "Europe/Zurich".to_string(), + asn: None, }; assert_eq!(north_pole.latitude, 90.0); diff --git a/nym-node-status-api/nym-node-status-api/src/http/state.rs b/nym-node-status-api/nym-node-status-api/src/http/state.rs index effaf517fea..9f4182540bf 100644 --- a/nym-node-status-api/nym-node-status-api/src/http/state.rs +++ b/nym-node-status-api/nym-node-status-api/src/http/state.rs @@ -492,6 +492,8 @@ impl HttpCache { None => { let new_node_stats = crate::db::queries::get_daily_stats(db) .await + .inspect_err(|err| tracing::error!("{err}")) + // still need to return some data on API in case of internal error .unwrap_or_default() .into_iter() .rev() diff --git a/nym-node-status-api/nym-node-status-api/src/main.rs b/nym-node-status-api/nym-node-status-api/src/main.rs index 2723222415d..c8c289566a0 100644 --- a/nym-node-status-api/nym-node-status-api/src/main.rs +++ b/nym-node-status-api/nym-node-status-api/src/main.rs @@ -5,12 +5,6 @@ use nym_task::signal::wait_for_signal; use nym_validator_client::nyxd::NyxdClient; use std::sync::Arc; -#[cfg(all(feature = "sqlite", feature = "pg"))] -compile_error!("Features 'sqlite' and 'pg' are mutually exclusive"); - -#[cfg(not(any(feature = "sqlite", feature = "pg")))] -compile_error!("Either 'sqlite' or 'pg' feature must be enabled"); - mod cli; mod db; mod http; diff --git a/nym-node-status-api/nym-node-status-api/src/monitor/geodata.rs b/nym-node-status-api/nym-node-status-api/src/monitor/geodata.rs index e9d6e554ea0..bb797d75cd4 100644 --- a/nym-node-status-api/nym-node-status-api/src/monitor/geodata.rs +++ b/nym-node-status-api/nym-node-status-api/src/monitor/geodata.rs @@ -82,6 +82,7 @@ pub(crate) struct Location { pub(crate) org: String, pub(crate) postal: String, pub(crate) timezone: String, + pub(crate) asn: Option, } impl From for Location { @@ -95,15 +96,16 @@ impl From for Location { org: value.org, postal: value.postal, timezone: value.timezone, + asn: value.asn, } } } #[derive(Debug, Clone, Deserialize)] 
pub(crate) struct LocationResponse { - #[serde(rename = "country")] + #[serde(rename = "country", default = "String::default")] pub(crate) two_letter_iso_country_code: String, - #[serde(deserialize_with = "deserialize_loc")] + #[serde(deserialize_with = "deserialize_loc", default = "Coordinates::default")] pub(crate) loc: Coordinates, #[serde(default = "String::default")] pub(crate) ip: String, @@ -117,6 +119,8 @@ pub(crate) struct LocationResponse { pub(crate) postal: String, #[serde(default = "String::default")] pub(crate) timezone: String, + + pub(crate) asn: Option, } fn deserialize_loc<'de, D>(deserializer: D) -> Result @@ -139,6 +143,24 @@ pub(crate) struct Coordinates { pub(crate) longitude: f64, } +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub(crate) struct Asn { + #[serde(default = "String::default")] + pub(crate) asn: String, + + #[serde(default = "String::default")] + pub(crate) name: String, + + #[serde(default = "String::default")] + pub(crate) domain: String, + + #[serde(default = "String::default")] + pub(crate) route: String, + + #[serde(rename = "type", default = "String::default")] + pub(crate) kind: String, +} + impl Location { pub(crate) fn empty() -> Self { Self::default() diff --git a/nym-node-status-api/nym-node-status-api/src/monitor/mod.rs b/nym-node-status-api/nym-node-status-api/src/monitor/mod.rs index 8df1bb3e7fc..592b0768c8a 100644 --- a/nym-node-status-api/nym-node-status-api/src/monitor/mod.rs +++ b/nym-node-status-api/nym-node-status-api/src/monitor/mod.rs @@ -198,22 +198,16 @@ impl Monitor { // let nodes_summary = vec![ - (NYMNODE_COUNT.to_string(), nym_node_count), - (ASSIGNED_MIXING_COUNT.to_string(), assigned_mixing_count), - (NYMNODES_DESCRIBED_COUNT.to_string(), described_nodes.len()), - (GATEWAYS_BONDED_COUNT.to_string(), count_bonded_gateways), - (ASSIGNED_ENTRY_COUNT.to_string(), assigned_entry_count), - (ASSIGNED_EXIT_COUNT.to_string(), assigned_exit_count), + (NYMNODE_COUNT, nym_node_count), + 
(ASSIGNED_MIXING_COUNT, assigned_mixing_count), + (NYMNODES_DESCRIBED_COUNT, described_nodes.len()), + (GATEWAYS_BONDED_COUNT, count_bonded_gateways), + (ASSIGNED_ENTRY_COUNT, assigned_entry_count), + (ASSIGNED_EXIT_COUNT, assigned_exit_count), // TODO dz doesn't make sense, could make sense with historical Nym // Nodes if we really need this data - ( - MIXNODES_HISTORICAL_COUNT.to_string(), - all_historical_mixnodes, - ), - ( - GATEWAYS_HISTORICAL_COUNT.to_string(), - all_historical_gateways, - ), + (MIXNODES_HISTORICAL_COUNT, all_historical_mixnodes), + (GATEWAYS_HISTORICAL_COUNT, all_historical_gateways), ]; let last_updated = now_utc(); @@ -245,8 +239,7 @@ impl Monitor { }, }; - queries::insert_summaries(&pool, nodes_summary.clone(), network_summary, last_updated) - .await?; + queries::insert_summaries(&pool, &nodes_summary, &network_summary, last_updated).await?; let mut log_lines: Vec = vec![]; for (key, value) in nodes_summary.iter() { @@ -386,26 +379,12 @@ impl Monitor { async fn historical_count(pool: &DbPool) -> anyhow::Result<(usize, usize)> { let mut conn = pool.acquire().await?; - #[cfg(feature = "sqlite")] - let all_historical_gateways = sqlx::query_scalar!(r#"SELECT count(id) FROM gateways"#) - .fetch_one(&mut *conn) - .await? - .cast_checked()?; - - #[cfg(feature = "pg")] let all_historical_gateways = sqlx::query_scalar!(r#"SELECT count(id) FROM gateways"#) .fetch_one(&mut *conn) .await? .unwrap_or(0) .cast_checked()?; - #[cfg(feature = "sqlite")] - let all_historical_mixnodes = sqlx::query_scalar!(r#"SELECT count(id) FROM mixnodes"#) - .fetch_one(&mut *conn) - .await? - .cast_checked()?; - - #[cfg(feature = "pg")] let all_historical_mixnodes = sqlx::query_scalar!(r#"SELECT count(id) FROM mixnodes"#) .fetch_one(&mut *conn) .await? 
diff --git a/nym-node-status-api/nym-node-status-api/src/node_scraper/helpers.rs b/nym-node-status-api/nym-node-status-api/src/node_scraper/helpers.rs index 9b861cd4c3c..128d276e889 100644 --- a/nym-node-status-api/nym-node-status-api/src/node_scraper/helpers.rs +++ b/nym-node-status-api/nym-node-status-api/src/node_scraper/helpers.rs @@ -149,7 +149,7 @@ pub async fn scrape_and_store_description(pool: &DbPool, node: ScraperNodeInfo) })?; let sanitized_description = sanitize_description(description, *node.node_id()); - insert_scraped_node_description(pool, node.node_kind.clone(), sanitized_description).await?; + insert_scraped_node_description(pool, &node.node_kind, &sanitized_description).await?; Ok(()) } @@ -194,51 +194,6 @@ pub async fn scrape_packet_stats(node: &ScraperNodeInfo) -> Result, - node_kind: &ScrapeNodeKind, - timestamp: UtcDateTime, - current_stats: &NodeStats, -) -> Result<()> { - use crate::db::queries::{get_raw_node_stats, insert_daily_node_stats_uncommitted}; - - let date_utc = format!( - "{:04}-{:02}-{:02}", - timestamp.year(), - timestamp.month() as u8, - timestamp.day() - ); - - // Get previous stats - let previous_stats = get_raw_node_stats(tx, node_kind).await?; - - let (diff_received, diff_sent, diff_dropped) = if let Some(prev) = previous_stats { - ( - calculate_packet_difference(current_stats.packets_received, prev.packets_received), - calculate_packet_difference(current_stats.packets_sent, prev.packets_sent), - calculate_packet_difference(current_stats.packets_dropped, prev.packets_dropped), - ) - } else { - (0, 0, 0) // No previous stats available - }; - - insert_daily_node_stats_uncommitted( - tx, - node_kind, - &date_utc, - NodeStats { - packets_received: diff_received, - packets_sent: diff_sent, - packets_dropped: diff_dropped, - }, - ) - .await?; - - Ok(()) -} - -#[cfg(feature = "pg")] pub async fn update_daily_stats_uncommitted( tx: &mut Transaction<'static, sqlx::Postgres>, node_kind: &ScrapeNodeKind, diff --git 
a/nym-node-status-api/nym-node-status-api/src/testruns/queue.rs b/nym-node-status-api/nym-node-status-api/src/testruns/queue.rs index 8ed220c6a38..29d24c1b82d 100644 --- a/nym-node-status-api/nym-node-status-api/src/testruns/queue.rs +++ b/nym-node-status-api/nym-node-status-api/src/testruns/queue.rs @@ -14,19 +14,20 @@ pub(crate) async fn try_queue_testrun( let timestamp = now.unix_timestamp(); let timestamp_pretty = now.to_string(); - let items = crate::db::query_as::( + let items = sqlx::query_as!( + GatewayInfoDto, r#"SELECT id, gateway_identity_key, self_described, explorer_pretty_bond FROM gateways - WHERE gateway_identity_key = ? + WHERE gateway_identity_key = $1 AND bonded = true ORDER BY gateway_identity_key LIMIT 1"#, + identity_key.clone() ) - .bind(identity_key.clone()) // TODO dz should call .fetch_one // TODO dz replace this in other queries as well .fetch(conn.as_mut()) @@ -46,7 +47,8 @@ pub(crate) async fn try_queue_testrun( // // check if there is already a test run for this gateway // - let items = crate::db::query_as::( + let items = sqlx::query_as!( + TestRunDto, r#"SELECT id, gateway_id, @@ -56,11 +58,11 @@ pub(crate) async fn try_queue_testrun( log, last_assigned_utc FROM testruns - WHERE gateway_id = ? AND status != 2 + WHERE gateway_id = $1 AND status != 2 ORDER BY id DESC LIMIT 1"#, + gateway_id ) - .bind(gateway_id) .fetch(conn.as_mut()) .try_collect::>() .await?; @@ -84,20 +86,6 @@ pub(crate) async fn try_queue_testrun( let status = TestRunStatus::Queued as i32; let log = format!("Test for {identity_key} requested at {timestamp_pretty} UTC\n\n"); - #[cfg(feature = "sqlite")] - let id = sqlx::query!( - "INSERT INTO testruns (gateway_id, status, ip_address, created_utc, log) VALUES (?, ?, ?, ?, ?)", - gateway_id, - status, - ip_address, - timestamp, - log, - ) - .execute(conn.as_mut()) - .await? 
- .last_insert_rowid(); - - #[cfg(feature = "pg")] let id = { let record = sqlx::query!( "INSERT INTO testruns (gateway_id, status, ip_address, created_utc, log) VALUES ($1, $2, $3, $4, $5) RETURNING id",