From 4ae39be0ac377b4208ad4653c6d80f3724e93b6e Mon Sep 17 00:00:00 2001 From: Oleksii Sholik Date: Thu, 15 Feb 2024 14:02:20 +0200 Subject: [PATCH 001/156] Add support for ?dialect=postgresql to /api/migrations Fix Elixir tests --- .../electric/lib/electric/plug/migrations.ex | 1 + .../lib/electric/postgres/replication.ex | 26 ++-- .../electric/test/electric/plug_test.exs | 118 ++++++++++++------ 3 files changed, 93 insertions(+), 52 deletions(-) diff --git a/components/electric/lib/electric/plug/migrations.ex b/components/electric/lib/electric/plug/migrations.ex index f53f810d84..5e67f66d65 100644 --- a/components/electric/lib/electric/plug/migrations.ex +++ b/components/electric/lib/electric/plug/migrations.ex @@ -46,6 +46,7 @@ defmodule Electric.Plug.Migrations do defp get_dialect(%{query_params: %{"dialect" => dialect_name}}) do case dialect_name do "sqlite" -> {:ok, Electric.Postgres.Dialect.SQLite} + "postgresql" -> {:ok, Electric.Postgres.Dialect.Postgresql} _ -> {:error, "unsupported dialect #{inspect(dialect_name)}"} end end diff --git a/components/electric/lib/electric/postgres/replication.ex b/components/electric/lib/electric/postgres/replication.ex index 8b08891b7d..0916ada59d 100644 --- a/components/electric/lib/electric/postgres/replication.ex +++ b/components/electric/lib/electric/postgres/replication.ex @@ -69,7 +69,7 @@ defmodule Electric.Postgres.Replication do {:ok, [], []} propagate_ast -> - {msg, relations} = build_replication_msg(propagate_ast, schema_version, dialect) + {msg, relations} = build_replication_msg(propagate_ast, stmt, schema_version, dialect) {:ok, [msg], relations} end @@ -90,6 +90,9 @@ defmodule Electric.Postgres.Replication do end end + defp to_sql(_ast, stmt, Dialect.Postgresql), do: stmt + defp to_sql(ast, _stmt, dialect), do: Dialect.to_sql(ast, dialect) + def affected_tables(stmts, dialect \\ @default_dialect) when is_list(stmts) do stmts |> Enum.flat_map(&get_affected_table/1) @@ -112,7 +115,7 @@ defmodule Electric.Postgres.Replication do [] end - defp build_replication_msg(ast, schema_version, dialect) do + defp build_replication_msg(ast, stmt, schema_version, dialect) do affected_tables = affected_tables(ast, dialect) relations = Enum.map(affected_tables, &{&1.schema, &1.name}) @@ -133,7 +136,7 @@ defmodule Electric.Postgres.Replication do ast, &%SatOpMigrate.Stmt{ type: stmt_type(&1), - sql: Dialect.to_sql(&1, dialect) + sql: to_sql(&1, stmt, dialect) } ) @@ -168,17 +171,17 @@ defmodule Electric.Postgres.Replication do defp replication_msg_table(%Proto.Table{} = table, dialect) do %SatOpMigrate.Table{ name: Dialect.table_name(table.name, dialect), - columns: Enum.map(table.columns, &replication_msg_table_col(&1, dialect)), + columns: Enum.map(table.columns, &replication_msg_table_col(&1)), fks: Enum.flat_map(table.constraints, &replication_msg_table_fk(&1, dialect)), - pks: Enum.flat_map(table.constraints, &replication_msg_table_pk(&1, dialect)) + pks: Enum.flat_map(table.constraints, &replication_msg_table_pk(&1)) } end - defp replication_msg_table_col(%Proto.Column{} = column, dialect) do + defp replication_msg_table_col(%Proto.Column{} = column) do %SatOpMigrate.Column{ name: column.name, pg_type: replication_msg_table_col_type(column.type), - sqlite_type: Dialect.type_name(column.type, dialect) + sqlite_type: Dialect.type_name(column.type, Dialect.SQLite) } end @@ -190,13 +193,8 @@ defmodule Electric.Postgres.Replication do } end - defp replication_msg_table_pk(%Proto.Constraint{constraint: {:primary, pk}}, _dialect) do - pk.keys - end - - 
defp replication_msg_table_pk(_constraint, _dialect) do - [] - end + defp replication_msg_table_pk(%Proto.Constraint{constraint: {:primary, pk}}), do: pk.keys + defp replication_msg_table_pk(_constraint), do: [] defp replication_msg_table_fk(%Proto.Constraint{constraint: {:foreign, fk}}, dialect) do [ diff --git a/components/electric/test/electric/plug_test.exs b/components/electric/test/electric/plug_test.exs index 689673a173..907622cd1d 100644 --- a/components/electric/test/electric/plug_test.exs +++ b/components/electric/test/electric/plug_test.exs @@ -56,7 +56,7 @@ defmodule Electric.PlugTest do |> sent_resp() end - test_tx "returns migrations translated to given dialect", fn conn -> + test_tx "returns migrations translated to the sqlite dialect", fn conn -> assert {:ok, _schema} = apply_migrations(conn) {:ok, _pid} = start_supervised({SchemaCache, [__connection__: conn, origin: "postgres_1"]}) @@ -70,34 +70,20 @@ defmodule Electric.PlugTest do {:ok, file_list} = :zip.extract(body, [:memory]) - assert file_list == [ + assert [ {~c"0001/migration.sql", "CREATE TABLE \"a\" (\n \"id\" TEXT NOT NULL,\n \"value\" TEXT NOT NULL,\n CONSTRAINT \"a_pkey\" PRIMARY KEY (\"id\")\n) WITHOUT ROWID;\n\n\nCREATE TABLE \"b\" (\n \"id\" TEXT NOT NULL,\n \"value\" TEXT NOT NULL,\n CONSTRAINT \"b_pkey\" PRIMARY KEY (\"id\")\n) WITHOUT ROWID;\n\n\nCREATE INDEX \"a_idx\" ON \"a\" (\"value\" ASC);\n"}, - { - ~c"0001/metadata.json", - "{\"format\":\"SatOpMigrate\",\"ops\":[\"GjIKAWESEgoCaWQSBFRFWFQaBgoEdXVpZBIVCgV2YWx1ZRIEVEVYVBoGCgR0ZXh0IgJpZAoEMDAwMRJ+EnxDUkVBVEUgVEFCTEUgImEiICgKICAiaWQiIFRFWFQgTk9UIE5VTEwsCiAgInZhbHVlIiBURVhUIE5PVCBOVUxMLAogIENPTlNUUkFJTlQgImFfcGtleSIgUFJJTUFSWSBLRVkgKCJpZCIpCikgV0lUSE9VVCBST1dJRDsK\",\"GjIKAWISEgoCaWQSBFRFWFQaBgoEdXVpZBIVCgV2YWx1ZRIEVEVYVBoGCgR0ZXh0IgJpZAoEMDAwMRJ+EnxDUkVBVEUgVEFCTEUgImIiICgKICAiaWQiIFRFWFQgTk9UIE5VTEwsCiAgInZhbHVlIiBURVhUIE5PVCBOVUxMLAogIENPTlNUUkFJTlQgImJfcGtleSIgUFJJTUFSWSBLRVkgKCJpZCIpCikgV0lUSE9VVCBST1dJRDsK\",\"CgQwMDAxEi8IARIrQ1JFQVRFIElOREVYICJhX2lkeCIgT04gImEiICgidmFsdWUiIEFTQyk7Cg==\"],\"protocol_version\":\"Electric.Satellite\",\"version\":\"0001\"}" - }, + {~c"0001/metadata.json", metadata_json_0001}, {~c"0002/migration.sql", "CREATE TABLE \"c\" (\n \"id\" TEXT NOT NULL,\n \"value\" TEXT NOT NULL,\n CONSTRAINT \"c_pkey\" PRIMARY KEY (\"id\")\n) WITHOUT ROWID;\n"}, - { - ~c"0002/metadata.json", - "{\"format\":\"SatOpMigrate\",\"ops\":[\"GjIKAWMSEgoCaWQSBFRFWFQaBgoEdXVpZBIVCgV2YWx1ZRIEVEVYVBoGCgR0ZXh0IgJpZAoEMDAwMhJ+EnxDUkVBVEUgVEFCTEUgImMiICgKICAiaWQiIFRFWFQgTk9UIE5VTEwsCiAgInZhbHVlIiBURVhUIE5PVCBOVUxMLAogIENPTlNUUkFJTlQgImNfcGtleSIgUFJJTUFSWSBLRVkgKCJpZCIpCikgV0lUSE9VVCBST1dJRDsK\"],\"protocol_version\":\"Electric.Satellite\",\"version\":\"0002\"}" - }, + {~c"0002/metadata.json", metadata_json_0002}, {~c"0003/migration.sql", "CREATE TABLE \"d\" (\n \"id\" TEXT NOT NULL,\n \"value\" TEXT NOT NULL,\n CONSTRAINT \"d_pkey\" PRIMARY KEY (\"id\")\n) WITHOUT ROWID;\n\n\nALTER TABLE \"d\" ADD COLUMN \"is_valid\" INTEGER;\n"}, - { - ~c"0003/metadata.json", - 
"{\"format\":\"SatOpMigrate\",\"ops\":[\"Gk8KAWQSEgoCaWQSBFRFWFQaBgoEdXVpZBIVCgV2YWx1ZRIEVEVYVBoGCgR0ZXh0EhsKCGlzX3ZhbGlkEgdJTlRFR0VSGgYKBGJvb2wiAmlkCgQwMDAzEn4SfENSRUFURSBUQUJMRSAiZCIgKAogICJpZCIgVEVYVCBOT1QgTlVMTCwKICAidmFsdWUiIFRFWFQgTk9UIE5VTEwsCiAgQ09OU1RSQUlOVCAiZF9wa2V5IiBQUklNQVJZIEtFWSAoImlkIikKKSBXSVRIT1VUIFJPV0lEOwo=\",\"Gk8KAWQSEgoCaWQSBFRFWFQaBgoEdXVpZBIVCgV2YWx1ZRIEVEVYVBoGCgR0ZXh0EhsKCGlzX3ZhbGlkEgdJTlRFR0VSGgYKBGJvb2wiAmlkCgQwMDAzEjMIBhIvQUxURVIgVEFCTEUgImQiIEFERCBDT0xVTU4gImlzX3ZhbGlkIiBJTlRFR0VSOwo=\"],\"protocol_version\":\"Electric.Satellite\",\"version\":\"0003\"}" - }, + {~c"0003/metadata.json", metadata_json_0003}, {~c"0004/migration.sql", "CREATE TABLE \"e\" (\n \"id\" TEXT NOT NULL,\n \"value\" TEXT NOT NULL,\n CONSTRAINT \"e_pkey\" PRIMARY KEY (\"id\")\n) WITHOUT ROWID;\n"}, - { - ~c"0004/metadata.json", - "{\"format\":\"SatOpMigrate\",\"ops\":[\"GjIKAWUSEgoCaWQSBFRFWFQaBgoEdXVpZBIVCgV2YWx1ZRIEVEVYVBoGCgR0ZXh0IgJpZAoEMDAwNBJ+EnxDUkVBVEUgVEFCTEUgImUiICgKICAiaWQiIFRFWFQgTk9UIE5VTEwsCiAgInZhbHVlIiBURVhUIE5PVCBOVUxMLAogIENPTlNUUkFJTlQgImVfcGtleSIgUFJJTUFSWSBLRVkgKCJpZCIpCikgV0lUSE9VVCBST1dJRDsK\"],\"protocol_version\":\"Electric.Satellite\",\"version\":\"0004\"}" - } - ] - - assert {~c"0001/metadata.json", json} = List.keyfind(file_list, ~c"0001/metadata.json", 0) + {~c"0004/metadata.json", metadata_json_0004} + ] = file_list assert {:ok, %{ @@ -105,7 +91,7 @@ defmodule Electric.PlugTest do "ops" => [op1, _op2, _op3], "protocol_version" => "Electric.Satellite", "version" => "0001" - }} = Jason.decode(json) + }} = Jason.decode(metadata_json_0001) assert {:ok, %SatOpMigrate{ @@ -136,17 +122,79 @@ defmodule Electric.PlugTest do version: "0001" }} = op1 |> Base.decode64!() |> SatOpMigrate.decode() - assert {~c"0002/metadata.json", json} = List.keyfind(file_list, ~c"0002/metadata.json", 0) + assert {:ok, %{"ops" => [_]}} = Jason.decode(metadata_json_0002) + assert {:ok, %{"ops" => [_, _]}} = Jason.decode(metadata_json_0003) + assert {:ok, %{"ops" => [_]}} = Jason.decode(metadata_json_0004) + end - assert {:ok, %{"ops" => [_]}} = Jason.decode(json) + test_tx "returns migrations translated to the postgresql dialect", fn conn -> + assert {:ok, _schema} = apply_migrations(conn) - assert {~c"0003/metadata.json", json} = List.keyfind(file_list, ~c"0003/metadata.json", 0) + {:ok, _pid} = start_supervised({SchemaCache, [__connection__: conn, origin: "postgres_1"]}) - assert {:ok, %{"ops" => [_, _]}} = Jason.decode(json) + resp = + conn(:get, "/api/migrations", %{"dialect" => "postgresql"}) + |> Electric.Plug.Router.call([]) - assert {~c"0004/metadata.json", json} = List.keyfind(file_list, ~c"0004/metadata.json", 0) + assert {200, _headers, body} = sent_resp(resp) + assert ["application/zip"] = get_resp_header(resp, "content-type") + + {:ok, file_list} = :zip.extract(body, [:memory]) + + assert [ + {~c"0001/migration.sql", + "CREATE TABLE a (id uuid PRIMARY KEY, value text NOT NULL);\n\nCREATE TABLE b (id uuid PRIMARY KEY, value text NOT NULL);\n\nCREATE INDEX a_idx ON a (value);"}, + {~c"0001/metadata.json", metadata_json_0001}, + {~c"0002/migration.sql", + "CREATE TABLE c (id uuid PRIMARY KEY, value text NOT NULL);"}, + {~c"0002/metadata.json", metadata_json_0002}, + {~c"0003/migration.sql", + "CREATE TABLE d (id uuid PRIMARY KEY, value text NOT NULL);\n\nALTER TABLE d ADD COLUMN is_valid boolean;"}, + {~c"0003/metadata.json", metadata_json_0003}, + {~c"0004/migration.sql", + "CREATE TABLE e (id uuid PRIMARY KEY, value text NOT NULL);"}, + {~c"0004/metadata.json", 
metadata_json_0004} + ] = file_list + + assert {:ok, + %{ + "format" => "SatOpMigrate", + "ops" => [op1, _op2, _op3], + "protocol_version" => "Electric.Satellite", + "version" => "0001" + }} = Jason.decode(metadata_json_0001) + + assert {:ok, + %SatOpMigrate{ + stmts: [ + %SatOpMigrate.Stmt{ + type: :CREATE_TABLE, + sql: "CREATE TABLE a (id uuid PRIMARY KEY, value text NOT NULL);" + } + ], + table: %SatOpMigrate.Table{ + name: "a", + columns: [ + %SatOpMigrate.Column{ + name: "id", + sqlite_type: "TEXT", + pg_type: %SatOpMigrate.PgColumnType{name: "uuid"} + }, + %SatOpMigrate.Column{ + name: "value", + sqlite_type: "TEXT", + pg_type: %SatOpMigrate.PgColumnType{name: "text"} + } + ], + fks: [], + pks: ["id"] + }, + version: "0001" + }} = op1 |> Base.decode64!() |> SatOpMigrate.decode() - assert {:ok, %{"ops" => [_]}} = Jason.decode(json) + assert {:ok, %{"version" => "0002", "ops" => [_]}} = Jason.decode(metadata_json_0002) + assert {:ok, %{"version" => "0003", "ops" => [_, _]}} = Jason.decode(metadata_json_0003) + assert {:ok, %{"version" => "0004", "ops" => [_]}} = Jason.decode(metadata_json_0004) end test_tx "can return migrations after a certain point", fn conn -> @@ -163,20 +211,14 @@ defmodule Electric.PlugTest do {:ok, file_list} = :zip.extract(body, [:memory]) - assert file_list == [ + assert [ {~c"0003/migration.sql", "CREATE TABLE \"d\" (\n \"id\" TEXT NOT NULL,\n \"value\" TEXT NOT NULL,\n CONSTRAINT \"d_pkey\" PRIMARY KEY (\"id\")\n) WITHOUT ROWID;\n\n\nALTER TABLE \"d\" ADD COLUMN \"is_valid\" INTEGER;\n"}, - { - ~c"0003/metadata.json", - "{\"format\":\"SatOpMigrate\",\"ops\":[\"Gk8KAWQSEgoCaWQSBFRFWFQaBgoEdXVpZBIVCgV2YWx1ZRIEVEVYVBoGCgR0ZXh0EhsKCGlzX3ZhbGlkEgdJTlRFR0VSGgYKBGJvb2wiAmlkCgQwMDAzEn4SfENSRUFURSBUQUJMRSAiZCIgKAogICJpZCIgVEVYVCBOT1QgTlVMTCwKICAidmFsdWUiIFRFWFQgTk9UIE5VTEwsCiAgQ09OU1RSQUlOVCAiZF9wa2V5IiBQUklNQVJZIEtFWSAoImlkIikKKSBXSVRIT1VUIFJPV0lEOwo=\",\"Gk8KAWQSEgoCaWQSBFRFWFQaBgoEdXVpZBIVCgV2YWx1ZRIEVEVYVBoGCgR0ZXh0EhsKCGlzX3ZhbGlkEgdJTlRFR0VSGgYKBGJvb2wiAmlkCgQwMDAzEjMIBhIvQUxURVIgVEFCTEUgImQiIEFERCBDT0xVTU4gImlzX3ZhbGlkIiBJTlRFR0VSOwo=\"],\"protocol_version\":\"Electric.Satellite\",\"version\":\"0003\"}" - }, + {~c"0003/metadata.json", _metadata_json_0003}, {~c"0004/migration.sql", "CREATE TABLE \"e\" (\n \"id\" TEXT NOT NULL,\n \"value\" TEXT NOT NULL,\n CONSTRAINT \"e_pkey\" PRIMARY KEY (\"id\")\n) WITHOUT ROWID;\n"}, - { - ~c"0004/metadata.json", - "{\"format\":\"SatOpMigrate\",\"ops\":[\"GjIKAWUSEgoCaWQSBFRFWFQaBgoEdXVpZBIVCgV2YWx1ZRIEVEVYVBoGCgR0ZXh0IgJpZAoEMDAwNBJ+EnxDUkVBVEUgVEFCTEUgImUiICgKICAiaWQiIFRFWFQgTk9UIE5VTEwsCiAgInZhbHVlIiBURVhUIE5PVCBOVUxMLAogIENPTlNUUkFJTlQgImVfcGtleSIgUFJJTUFSWSBLRVkgKCJpZCIpCikgV0lUSE9VVCBST1dJRDsK\"],\"protocol_version\":\"Electric.Satellite\",\"version\":\"0004\"}" - } - ] + {~c"0004/metadata.json", _metadata_json_0004} + ] = file_list end test "returns error if dialect missing", _cxt do From 1bef2d83bb5b4c2a30a0da50808caf284b5a8a31 Mon Sep 17 00:00:00 2001 From: Oleksii Sholik Date: Mon, 19 Feb 2024 16:26:46 +0200 Subject: [PATCH 002/156] Deprecate PgColumnType.sqlite_type sqlite_type is not needed since the client can always use pg_type which is the source of truth for the actual column type --- clients/typescript/src/migrators/triggers.ts | 7 +------ clients/typescript/src/satellite/process.ts | 8 +------- clients/typescript/test/satellite/common.ts | 8 ++++++++ protocol/satellite.proto | 7 +++++++ 4 files changed, 17 insertions(+), 13 deletions(-) diff --git a/clients/typescript/src/migrators/triggers.ts 
b/clients/typescript/src/migrators/triggers.ts index f334c0197e..6e0098a1d3 100644 --- a/clients/typescript/src/migrators/triggers.ts +++ b/clients/typescript/src/migrators/triggers.ts @@ -8,12 +8,7 @@ type ForeignKey = { } type ColumnName = string -type SQLiteType = string -type PgType = string -type ColumnType = { - sqliteType: SQLiteType - pgType: PgType -} +type ColumnType = string type ColumnTypes = Record export type Table = { diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index f574282ebd..51f95429cb 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -1655,13 +1655,7 @@ export function generateTriggersForTable(tbl: MigrationTable): Statement[] { } }), columnTypes: Object.fromEntries( - tbl.columns.map((col) => [ - col.name, - { - sqliteType: col.sqliteType.toUpperCase(), - pgType: col.pgType!.name.toUpperCase(), - }, - ]) + tbl.columns.map((col) => [col.name, col.pgType!.name.toUpperCase()]) ), } const fullTableName = table.namespace + '.' + table.tableName diff --git a/clients/typescript/test/satellite/common.ts b/clients/typescript/test/satellite/common.ts index 68f89a935b..4fd64b6561 100644 --- a/clients/typescript/test/satellite/common.ts +++ b/clients/typescript/test/satellite/common.ts @@ -366,11 +366,19 @@ export const personTable: Table = { primary: ['id'], foreignKeys: [], columnTypes: { +<<<<<<< HEAD id: { sqliteType: 'REAL', pgType: PgBasicType.PG_REAL }, name: { sqliteType: 'TEXT', pgType: PgBasicType.PG_TEXT }, age: { sqliteType: 'INTEGER', pgType: PgBasicType.PG_INTEGER }, bmi: { sqliteType: 'REAL', pgType: PgBasicType.PG_REAL }, int8: { sqliteType: 'INTEGER', pgType: PgBasicType.PG_INT8 }, blob: { sqliteType: 'BLOB', pgType: PgBasicType.PG_BYTEA }, +======= + id: PgBasicType.PG_REAL, + name: PgBasicType.PG_TEXT, + age: PgBasicType.PG_INTEGER, + bmi: PgBasicType.PG_REAL, + int8: PgBasicType.PG_INT8, +>>>>>>> 2007ecb76 (Deprecate PgColumnType.sqlite_type) }, } diff --git a/protocol/satellite.proto b/protocol/satellite.proto index 7676028432..2096780c60 100644 --- a/protocol/satellite.proto +++ b/protocol/satellite.proto @@ -404,8 +404,15 @@ message SatOpMigrate { repeated int32 size = 3; } message Column { + //reserved 2; + string name = 1; + + // deprecated + // leaving it here to avoid breaking TypeScript tests that have hard-coded, + // base64-encoded SatOpMigrate messages. string sqlite_type = 2; + PgColumnType pg_type = 3; } message ForeignKey { From 57cb5c8586c8245099549edc9bcd5b664e0de8f7 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 14 Mar 2024 09:43:24 +0100 Subject: [PATCH 003/156] Extend protocol with SQL dialect in start replication request. 
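
This adds a Dialect enum (SQLITE, POSTGRES) and an optional sql_dialect field
to SatInStartReplicationReq, so a client can announce which SQL dialect it
speaks and the server can serialize migrations accordingly. A sketch of how a
client can set it when building the request (the surrounding variable values
are illustrative):

    SatInStartReplicationReq.fromPartial({
      lsn,
      subscriptionIds,
      sqlDialect: SatInStartReplicationReq_Dialect.POSTGRES,
    })

If the field is left unset, the server falls back to the SQLite dialect.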
--- protocol/satellite.proto | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/protocol/satellite.proto b/protocol/satellite.proto index 2096780c60..6e09632cff 100644 --- a/protocol/satellite.proto +++ b/protocol/satellite.proto @@ -115,6 +115,11 @@ message SatInStartReplicationReq { NONE = 0; } + enum Dialect { + SQLITE = 0; + POSTGRES = 1; + } + reserved 3; // LSN position of the log on the producer side @@ -132,6 +137,10 @@ message SatInStartReplicationReq { * observed additional data before disconnect */ repeated uint64 observed_transaction_data = 6; + + // The SQL dialect used by the client + // Defaults to SQLite if not specified + optional Dialect sql_dialect = 6; // Note: // - a client might resume replication only for a subset of previous subscriptions From 73c49d1242e6cea412e118cdfc41a7d32296e1ca Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 14 Mar 2024 10:15:35 +0100 Subject: [PATCH 004/156] Modified SatelliteClient to specify the SQL dialect in the StartReplicationReq message. --- clients/typescript/src/config/index.ts | 10 +++++++++- clients/typescript/src/electric/index.ts | 4 ++-- clients/typescript/src/satellite/client.ts | 12 +++++++++++- clients/typescript/src/satellite/config.ts | 1 + clients/typescript/src/satellite/registry.ts | 1 + clients/typescript/test/satellite/client.test.ts | 1 + 6 files changed, 25 insertions(+), 4 deletions(-) diff --git a/clients/typescript/src/config/index.ts b/clients/typescript/src/config/index.ts index eb82fc5c6d..63dd11288f 100644 --- a/clients/typescript/src/config/index.ts +++ b/clients/typescript/src/config/index.ts @@ -44,6 +44,10 @@ export interface ElectricConfig { connectionBackOffOptions?: ConnectionBackOffOptions } +export type ElectricConfigWithDialect = ElectricConfig & { + dialect?: 'SQLite' | 'Postgres' +} + export type HydratedConfig = { auth: AuthConfig replication: { @@ -51,6 +55,7 @@ export type HydratedConfig = { port: number ssl: boolean timeout: number + dialect: 'SQLite' | 'Postgres' } debug: boolean connectionBackOffOptions: ConnectionBackOffOptions @@ -68,7 +73,9 @@ export type InternalElectricConfig = { connectionBackOffOptions?: ConnectionBackOffOptions } -export const hydrateConfig = (config: ElectricConfig): HydratedConfig => { +export const hydrateConfig = ( + config: ElectricConfigWithDialect +): HydratedConfig => { const auth = config.auth ?? {} const debug = config.debug ?? false @@ -86,6 +93,7 @@ export const hydrateConfig = (config: ElectricConfig): HydratedConfig => { port: port, ssl: sslEnabled, timeout: config.timeout ?? 3000, + dialect: config.dialect ?? 'SQLite', } const { diff --git a/clients/typescript/src/electric/index.ts b/clients/typescript/src/electric/index.ts index 9aedb036cb..4d7d02e0a3 100644 --- a/clients/typescript/src/electric/index.ts +++ b/clients/typescript/src/electric/index.ts @@ -1,4 +1,4 @@ -import { ElectricConfig, hydrateConfig } from '../config/index' +import { ElectricConfigWithDialect, hydrateConfig } from '../config/index' import { DatabaseAdapter } from './adapter' import { BundleMigrator, Migrator } from '../migrators/index' import { EventNotifier, Notifier } from '../notifiers/index' @@ -47,7 +47,7 @@ export const electrify = async >( dbDescription: DB, adapter: DatabaseAdapter, socketFactory: SocketFactory, - config: ElectricConfig = {}, + config: ElectricConfigWithDialect = {}, opts?: Omit ): Promise> => { setLogLevel(config.debug ? 
'TRACE' : 'WARN') diff --git a/clients/typescript/src/satellite/client.ts b/clients/typescript/src/satellite/client.ts index 60e202f952..38b4cea41b 100644 --- a/clients/typescript/src/satellite/client.ts +++ b/clients/typescript/src/satellite/client.ts @@ -26,6 +26,7 @@ import { Root, RootClientImpl, SatRpcRequest, + SatInStartReplicationReq_Dialect, } from '../_generated/protocol/satellite' import { getObjFromString, @@ -131,6 +132,7 @@ type EventEmitter = AsyncEventEmitter export class SatelliteClient implements Client { private opts: Required + private dialect: SatInStartReplicationReq_Dialect private emitter: EventEmitter @@ -194,6 +196,10 @@ export class SatelliteClient implements Client { this.emitter = new AsyncEventEmitter() this.opts = { ...satelliteClientDefaults, ...opts } + this.dialect = + opts.dialect === 'SQLite' + ? SatInStartReplicationReq_Dialect.SQLITE + : SatInStartReplicationReq_Dialect.POSTGRES this.socketFactory = socketFactory this.inbound = this.resetInboundReplication() @@ -365,7 +371,10 @@ export class SatelliteClient implements Client { ) ) } - request = SatInStartReplicationReq.fromPartial({ schemaVersion }) + request = SatInStartReplicationReq.fromPartial({ + schemaVersion, + sqlDialect: this.dialect, + }) } else { Log.info( `starting replication with lsn: ${base64.fromBytes( @@ -376,6 +385,7 @@ export class SatelliteClient implements Client { lsn, subscriptionIds, observedTransactionData, + sqlDialect: this.dialect, }) } diff --git a/clients/typescript/src/satellite/config.ts b/clients/typescript/src/satellite/config.ts index 4671550437..ba112602af 100644 --- a/clients/typescript/src/satellite/config.ts +++ b/clients/typescript/src/satellite/config.ts @@ -64,6 +64,7 @@ export interface SatelliteClientOpts { ssl: boolean timeout: number pushPeriod?: number + dialect: 'SQLite' | 'Postgres' } export const validateConfig = (config: any) => { diff --git a/clients/typescript/src/satellite/registry.ts b/clients/typescript/src/satellite/registry.ts index 45a774e3b4..f65f277b88 100644 --- a/clients/typescript/src/satellite/registry.ts +++ b/clients/typescript/src/satellite/registry.ts @@ -208,6 +208,7 @@ export class GlobalRegistry extends BaseRegistry { port: config.replication.port, ssl: config.replication.ssl, timeout: config.replication.timeout, + dialect: config.replication.dialect, } const client = new SatelliteClient( diff --git a/clients/typescript/test/satellite/client.test.ts b/clients/typescript/test/satellite/client.test.ts index 9375125751..0b09625095 100644 --- a/clients/typescript/test/satellite/client.test.ts +++ b/clients/typescript/test/satellite/client.test.ts @@ -46,6 +46,7 @@ test.beforeEach((t) => { timeout: 10000, ssl: false, pushPeriod: 100, + dialect: 'SQLite', }) const clientId = '91eba0c8-28ba-4a86-a6e8-42731c2c6694' From 072eba42aa0089ff3df5eaa99050112f2a1c0aa1 Mon Sep 17 00:00:00 2001 From: Oleksii Sholik Date: Thu, 14 Mar 2024 11:25:41 +0200 Subject: [PATCH 005/156] Support different dialects when encoding migrations for Satellite --- .../lib/electric/satellite/protocol.ex | 12 +- .../lib/electric/satellite/protocol/state.ex | 2 + .../lib/electric/satellite/serialization.ex | 114 ++++++++++-------- 3 files changed, 72 insertions(+), 56 deletions(-) diff --git a/components/electric/lib/electric/satellite/protocol.ex b/components/electric/lib/electric/satellite/protocol.ex index f497583965..3bd0afe6cb 100644 --- a/components/electric/lib/electric/satellite/protocol.ex +++ b/components/electric/lib/electric/satellite/protocol.ex @@ -125,11 
+125,12 @@ defmodule Electric.Satellite.Protocol do %State{} = state ) do Logger.debug( - "Received start replication request lsn: #{inspect(client_lsn)} with options: #{inspect(opts)}" + "Received start replication request lsn: #{inspect(client_lsn)} with options: #{inspect(opts)} and dialect: #{inspect(msg.sql_dialect)}" ) with :ok <- validate_schema_version(msg.schema_version), {:ok, lsn} <- validate_lsn(client_lsn) do + state = %{state | sql_dialect: decode_sql_dialect(msg.sql_dialect)} handle_start_replication_request(msg, lsn, state) else {:error, :bad_schema_version} -> @@ -644,11 +645,11 @@ defmodule Electric.Satellite.Protocol do # The offset here comes from the producer @spec handle_outgoing_tx({Transaction.t(), any}, State.t()) :: {[%SatRelation{}], [%SatOpLog{}], OutRep.t()} - defp handle_outgoing_tx({tx, offset}, %State{out_rep: out_rep}) do + defp handle_outgoing_tx({tx, offset}, %State{out_rep: out_rep} = state) do Logger.debug("trans: #{inspect(tx)} with offset #{inspect(offset)}") {serialized_log, unknown_relations, known_relations} = - Serialization.serialize_trans(tx, offset, out_rep.relations) + Serialization.serialize_trans(tx, offset, out_rep.relations, state.sql_dialect) if unknown_relations != [], do: Logger.debug("Sending previously unseen relations: #{inspect(unknown_relations)}") @@ -1222,4 +1223,9 @@ defmodule Electric.Satellite.Protocol do %State{state | expiration_timer: nil} |> schedule_auth_expiration(exp_time) end + + defp decode_sql_dialect(default) when default in [nil, :SQLITE], + do: Electric.Postgres.Dialect.SQLite + + defp decode_sql_dialect(:POSTGRES), do: Electric.Postgres.Dialect.Postgresql end diff --git a/components/electric/lib/electric/satellite/protocol/state.ex b/components/electric/lib/electric/satellite/protocol/state.ex index 129f25274b..8eb9679e06 100644 --- a/components/electric/lib/electric/satellite/protocol/state.ex +++ b/components/electric/lib/electric/satellite/protocol/state.ex @@ -17,6 +17,7 @@ defmodule Electric.Satellite.Protocol.State do subscriptions: %{}, subscription_data_fun: nil, move_in_data_fun: nil, + sql_dialect: Electric.Postgres.Dialect.SQLite, telemetry: nil @type t() :: %__MODULE__{ @@ -33,6 +34,7 @@ defmodule Electric.Satellite.Protocol.State do subscriptions: map(), subscription_data_fun: fun(), move_in_data_fun: fun(), + sql_dialect: Electric.Postgres.Dialect.SQLite | Electric.Postgres.Dialect.Postgresql, telemetry: Telemetry.t() | nil } diff --git a/components/electric/lib/electric/satellite/serialization.ex b/components/electric/lib/electric/satellite/serialization.ex index c023f77113..d4a2389866 100644 --- a/components/electric/lib/electric/satellite/serialization.ex +++ b/components/electric/lib/electric/satellite/serialization.ex @@ -26,12 +26,19 @@ defmodule Electric.Satellite.Serialization do @type relation_mapping() :: %{Changes.relation() => {PB.relation_id(), [Replication.Column.name()]}} + @default_dialect Electric.Postgres.Dialect.SQLite + @doc """ Serialize from internal format to Satellite PB format """ - @spec serialize_trans(Transaction.t(), term(), relation_mapping()) :: + @spec serialize_trans(Transaction.t(), term(), relation_mapping(), module()) :: {[%SatOpLog{}], [Changes.relation()], relation_mapping()} - def serialize_trans(%Transaction{} = trans, offset, known_relations) do + def serialize_trans( + %Transaction{} = trans, + offset, + known_relations, + dialect \\ @default_dialect + ) do tm = DateTime.to_unix(trans.commit_timestamp, :millisecond) lsn = 
Electric.Postgres.CachedWal.Api.serialize_wal_position(offset) @@ -42,7 +49,8 @@ defmodule Electric.Satellite.Serialization do migration_version: nil, schema: nil, new_relations: [], - known_relations: known_relations + known_relations: known_relations, + sql_dialect: dialect } state = Enum.reduce(trans.changes, state, &serialize_change/2) @@ -102,56 +110,7 @@ defmodule Electric.Satellite.Serialization do end defp serialize_change(record, state) when is_migration_relation(record.relation) do - %{ - origin: origin, - schema: schema, - ops: ops, - migration_version: version, - new_relations: new_relations - } = state - - state = - case(record) do - ddl when is_ddl_relation(ddl.relation) -> - {:ok, v} = SchemaCache.tx_version(origin, ddl.record) - {:ok, sql} = Extension.extract_ddl_sql(ddl.record) - - Logger.info("Serializing migration #{inspect(v)}: #{inspect(sql)}") - - # unlikely since the extension tables have constraints that prevent this - if version && version != v, - do: raise("Got DDL transaction with differing migration versions") - - {:ok, schema_version} = maybe_load_schema(origin, schema, v) - - {ops, add_relations} = - case Replication.migrate(schema_version, sql) do - {:ok, [op], relations} -> - {[%SatTransOp{op: {:migrate, op}} | ops], relations} - - {:ok, [], []} -> - {ops, []} - end - - known_relations = - Enum.reduce(add_relations, state.known_relations, fn relation, known -> - {_relation_id, _columns, _, known} = load_new_relation(relation, known) - known - end) - - %{ - state - | ops: ops, - migration_version: v, - schema: schema_version, - new_relations: new_relations ++ add_relations, - known_relations: known_relations - } - - _ -> - state - end - + state = serialize_migration(record, state) %{state | is_migration: true} end @@ -183,6 +142,55 @@ defmodule Electric.Satellite.Serialization do %{state | ops: [op | ops], new_relations: new_relations, known_relations: known_relations} end + defp serialize_migration(%{relation: relation, record: record}, state) + when is_ddl_relation(relation) do + %{ + origin: origin, + schema: schema, + ops: ops, + migration_version: version, + new_relations: new_relations, + sql_dialect: dialect + } = state + + {:ok, v} = SchemaCache.tx_version(origin, record) + {:ok, sql} = Extension.extract_ddl_sql(record) + + Logger.info("Serializing migration #{inspect(v)}: #{inspect(sql)}") + + # unlikely since the extension tables have constraints that prevent this + if version && version != v, + do: raise("Got DDL transaction with differing migration versions") + + {:ok, schema_version} = maybe_load_schema(origin, schema, v) + + {ops, add_relations} = + case Replication.migrate(schema_version, sql, dialect) do + {:ok, [op], relations} -> + {[%SatTransOp{op: {:migrate, op}} | ops], relations} + + {:ok, [], []} -> + {ops, []} + end + + known_relations = + Enum.reduce(add_relations, state.known_relations, fn relation, known -> + {_relation_id, _columns, _, known} = load_new_relation(relation, known) + known + end) + + %{ + state + | ops: ops, + migration_version: v, + schema: schema_version, + new_relations: new_relations ++ add_relations, + known_relations: known_relations + } + end + + defp serialize_migration(_record, state), do: state + defp maybe_load_schema(origin, nil, version) do with {:ok, schema} <- Extension.SchemaCache.load(origin, version) do {:ok, schema} From 6bbe30823280225f192a2f0aae0e27f18d3c5e22 Mon Sep 17 00:00:00 2001 From: Oleksii Sholik Date: Mon, 19 Feb 2024 16:30:33 +0200 Subject: [PATCH 006/156] Update generated protocol 
messages --- .../src/_generated/protocol/satellite.ts | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/clients/typescript/src/_generated/protocol/satellite.ts b/clients/typescript/src/_generated/protocol/satellite.ts index 9f7eadb65b..bee41f38e9 100644 --- a/clients/typescript/src/_generated/protocol/satellite.ts +++ b/clients/typescript/src/_generated/protocol/satellite.ts @@ -122,6 +122,11 @@ export interface SatInStartReplicationReq { * observed additional data before disconnect */ observedTransactionData: Long[]; + /** + * The SQL dialect used by the client + * Defaults to SQLite if not specified + */ + sqlDialect?: SatInStartReplicationReq_Dialect | undefined; } export enum SatInStartReplicationReq_Option { @@ -130,6 +135,12 @@ export enum SatInStartReplicationReq_Option { UNRECOGNIZED = -1, } +export enum SatInStartReplicationReq_Dialect { + SQLITE = 0, + POSTGRES = 1, + UNRECOGNIZED = -1, +} + /** (Producer) The result of the start replication requests */ export interface SatInStartReplicationResp { $type: "Electric.Satellite.SatInStartReplicationResp"; @@ -457,9 +468,15 @@ export interface SatOpMigrate_PgColumnType { size: number[]; } +/** reserved 2; */ export interface SatOpMigrate_Column { $type: "Electric.Satellite.SatOpMigrate.Column"; name: string; + /** + * deprecated + * leaving it here to avoid breaking TypeScript tests that have hard-coded, + * base64-encoded SatOpMigrate messages. + */ sqliteType: string; pgType: SatOpMigrate_PgColumnType | undefined; } @@ -1102,6 +1119,7 @@ function createBaseSatInStartReplicationReq(): SatInStartReplicationReq { subscriptionIds: [], schemaVersion: undefined, observedTransactionData: [], + sqlDialect: undefined, }; } @@ -1128,6 +1146,9 @@ export const SatInStartReplicationReq = { writer.uint64(v); } writer.ldelim(); + if (message.sqlDialect !== undefined) { + writer.uint32(48).int32(message.sqlDialect); + } return writer; }, @@ -1193,6 +1214,12 @@ export const SatInStartReplicationReq = { } break; + if (tag !== 48) { + break; + } + + message.sqlDialect = reader.int32() as any; + continue; } if ((tag & 7) === 4 || tag === 0) { break; @@ -1213,6 +1240,7 @@ export const SatInStartReplicationReq = { message.subscriptionIds = object.subscriptionIds?.map((e) => e) || []; message.schemaVersion = object.schemaVersion ?? undefined; message.observedTransactionData = object.observedTransactionData?.map((e) => Long.fromValue(e)) || []; + message.sqlDialect = object.sqlDialect ?? undefined; return message; }, }; From a266d46f1d102499642cd676b1803c7578fb7b2b Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 7 Feb 2024 09:19:34 +0100 Subject: [PATCH 007/156] Driver for Node Postgres and Tauri Postgres. 
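
Adds two new drivers: a node-postgres driver backed by embedded-postgres, used
for unit testing, and a tauri-postgres driver that reaches a Rust-side Postgres
through Tauri's invoke bridge. Both build on the generic serial DatabaseAdapter.
A minimal sketch of opening the embedded-postgres database (the name and
directory are illustrative):

    const db = await ElectricDatabase.init({
      name: 'my-app',
      databaseDir: './tmp/pg/my-app',
    })
    const adapter = new DatabaseAdapter(db)
    const rows = await adapter.query({ sql: 'SELECT 1 AS one' })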
--- clients/typescript/package.json | 7 + .../src/drivers/node-postgres/adapter.ts | 26 +++ .../src/drivers/node-postgres/database.ts | 98 +++++++++ .../src/drivers/node-postgres/index.ts | 42 ++++ .../src/drivers/node-postgres/mock.ts | 21 ++ .../src/drivers/tauri-postgres/adapter.ts | 26 +++ .../src/drivers/tauri-postgres/database.ts | 86 ++++++++ .../src/drivers/tauri-postgres/index.ts | 41 ++++ .../src/drivers/tauri-postgres/mock.ts | 21 ++ .../test/drivers/node-postgres.test.ts | 192 ++++++++++++++++++ .../test/drivers/tauri-postgres.test.ts | 31 +++ 11 files changed, 591 insertions(+) create mode 100644 clients/typescript/src/drivers/node-postgres/adapter.ts create mode 100644 clients/typescript/src/drivers/node-postgres/database.ts create mode 100644 clients/typescript/src/drivers/node-postgres/index.ts create mode 100644 clients/typescript/src/drivers/node-postgres/mock.ts create mode 100644 clients/typescript/src/drivers/tauri-postgres/adapter.ts create mode 100644 clients/typescript/src/drivers/tauri-postgres/database.ts create mode 100644 clients/typescript/src/drivers/tauri-postgres/index.ts create mode 100644 clients/typescript/src/drivers/tauri-postgres/mock.ts create mode 100644 clients/typescript/test/drivers/node-postgres.test.ts create mode 100644 clients/typescript/test/drivers/tauri-postgres.test.ts diff --git a/clients/typescript/package.json b/clients/typescript/package.json index bb51fe3b00..e85e5cd097 100644 --- a/clients/typescript/package.json +++ b/clients/typescript/package.json @@ -56,6 +56,7 @@ "./generic": "./dist/drivers/generic/index.js", "./node": "./dist/drivers/better-sqlite3/index.js", "./react": "./dist/frameworks/react/index.js", + "./tauri-postgres": "./dist/drivers/tauri-postgres/index.js", "./vuejs": "./dist/frameworks/vuejs/index.js", "./wa-sqlite": "./dist/drivers/wa-sqlite/index.js", "./tauri": "./dist/drivers/tauri-sqlite/index.js", @@ -93,6 +94,9 @@ "react": [ "./dist/frameworks/react/index.d.ts" ], + "tauri-postgres": [ + "./dist/drivers/tauri-postgres/index.d.ts" + ], "vuejs": [ "./dist/frameworks/vuejs/index.d.ts" ], @@ -166,6 +170,7 @@ "dependencies": { "@electric-sql/prisma-generator": "workspace:*", "@prisma/client": "4.8.1", + "@tauri-apps/api": "^1.5.3", "async-mutex": "^0.4.0", "base-64": "^1.0.0", "better-sqlite3": "^8.4.0", @@ -219,6 +224,7 @@ "@types/lodash.throttle": "^4.1.7", "@types/lodash.uniqwith": "^4.5.9", "@types/node": "^18.8.4", + "@types/pg": "^8.11.0", "@types/prompts": "^2.4.9", "@types/react": "^18.0.18", "@types/tcp-port-used": "^1.0.2", @@ -229,6 +235,7 @@ "@vue/test-utils": "^2.4.4", "ava": "^4.3.1", "concurrently": "^8.2.2", + "embedded-postgres": "16.1.1-beta.9", "eslint": "^8.22.0", "expo-sqlite": "^13.0.0", "glob": "^10.3.10", diff --git a/clients/typescript/src/drivers/node-postgres/adapter.ts b/clients/typescript/src/drivers/node-postgres/adapter.ts new file mode 100644 index 0000000000..6d6c5f0c7b --- /dev/null +++ b/clients/typescript/src/drivers/node-postgres/adapter.ts @@ -0,0 +1,26 @@ +import { Database } from './database' +import { Row } from '../../util/types' +import { Statement } from '../../util' +import { SerialDatabaseAdapter as GenericDatabaseAdapter } from '../generic' +import { RunResult } from '../../electric/adapter' + +export class DatabaseAdapter extends GenericDatabaseAdapter { + readonly db: Database + + constructor(db: Database) { + super() + this.db = db + } + + async _run(statement: Statement): Promise { + const { rowsModified } = await this.db.exec(statement) + return { + 
rowsAffected: rowsModified, + } + } + + async _query(statement: Statement): Promise { + const { rows } = await this.db.exec(statement) + return rows + } +} diff --git a/clients/typescript/src/drivers/node-postgres/database.ts b/clients/typescript/src/drivers/node-postgres/database.ts new file mode 100644 index 0000000000..d9fb618d3d --- /dev/null +++ b/clients/typescript/src/drivers/node-postgres/database.ts @@ -0,0 +1,98 @@ +// TODO: fix the below +// was probably added because the driver does not support passing a BigInt +// and expects it to be passed as a string instead +/* +(BigInt.prototype as any).toJSON = function () { + return this.toString(); +}; +*/ + +import type { Client } from 'pg' +import EmbeddedPostgres from 'embedded-postgres' +import { Row, Statement } from '../../util' + +export type QueryResult = { + rows: Row[] + rowsModified: number +} + +export interface Database { + name: string + exec(statement: Statement): Promise + stop(): Promise +} + +export class ElectricDatabase implements Database { + // Do not use this constructor directly. + // Create a Database instance using the static `init` method instead. + private constructor( + public name: string, + private postgres: EmbeddedPostgres, + private db: Client + ) {} + + async exec(statement: Statement): Promise { + const { rows, rowCount } = await this.db.query( + statement.sql, + statement.args + ) + return { + rows, + rowsModified: rowCount ?? 0, + } + } + + async stop() { + await this.postgres.stop() + } + + // Creates and opens a DB backed by Postgres + static async init(config: PostgresConfig) { + // Initialize Postgres + const pg = new EmbeddedPostgres({ + databaseDir: config.databaseDir, + user: config.user ?? 'postgres', + password: config.password ?? 'password', + port: config.port ?? 54321, + persistent: config.persistent ?? true, + }) + + await pg.initialise() + await pg.start() + await pg.createDatabase(config.name) + const db = pg.getPgClient() + await db.connect() + + // We use the database directory as the name + // because it uniquely identifies the DB + return new ElectricDatabase(config.databaseDir, pg, db) + } +} + +type PostgresConfig = { + /** + * The name of the database. + */ + name: string + /** + * The location where the data should be persisted to. + */ + databaseDir: string + /** + * Default is 'postgres'. + */ + user?: string + /** + * Default is 'password'. + */ + password?: string + /** + * Default is 54321. + */ + port?: number + /** + * When set to fale, the database will be deleted when the DB is stopped. + * Default is true. + */ + persistent?: boolean +} diff --git a/clients/typescript/src/drivers/node-postgres/index.ts b/clients/typescript/src/drivers/node-postgres/index.ts new file mode 100644 index 0000000000..1c2558b693 --- /dev/null +++ b/clients/typescript/src/drivers/node-postgres/index.ts @@ -0,0 +1,42 @@ +import { DatabaseAdapter } from './adapter' +import { Database, ElectricDatabase } from './database' +import { ElectricConfig } from '../../config' +import { electrify as baseElectrify, ElectrifyOptions } from '../../electric' +import { WebSocketWeb } from '../../sockets/web' +import { ElectricClient, DbSchema } from '../../client/model' +import { PgBundleMigrator } from '../../migrators/bundle' + +export { DatabaseAdapter, ElectricDatabase } +export type { Database } + +/** + * This embdedded-postgres driver is used for unit testing. + * The real driver to run Postgres is the `sqlx` driver + * which uses Tauri as a bridge to a Postgres driver written in Rust. 
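+ *
+ * A minimal usage sketch (illustrative only; `schema` stands for a generated
+ * client schema and `config` for an ElectricConfig):
+ *
+ *   const db = await ElectricDatabase.init({ name: 'mydb', databaseDir: './pg-data' })
+ *   const electric = await electrify(db, schema, config)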
+ */ +export const electrify = async >( + db: T, + dbDescription: DB, + config: ElectricConfig, + opts?: ElectrifyOptions +): Promise> => { + const dbName = db.name + const adapter = opts?.adapter || new DatabaseAdapter(db) + const migrator = + opts?.migrator || new PgBundleMigrator(adapter, dbDescription.migrations) + const socketFactory = opts?.socketFactory || WebSocketWeb + + const client = await baseElectrify( + dbName, + dbDescription, + adapter, + socketFactory, + config, + { + migrator, + ...opts, + } + ) + + return client +} diff --git a/clients/typescript/src/drivers/node-postgres/mock.ts b/clients/typescript/src/drivers/node-postgres/mock.ts new file mode 100644 index 0000000000..99d7e162e5 --- /dev/null +++ b/clients/typescript/src/drivers/node-postgres/mock.ts @@ -0,0 +1,21 @@ +import { Database, QueryResult } from './database' +import { DbName, Statement } from '../../util' + +export class MockDatabase implements Database { + name: DbName + fail: Error | undefined + + constructor(dbName: DbName, fail?: Error) { + this.name = dbName + this.fail = fail + } + + async exec(_statement: Statement): Promise { + if (typeof this.fail !== 'undefined') throw this.fail + + return { + rows: [{ val: 1 }, { val: 2 }], + rowsModified: 0, + } + } +} diff --git a/clients/typescript/src/drivers/tauri-postgres/adapter.ts b/clients/typescript/src/drivers/tauri-postgres/adapter.ts new file mode 100644 index 0000000000..6d6c5f0c7b --- /dev/null +++ b/clients/typescript/src/drivers/tauri-postgres/adapter.ts @@ -0,0 +1,26 @@ +import { Database } from './database' +import { Row } from '../../util/types' +import { Statement } from '../../util' +import { SerialDatabaseAdapter as GenericDatabaseAdapter } from '../generic' +import { RunResult } from '../../electric/adapter' + +export class DatabaseAdapter extends GenericDatabaseAdapter { + readonly db: Database + + constructor(db: Database) { + super() + this.db = db + } + + async _run(statement: Statement): Promise { + const { rowsModified } = await this.db.exec(statement) + return { + rowsAffected: rowsModified, + } + } + + async _query(statement: Statement): Promise { + const { rows } = await this.db.exec(statement) + return rows + } +} diff --git a/clients/typescript/src/drivers/tauri-postgres/database.ts b/clients/typescript/src/drivers/tauri-postgres/database.ts new file mode 100644 index 0000000000..df412330f2 --- /dev/null +++ b/clients/typescript/src/drivers/tauri-postgres/database.ts @@ -0,0 +1,86 @@ +// TODO: fix the below +// was probably added because the driver does not support passing a BigInt +// and expects it to be passed as a string instead +/* +(BigInt.prototype as any).toJSON = function () { + return this.toString(); +}; +*/ + +import { Row, Statement } from '../../util' + +export type QueryResult = { + rows: Row[] + rowsModified: number +} + +type TauriQueryResult = { + result: string + rows_modified: number +} + +export interface Database { + name: string + exec(statement: Statement): Promise + stop(): Promise +} + +export class ElectricDatabase implements Database { + // Do not use this constructor directly. + // Create a Database instance using the static `init` method instead. + private constructor(public name: string, private invoke: Function) {} + + /* + async tauri_init(name: string) { + this.invoke("tauri_init", { name }); + } + */ + + private tauriExec(statement: Statement): Promise { + return this.invoke('tauri_exec_command', { + sql: statement.sql, + values: statement.args ?? 
[], // TODO: have to modify the Rust code to expect just the values instead of bind params + /* + bind_params: { + keys: [], + values: statement.args ?? [], + } + */ + }) + } + + async exec(statement: Statement): Promise { + const { result, rows_modified: rowsModified } = await this.tauriExec( + statement + ) + const rows = JSON.parse(result, (_key: any, val: string) => { + // The values are strings because they were serialized + // in order to send them from the Rust backend to here + if (val[0] == '\u0000') { + // transforms an integer from its string rerpesentation as four code points into an actual int + return ( + val.charCodeAt(1) * 2 ** 32 + + val.charCodeAt(2) * 2 ** 16 + + val.charCodeAt(3) * 1 + ) + } + if (val === 'NULL') { + return null + } + return val + }) + return { + rows, + rowsModified, + } + } + + async stop(): Promise { + await this.invoke('tauri_stop_postgres') + } + + static async init(dbName: string, invoke: Function) { + await invoke('tauri_init_command', { name: dbName }) + return new ElectricDatabase(dbName, invoke) + } +} diff --git a/clients/typescript/src/drivers/tauri-postgres/index.ts b/clients/typescript/src/drivers/tauri-postgres/index.ts new file mode 100644 index 0000000000..6151f00144 --- /dev/null +++ b/clients/typescript/src/drivers/tauri-postgres/index.ts @@ -0,0 +1,41 @@ +import { DatabaseAdapter } from './adapter' +import { Database, ElectricDatabase } from './database' +import { ElectricConfig } from '../../config' +import { electrify as baseElectrify, ElectrifyOptions } from '../../electric' +import { WebSocketWeb } from '../../sockets/web' +import { ElectricClient, DbSchema } from '../../client/model' +import { PgBundleMigrator } from '../../migrators/bundle' + +export { DatabaseAdapter, ElectricDatabase } +export type { Database } + +/** + * This driver uses `sqlx` and Tauri + * as a bridge to a Postgres driver written in Rust. 
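+ *
+ * A minimal usage sketch (illustrative only; `invoke` is Tauri's invoke
+ * function, `schema` a generated client schema and `config` an ElectricConfig):
+ *
+ *   const db = await ElectricDatabase.init('mydb', invoke)
+ *   const electric = await electrify(db, schema, config)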
+ */ +export const electrify = async >( + db: T, + dbDescription: DB, + config: ElectricConfig, + opts?: ElectrifyOptions +): Promise> => { + const dbName = db.name + const adapter = opts?.adapter || new DatabaseAdapter(db) + const migrator = + opts?.migrator || new PgBundleMigrator(adapter, dbDescription.migrations) + const socketFactory = opts?.socketFactory || WebSocketWeb + + const client = await baseElectrify( + dbName, + dbDescription, + adapter, + socketFactory, + config, + { + migrator, + ...opts, + } + ) + + return client +} diff --git a/clients/typescript/src/drivers/tauri-postgres/mock.ts b/clients/typescript/src/drivers/tauri-postgres/mock.ts new file mode 100644 index 0000000000..99d7e162e5 --- /dev/null +++ b/clients/typescript/src/drivers/tauri-postgres/mock.ts @@ -0,0 +1,21 @@ +import { Database, QueryResult } from './database' +import { DbName, Statement } from '../../util' + +export class MockDatabase implements Database { + name: DbName + fail: Error | undefined + + constructor(dbName: DbName, fail?: Error) { + this.name = dbName + this.fail = fail + } + + async exec(_statement: Statement): Promise { + if (typeof this.fail !== 'undefined') throw this.fail + + return { + rows: [{ val: 1 }, { val: 2 }], + rowsModified: 0, + } + } +} diff --git a/clients/typescript/test/drivers/node-postgres.test.ts b/clients/typescript/test/drivers/node-postgres.test.ts new file mode 100644 index 0000000000..7438cdb829 --- /dev/null +++ b/clients/typescript/test/drivers/node-postgres.test.ts @@ -0,0 +1,192 @@ +import test from 'ava' + +import { ElectricDatabase } from '../../src/drivers/node-postgres' +import { MockDatabase } from '../../src/drivers/node-postgres/mock' +import { DatabaseAdapter } from '../../src/drivers/node-postgres' +import fs from 'fs/promises' + +test('database adapter run works', async (t) => { + const db = new MockDatabase('test.db') + const adapter = new DatabaseAdapter(db) + + const sql = 'drop table badgers' + const result = await adapter.run({ sql }) + + t.is(result.rowsAffected, 0) +}) + +test('database adapter query works', async (t) => { + const db = new MockDatabase('test.db') + const adapter = new DatabaseAdapter(db) + + const sql = 'select * from bars' + const result = await adapter.query({ sql }) + + t.deepEqual(result, [ + { + val: 1, + }, + { + val: 2, + }, + ]) +}) + +// Test with an actual embedded-postgres DB +async function makeAdapter() { + const db = await ElectricDatabase.init({ + name: 'driver-test', + databaseDir: './tmp/pg/db', + persistent: false, + }) + + const adapter = new DatabaseAdapter(db) + const createTableSql = + 'CREATE TABLE IF NOT EXISTS Post(id TEXT PRIMARY KEY, title TEXT, contents TEXT, nbr integer);' + await adapter.run({ sql: createTableSql }) + const stop = async () => { + await db.stop() + await fs.rm('./tmp', { recursive: true, force: true }) + } + return { adapter, stop } +} + +test.serial('adapter run works on real DB', async (t) => { + const { adapter, stop } = await makeAdapter() + const insertRecordSql = + "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)" + const res = await adapter.run({ sql: insertRecordSql }) + t.is(res.rowsAffected, 1) + await stop() +}) + +test.serial('adapter query works on real DB', async (t) => { + const { adapter, stop } = await makeAdapter() + const insertRecordSql = + "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)" + await adapter.run({ sql: insertRecordSql }) + + const selectSql = + "SELECT * FROM Post WHERE (id = ('i1')) AND (nbr = 
(18)) LIMIT 1" + const res = await adapter.query({ sql: selectSql }) + t.deepEqual(res, [{ id: 'i1', title: 't1', contents: 'c1', nbr: 18 }]) + await stop() +}) + +test.serial('adapter runInTransaction works on real DB', async (t) => { + const { adapter, stop } = await makeAdapter() + const insertRecord1Sql = + "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)" + const insertRecord2Sql = + "INSERT INTO Post (id, title, contents, nbr) VALUES ('i2', 't2', 'c2', 25)" + + const txRes = await adapter.runInTransaction( + { sql: insertRecord1Sql }, + { sql: insertRecord2Sql } + ) + + t.is(txRes.rowsAffected, 2) + + const selectAll = 'SELECT id FROM Post' + const res = await adapter.query({ sql: selectAll }) + + t.deepEqual(res, [{ id: 'i1' }, { id: 'i2' }]) + await stop() +}) + +test.serial('adapter runInTransaction rolls back on conflict', async (t) => { + const { adapter, stop } = await makeAdapter() + const insertRecord1Sql = + "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)" + const insertRecord2Sql = + "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't2', 'c2', 25)" + + try { + await adapter.runInTransaction( + { sql: insertRecord1Sql }, + { sql: insertRecord2Sql } + ) + t.fail() // the transaction should be rejected because the primary key of the second record already exists + } catch (err) { + const castError = err as { code: string; detail: string } + t.is(castError.code, '23505') + t.is(castError.detail, 'Key (id)=(i1) already exists.') + + // Check that no posts were added to the DB + const selectAll = 'SELECT id FROM Post' + const res = await adapter.query({ sql: selectAll }) + t.deepEqual(res, []) + } + await stop() +}) + +test.serial( + 'adapter supports dependent queries in transaction on real DB', + async (t) => { + const { adapter, stop } = await makeAdapter() + const [txRes, rowsAffected] = (await adapter.transaction>( + (tx, setResult) => { + let rowsAffected = 0 + tx.run( + { + sql: "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)", + }, + (tx2, res) => { + rowsAffected += res.rowsAffected + const select = { sql: "SELECT nbr FROM Post WHERE id = 'i1'" } + tx2.query(select, (tx3, rows) => { + const [res] = rows as unknown as Array<{ nbr: number }> + const newNbr = res.nbr + 2 + tx3.run( + { + sql: `INSERT INTO Post (id, title, contents, nbr) VALUES ('i2', 't2', 'c2', ${newNbr})`, + }, + (_, res) => { + rowsAffected += res.rowsAffected + setResult([newNbr, rowsAffected]) + } + ) + }) + } + ) + } + )) as unknown as Array + + t.is(txRes, 20) + t.is(rowsAffected, 2) + + const selectAll = 'SELECT * FROM Post' + const res = await adapter.query({ sql: selectAll }) + + t.deepEqual(res, [ + { id: 'i1', title: 't1', contents: 'c1', nbr: 18 }, + { id: 'i2', title: 't2', contents: 'c2', nbr: 20 }, + ]) + await stop() + } +) + +test.serial('adapter rolls back dependent queries on conflict', async (t) => { + const { adapter, stop } = await makeAdapter() + try { + await adapter.transaction((tx) => { + tx.run({ + sql: "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)", + }) + tx.run({ + sql: "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't2', 'c2', 20)", + }) + }) + t.fail() // the transaction should be rejected because the primary key of the second record already exists + } catch (err) { + const castError = err as { code: string; detail: string } + t.is(castError.code, '23505') + t.is(castError.detail, 'Key (id)=(i1) already exists.') + + // Check that no posts were 
added to the DB + const selectAll = 'SELECT id FROM Post' + const res = await adapter.query({ sql: selectAll }) + t.deepEqual(res, []) + } + await stop() +}) diff --git a/clients/typescript/test/drivers/tauri-postgres.test.ts b/clients/typescript/test/drivers/tauri-postgres.test.ts new file mode 100644 index 0000000000..0f9aae907b --- /dev/null +++ b/clients/typescript/test/drivers/tauri-postgres.test.ts @@ -0,0 +1,31 @@ +import test from 'ava' + +import { MockDatabase } from '../../src/drivers/tauri-postgres/mock' +import { DatabaseAdapter } from '../../src/drivers/tauri-postgres' + +test('database adapter run works', async (t) => { + const db = new MockDatabase('test.db') + const adapter = new DatabaseAdapter(db) + + const sql = 'drop table badgers' + const result = await adapter.run({ sql }) + + t.is(result.rowsAffected, 0) +}) + +test('database adapter query works', async (t) => { + const db = new MockDatabase('test.db') + const adapter = new DatabaseAdapter(db) + + const sql = 'select * from bars' + const result = await adapter.query({ sql }) + + t.deepEqual(result, [ + { + val: 1, + }, + { + val: 2, + }, + ]) +}) From 991ae2de77f76ce27cec691ae89e27f0a65af662 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 15 Feb 2024 10:26:53 +0100 Subject: [PATCH 008/156] Removed obsolete comments and logs --- .../test/migrators/postgres/builder.test.ts | 362 +++++++++ .../typescript/test/satellite/client.test.ts | 1 - .../satellite/postgres/process.tags.test.ts | 696 ++++++++++++++++++ .../test/satellite/process.tags.test.ts | 7 - 4 files changed, 1058 insertions(+), 8 deletions(-) create mode 100644 clients/typescript/test/migrators/postgres/builder.test.ts create mode 100644 clients/typescript/test/satellite/postgres/process.tags.test.ts diff --git a/clients/typescript/test/migrators/postgres/builder.test.ts b/clients/typescript/test/migrators/postgres/builder.test.ts new file mode 100644 index 0000000000..d8a57c045d --- /dev/null +++ b/clients/typescript/test/migrators/postgres/builder.test.ts @@ -0,0 +1,362 @@ +import test from 'ava' +import { dedent } from 'ts-dedent' +import { makeMigration, parseMetadata } from '../../../src/migrators/builder' +import { loadMigrations } from '../../../src/cli/migrations/builder' +import { + SatOpMigrate, + SatOpMigrate_Table, + SatOpMigrate_Type, + SatOpMigrate_Stmt, + SatOpMigrate_Column, + SatOpMigrate_PgColumnType, + SatOpMigrate_ForeignKey, +} from '../../../src/_generated/protocol/satellite' +import _m0 from 'protobufjs/minimal.js' +import path from 'path' +import { pgBuilder } from '../../../src/migrators/query-builder' +import { DatabaseAdapter } from '../../../src/drivers/node-postgres' +import { makePgDatabase } from '../../support/node-postgres' +import { PgBundleMigrator } from '../../../src/migrators' + +function encodeSatOpMigrateMsg(request: SatOpMigrate) { + return ( + SatOpMigrate.encode(request, _m0.Writer.create()).finish() as any + ).toString('base64') +} + +const migrationMetaData = { + format: 'SatOpMigrate', + ops: [ + encodeSatOpMigrateMsg( + SatOpMigrate.fromPartial({ + version: '20230613112725_814', + stmts: [ + SatOpMigrate_Stmt.fromPartial({ + type: SatOpMigrate_Type.CREATE_TABLE, + sql: 'CREATE TABLE "main"."stars" (\n "id" TEXT NOT NULL PRIMARY KEY,\n "avatar_url" TEXT NOT NULL,\n "name" TEXT,\n "starred_at" TEXT NOT NULL,\n "username" TEXT NOT NULL\n);\n', + }), + ], + table: SatOpMigrate_Table.fromPartial({ + name: 'stars', + columns: [ + SatOpMigrate_Column.fromPartial({ + name: 'id', + sqliteType: 'TEXT', + pgType: 
SatOpMigrate_PgColumnType.fromPartial({ + name: 'text', + array: [], + size: [], + }), + }), + SatOpMigrate_Column.fromPartial({ + name: 'avatar_url', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'text', + array: [], + size: [], + }), + }), + SatOpMigrate_Column.fromPartial({ + name: 'name', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'text', + array: [], + size: [], + }), + }), + SatOpMigrate_Column.fromPartial({ + name: 'starred_at', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'text', + array: [], + size: [], + }), + }), + SatOpMigrate_Column.fromPartial({ + name: 'username', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'text', + array: [], + size: [], + }), + }), + ], + fks: [], + pks: ['id'], + }), + }) + ), + ], + protocol_version: 'Electric.Satellite', + version: '20230613112725_814', +} + +test('parse migration meta data', (t) => { + const metaData = parseMetadata(migrationMetaData) + t.is(metaData.ops[0].table?.name, 'stars') + t.is(metaData.ops[0].table?.columns.length, 5) +}) + +test('generate migration from meta data', (t) => { + const metaData = parseMetadata(migrationMetaData) + const migration = makeMigration(metaData, pgBuilder) + t.is(migration.version, migrationMetaData.version) + t.is( + migration.statements[0], + 'CREATE TABLE "main"."stars" (\n "id" TEXT NOT NULL PRIMARY KEY,\n "avatar_url" TEXT NOT NULL,\n "name" TEXT,\n "starred_at" TEXT NOT NULL,\n "username" TEXT NOT NULL\n);\n' + ) + t.is( + migration.statements[3], + dedent` + CREATE OR REPLACE FUNCTION update_ensure_main_stars_primarykey_function() + RETURNS TRIGGER AS $$ + BEGIN + IF OLD."id" IS DISTINCT FROM NEW."id" THEN + RAISE EXCEPTION 'Cannot change the value of column id as it belongs to the primary key'; + END IF; + RETURN NEW; + END; + $$ LANGUAGE plpgsql; + ` + ) + + t.is( + migration.statements[4], + dedent` + CREATE TRIGGER update_ensure_main_stars_primarykey + BEFORE UPDATE ON "main"."stars" + FOR EACH ROW + EXECUTE FUNCTION update_ensure_main_stars_primarykey_function(); + ` + ) +}) + +test('make migration for table with FKs', (t) => { + /* + SatOpMigrate_ForeignKey.fromPartial({ + fkCols: [''] + }) + */ + + const migration = { + format: 'SatOpMigrate', + ops: [ + encodeSatOpMigrateMsg( + SatOpMigrate.fromPartial({ + version: '1', + stmts: [ + SatOpMigrate_Stmt.fromPartial({ + type: 0, + sql: 'CREATE TABLE "main"."tenants" (\n "id" TEXT NOT NULL,\n "name" TEXT NOT NULL,\n CONSTRAINT "tenants_pkey" PRIMARY KEY ("id")\n) WITHOUT ROWID;\n', + }), + ], + table: SatOpMigrate_Table.fromPartial({ + name: 'tenants', + columns: [ + SatOpMigrate_Column.fromPartial({ + name: 'id', + sqliteType: 'TEXT', + pgType: { + $type: 'Electric.Satellite.SatOpMigrate.PgColumnType', + name: 'uuid', + array: [], + size: [], + }, + }), + SatOpMigrate_Column.fromPartial({ + name: 'name', + sqliteType: 'TEXT', + pgType: { + $type: 'Electric.Satellite.SatOpMigrate.PgColumnType', + name: 'text', + array: [], + size: [], + }, + }), + ], + fks: [], + pks: ['id'], + }), + }) + ), + encodeSatOpMigrateMsg( + SatOpMigrate.fromPartial({ + version: '1', + stmts: [ + SatOpMigrate_Stmt.fromPartial({ + type: 0, + sql: 'CREATE TABLE "main"."users" (\n "id" TEXT NOT NULL,\n "name" TEXT NOT NULL,\n "email" TEXT NOT NULL,\n "password_hash" TEXT NOT NULL,\n CONSTRAINT "users_pkey" PRIMARY KEY ("id")\n) WITHOUT ROWID;\n', + }), + ], + table: SatOpMigrate_Table.fromPartial({ + name: 'users', + columns: [ + 
SatOpMigrate_Column.fromPartial({ + name: 'id', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'uuid', + array: [], + size: [], + }), + }), + SatOpMigrate_Column.fromPartial({ + name: 'name', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'text', + array: [], + size: [], + }), + }), + SatOpMigrate_Column.fromPartial({ + name: 'email', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'text', + array: [], + size: [], + }), + }), + SatOpMigrate_Column.fromPartial({ + name: 'password_hash', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'text', + array: [], + size: [], + }), + }), + ], + fks: [], + pks: ['id'], + }), + }) + ), + encodeSatOpMigrateMsg( + SatOpMigrate.fromPartial({ + version: '1', + stmts: [ + SatOpMigrate_Stmt.fromPartial({ + type: 0, + sql: 'CREATE TABLE "main"."tenant_users" (\n "tenant_id" TEXT NOT NULL,\n "user_id" TEXT NOT NULL,\n CONSTRAINT "tenant_users_tenant_id_fkey" FOREIGN KEY ("tenant_id") REFERENCES "tenants" ("id") ON DELETE CASCADE,\n CONSTRAINT "tenant_users_user_id_fkey" FOREIGN KEY ("user_id") REFERENCES "users" ("id") ON DELETE CASCADE,\n CONSTRAINT "tenant_users_pkey" PRIMARY KEY ("tenant_id", "user_id")\n) WITHOUT ROWID;\n', + }), + ], + table: SatOpMigrate_Table.fromPartial({ + name: 'tenant_users', + columns: [ + SatOpMigrate_Column.fromPartial({ + name: 'tenant_id', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'uuid', + array: [], + size: [], + }), + }), + SatOpMigrate_Column.fromPartial({ + name: 'user_id', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'uuid', + array: [], + size: [], + }), + }), + ], + fks: [ + SatOpMigrate_ForeignKey.fromPartial({ + fkCols: ['tenant_id'], + pkTable: 'tenants', + pkCols: ['id'], + }), + SatOpMigrate_ForeignKey.fromPartial({ + fkCols: ['user_id'], + pkTable: 'users', + pkCols: ['id'], + }), + ], + pks: ['tenant_id', 'user_id'], + }), + }) + ), + ], + protocol_version: 'Electric.Satellite', + version: '1', + } + + //const migrateMetaData = 
JSON.parse(`{"format":"SatOpMigrate","ops":["GjcKB3RlbmFudHMSEgoCaWQSBFRFWFQaBgoEdXVpZBIUCgRuYW1lEgRURVhUGgYKBHRleHQiAmlkCgExEooBEocBQ1JFQVRFIFRBQkxFICJ0ZW5hbnRzIiAoCiAgImlkIiBURVhUIE5PVCBOVUxMLAogICJuYW1lIiBURVhUIE5PVCBOVUxMLAogIENPTlNUUkFJTlQgInRlbmFudHNfcGtleSIgUFJJTUFSWSBLRVkgKCJpZCIpCikgV0lUSE9VVCBST1dJRDsK","GmsKBXVzZXJzEhIKAmlkEgRURVhUGgYKBHV1aWQSFAoEbmFtZRIEVEVYVBoGCgR0ZXh0EhUKBWVtYWlsEgRURVhUGgYKBHRleHQSHQoNcGFzc3dvcmRfaGFzaBIEVEVYVBoGCgR0ZXh0IgJpZAoBMRLAARK9AUNSRUFURSBUQUJMRSAidXNlcnMiICgKICAiaWQiIFRFWFQgTk9UIE5VTEwsCiAgIm5hbWUiIFRFWFQgTk9UIE5VTEwsCiAgImVtYWlsIiBURVhUIE5PVCBOVUxMLAogICJwYXNzd29yZF9oYXNoIiBURVhUIE5PVCBOVUxMLAogIENPTlNUUkFJTlQgInVzZXJzX3BrZXkiIFBSSU1BUlkgS0VZICgiaWQiKQopIFdJVEhPVVQgUk9XSUQ7Cg==","GoYBCgx0ZW5hbnRfdXNlcnMSGQoJdGVuYW50X2lkEgRURVhUGgYKBHV1aWQSFwoHdXNlcl9pZBIEVEVYVBoGCgR1dWlkGhgKCXRlbmFudF9pZBIHdGVuYW50cxoCaWQaFAoHdXNlcl9pZBIFdXNlcnMaAmlkIgl0ZW5hbnRfaWQiB3VzZXJfaWQKATESkgMSjwNDUkVBVEUgVEFCTEUgInRlbmFudF91c2VycyIgKAogICJ0ZW5hbnRfaWQiIFRFWFQgTk9UIE5VTEwsCiAgInVzZXJfaWQiIFRFWFQgTk9UIE5VTEwsCiAgQ09OU1RSQUlOVCAidGVuYW50X3VzZXJzX3RlbmFudF9pZF9ma2V5IiBGT1JFSUdOIEtFWSAoInRlbmFudF9pZCIpIFJFRkVSRU5DRVMgInRlbmFudHMiICgiaWQiKSBPTiBERUxFVEUgQ0FTQ0FERSwKICBDT05TVFJBSU5UICJ0ZW5hbnRfdXNlcnNfdXNlcl9pZF9ma2V5IiBGT1JFSUdOIEtFWSAoInVzZXJfaWQiKSBSRUZFUkVOQ0VTICJ1c2VycyIgKCJpZCIpIE9OIERFTEVURSBDQVNDQURFLAogIENPTlNUUkFJTlQgInRlbmFudF91c2Vyc19wa2V5IiBQUklNQVJZIEtFWSAoInRlbmFudF9pZCIsICJ1c2VyX2lkIikKKSBXSVRIT1VUIFJPV0lEOwo="],"protocol_version":"Electric.Satellite","version":"1"}`) + const metaData = parseMetadata(migration) + makeMigration(metaData, pgBuilder) + t.pass() +}) + +test('generate index creation migration from meta data', (t) => { + const metaData = parseMetadata({ + format: 'SatOpMigrate', + ops: [ + encodeSatOpMigrateMsg( + SatOpMigrate.fromPartial({ + version: '20230613112725_814', + stmts: [ + SatOpMigrate_Stmt.create({ + type: SatOpMigrate_Type.CREATE_INDEX, + sql: 'CREATE INDEX idx_stars_username ON stars(username);', + }), + ], + }) + ), + ], + protocol_version: 'Electric.Satellite', + version: '20230613112725_814', + }) + const migration = makeMigration(metaData, pgBuilder) + t.is(migration.version, migrationMetaData.version) + t.deepEqual(migration.statements, [ + 'CREATE INDEX idx_stars_username ON stars(username);', + ]) +}) + +const migrationsFolder = path.join('./test/migrators/support/migrations') + +test('read migration meta data', async (t) => { + const migrations = await loadMigrations(migrationsFolder, pgBuilder) + const versions = migrations.map((m) => m.version) + t.deepEqual(versions, ['20230613112725_814', '20230613112735_992']) +}) + +test('load migration from meta data', async (t) => { + const { db, stop } = await makePgDatabase('load-migration-meta-data', 5500) + const migration = makeMigration(parseMetadata(migrationMetaData), pgBuilder) + const adapter = new DatabaseAdapter(db) + const migrator = new PgBundleMigrator(adapter, [migration]) + + // Apply the migration + await migrator.up() + + // Check that the DB is initialized with the stars table + const tables = await adapter.query({ + sql: ` + SELECT table_name + FROM information_schema.tables + WHERE table_schema = 'main' AND table_name = 'stars';`, + }) + + const starIdx = tables.findIndex((tbl) => tbl.table_name === 'stars') + t.assert(starIdx >= 0) // must exist + + const columns = await adapter + .query({ + sql: `SELECT column_name + FROM information_schema.columns + WHERE table_name = 'stars';`, + }) + .then((columns) => columns.map((column) => column.column_name)) + + 
t.deepEqual(columns, ['id', 'avatar_url', 'name', 'starred_at', 'username']) + await stop() +}) diff --git a/clients/typescript/test/satellite/client.test.ts b/clients/typescript/test/satellite/client.test.ts index 0b09625095..d737594d43 100644 --- a/clients/typescript/test/satellite/client.test.ts +++ b/clients/typescript/test/satellite/client.test.ts @@ -575,7 +575,6 @@ test.serial('send transaction', async (t) => { ] const transaction = toTransactions(opLogEntries, relations) - // console.log(transaction) t.plan(7) // We expect exactly 1 + 3 messages to be sent by the client, with 2 checks per non-relation message diff --git a/clients/typescript/test/satellite/postgres/process.tags.test.ts b/clients/typescript/test/satellite/postgres/process.tags.test.ts new file mode 100644 index 0000000000..b7515aa5bc --- /dev/null +++ b/clients/typescript/test/satellite/postgres/process.tags.test.ts @@ -0,0 +1,696 @@ +import anyTest, { TestFn } from 'ava' +import Long from 'long' + +import { + OPTYPES, + generateTag, + encodeTags, + opLogEntryToChange, +} from '../../../src/satellite/oplog' + +import { + generateRemoteOplogEntry, + genEncodedTags, + getPgMatchingShadowEntries as getMatchingShadowEntries, +} from '../../support/satellite-helpers' +import { Statement } from '../../../src/util/types' + +import { + makePgContext, + cleanAndStopSatellite, + relations, + ContextType, +} from '../common' + +const test = anyTest as TestFn +let port = 5100 +test.beforeEach(async (t) => { + await makePgContext(t, port++) +}) +test.afterEach.always(cleanAndStopSatellite) + +test('basic rules for setting tags', async (t) => { + const { adapter, runMigrations, satellite, authState } = t.context + await runMigrations() + + await satellite._setAuthState(authState) + const clientId = satellite._authState?.clientId ?? 
'test_client' + + await adapter.run({ + sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', null)`, + }) + + const txDate1 = await satellite._performSnapshot() + let shadow = await getMatchingShadowEntries(adapter) + t.is(shadow.length, 1) + t.is(shadow[0].tags, genEncodedTags(clientId, [txDate1])) + + await adapter.run({ + sql: `UPDATE main.parent SET value = 'local1', other = 3 WHERE id = 1`, + }) + + const txDate2 = await satellite._performSnapshot() + shadow = await getMatchingShadowEntries(adapter) + t.is(shadow.length, 1) + t.is(shadow[0].tags, genEncodedTags(clientId, [txDate2])) + + await adapter.run({ + sql: `UPDATE main.parent SET value = 'local2', other = 4 WHERE id = 1`, + }) + + const txDate3 = await satellite._performSnapshot() + shadow = await getMatchingShadowEntries(adapter) + t.is(shadow.length, 1) + t.is(shadow[0].tags, genEncodedTags(clientId, [txDate3])) + + await adapter.run({ + sql: `DELETE FROM main.parent WHERE id = 1`, + }) + + const txDate4 = await satellite._performSnapshot() + shadow = await getMatchingShadowEntries(adapter) + t.is(shadow.length, 0) + + const entries = await satellite._getEntries() + t.is(entries[0].clearTags, encodeTags([])) + t.is(entries[1].clearTags, genEncodedTags(clientId, [txDate1])) + t.is(entries[2].clearTags, genEncodedTags(clientId, [txDate2])) + t.is(entries[3].clearTags, genEncodedTags(clientId, [txDate3])) + + t.not(txDate1, txDate2) + t.not(txDate2, txDate3) + t.not(txDate3, txDate4) +}) + +test('TX1=INSERT, TX2=DELETE, TX3=INSERT, ack TX1', async (t) => { + const { adapter, runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) + + const clientId = satellite._authState?.clientId ?? 'test_id' + + // Local INSERT + const stmts1 = { + sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3)`, + args: ['1', 'local', null], + } + await adapter.runInTransaction(stmts1) + const txDate1 = await satellite._performSnapshot() + + const localEntries1 = await satellite._getEntries() + const shadowEntry1 = await getMatchingShadowEntries(adapter, localEntries1[0]) + + // shadow tag is time of snapshot + const tag1 = genEncodedTags(clientId, [txDate1]) + t.is(tag1, shadowEntry1[0].tags) + // clearTag is empty + t.like(localEntries1[0], { + clearTags: JSON.stringify([]), + timestamp: txDate1.toISOString(), + }) + + // Local DELETE + const stmts2 = { + sql: `DELETE FROM main.parent WHERE id=$1`, + args: ['1'], + } + await adapter.runInTransaction(stmts2) + const txDate2 = await satellite._performSnapshot() + + const localEntries2 = await satellite._getEntries() + const shadowEntry2 = await getMatchingShadowEntries(adapter, localEntries2[1]) + + // shadowTag is empty + t.is(0, shadowEntry2.length) + // clearTags contains previous shadowTag + t.like(localEntries2[1], { + clearTags: tag1, + timestamp: txDate2.toISOString(), + }) + + // Local INSERT + const stmts3 = { + sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3)`, + args: ['1', 'local', null], + } + await adapter.runInTransaction(stmts3) + const txDate3 = await satellite._performSnapshot() + + const localEntries3 = await satellite._getEntries() + const shadowEntry3 = await getMatchingShadowEntries(adapter, localEntries3[1]) + + const tag3 = genEncodedTags(clientId, [txDate3]) + // shadow tag is tag3 + t.is(tag3, shadowEntry3[0].tags) + // clearTags is empty after a DELETE + t.like(localEntries3[2], { + clearTags: JSON.stringify([]), + timestamp: txDate3.toISOString(), + }) + 
+ // apply incomig operation (local operation ack) + const ackEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + txDate1.getTime(), + tag1, + { + id: 1, + value: 'local', + other: null, + }, + undefined + ) + + const ackDataChange = opLogEntryToChange(ackEntry, relations) + satellite.relations = relations // satellite must be aware of the relations in order to turn the `ackDataChange` DataChange into an OpLogEntry + const tx = { + origin: clientId, + commit_timestamp: Long.fromNumber((txDate1 as Date).getTime()), + changes: [ackDataChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(tx) + + // validate that garbage collection has been triggered + t.is(2, (await satellite._getEntries()).length) + + const shadow = await getMatchingShadowEntries(adapter) + t.like( + shadow[0], + { + tags: genEncodedTags(clientId, [txDate3]), + }, + 'error: tag1 was reintroduced after merging acked operation' + ) +}) + +test('remote tx (INSERT) concurrently with local tx (INSERT -> DELETE)', async (t) => { + const { adapter, runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) + + const stmts: Statement[] = [] + + // For this key we will choose remote Tx, such that: Local TM > Remote TX + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, + args: ['1', 'local', null], + }) + stmts.push({ sql: `DELETE FROM main.parent WHERE id = 1` }) + // For this key we will choose remote Tx, such that: Local TM < Remote TX + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, + args: ['2', 'local', null], + }) + stmts.push({ sql: `DELETE FROM main.parent WHERE id = 2` }) + await adapter.runInTransaction(...stmts) + + const txDate1 = await satellite._performSnapshot() + + const prevTs = txDate1.getTime() - 1 + const nextTs = txDate1.getTime() + 1 + + const prevEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + prevTs, + genEncodedTags('remote', [prevTs]), + { + id: 1, + value: 'remote', + other: 1, + }, + undefined + ) + const nextEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + nextTs, + genEncodedTags('remote', [nextTs]), + { + id: 2, + value: 'remote', + other: 2, + }, + undefined + ) + + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s + + const prevChange = opLogEntryToChange(prevEntry, relations) + const prevTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(prevTs), + changes: [prevChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(prevTx) + + const nextChange = opLogEntryToChange(nextEntry, relations) + const nextTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(nextTs), + changes: [nextChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(nextTx) + + const shadow = await getMatchingShadowEntries(adapter) + const expectedShadow = [ + { + namespace: 'main', + tablename: 'parent', + primaryKey: '{"id":1}', + tags: genEncodedTags('remote', [prevTs]), + }, + { + namespace: 'main', + tablename: 'parent', + primaryKey: '{"id":2}', + tags: genEncodedTags('remote', [nextTs]), + }, + ] + t.deepEqual(shadow, expectedShadow) + + const userTable = await adapter.query({ sql: `SELECT * FROM main.parent;` }) + + // In both cases insert wins over delete, but + // for id = 1 CR picks local data before delete, 
while + // for id = 2 CR picks remote data + const expectedUserTable = [ + { id: 1, value: 'local', other: null }, + { id: 2, value: 'remote', other: 2 }, + ] + t.deepEqual(expectedUserTable, userTable) +}) + +test('remote tx (INSERT) concurrently with 2 local txses (INSERT -> DELETE)', async (t) => { + const { adapter, runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) + + let stmts: Statement[] = [] + + // For this key we will choose remote Tx, such that: Local TM > Remote TX + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, + args: ['1', 'local', null], + }) + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, + args: ['2', 'local', null], + }) + await adapter.runInTransaction(...stmts) + const txDate1 = await satellite._performSnapshot() + + stmts = [] + // For this key we will choose remote Tx, such that: Local TM < Remote TX + stmts.push({ sql: `DELETE FROM main.parent WHERE id = 1` }) + stmts.push({ sql: `DELETE FROM main.parent WHERE id = 2` }) + await adapter.runInTransaction(...stmts) + await satellite._performSnapshot() + + const prevTs = txDate1.getTime() - 1 + const nextTs = txDate1.getTime() + 1 + + const prevEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + prevTs, + genEncodedTags('remote', [prevTs]), + { + id: 1, + value: 'remote', + other: 1, + }, + undefined + ) + const nextEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + nextTs, + genEncodedTags('remote', [nextTs]), + { + id: 2, + value: 'remote', + other: 2, + }, + undefined + ) + + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s in `_applyTransaction` + + const prevChange = opLogEntryToChange(prevEntry, relations) + const prevTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(prevTs), + changes: [prevChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(prevTx) + + const nextChange = opLogEntryToChange(nextEntry, relations) + const nextTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(nextTs), + changes: [nextChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(nextTx) + + const shadow = await getMatchingShadowEntries(adapter) + const expectedShadow = [ + { + namespace: 'main', + tablename: 'parent', + primaryKey: '{"id":1}', + tags: genEncodedTags('remote', [prevTs]), + }, + { + namespace: 'main', + tablename: 'parent', + primaryKey: '{"id":2}', + tags: genEncodedTags('remote', [nextTs]), + }, + ] + t.deepEqual(shadow, expectedShadow) + + let userTable = await adapter.query({ sql: `SELECT * FROM main.parent;` }) + + // In both cases insert wins over delete, but + // for id = 1 CR picks local data before delete, while + // for id = 2 CR picks remote data + const expectedUserTable = [ + { id: 1, value: 'local', other: null }, + { id: 2, value: 'remote', other: 2 }, + ] + t.deepEqual(expectedUserTable, userTable) +}) + +test('remote tx (INSERT) concurrently with local tx (INSERT -> UPDATE)', async (t) => { + const { adapter, runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) + const clientId = satellite._authState?.clientId ?? 
'test_id' + let stmts: Statement[] = [] + + // For this key we will choose remote Tx, such that: Local TM > Remote TX + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, + args: ['1', 'local', null], + }) + stmts.push({ + sql: `UPDATE main.parent SET value = $1, other = $2 WHERE id = 1`, + args: ['local', 999], + }) + // For this key we will choose remote Tx, such that: Local TM < Remote TX + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, + args: ['2', 'local', null], + }) + stmts.push({ + sql: `UPDATE main.parent SET value = $1, other = $2 WHERE id = 1`, + args: ['local', 999], + }) + await adapter.runInTransaction(...stmts) + + const txDate1 = await satellite._performSnapshot() + + const prevTs = txDate1.getTime() - 1 + const nextTs = txDate1.getTime() + 1 + + const prevEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + prevTs, + genEncodedTags('remote', [prevTs]), + { + id: 1, + value: 'remote', + other: 1, + }, + undefined + ) + + const nextEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + nextTs, + genEncodedTags('remote', [nextTs]), + { + id: 2, + value: 'remote', + other: 2, + }, + undefined + ) + + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s in `_applyTransaction` + + const prevChange = opLogEntryToChange(prevEntry, relations) + const prevTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(prevTs), + changes: [prevChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(prevTx) + + const nextChange = opLogEntryToChange(nextEntry, relations) + const nextTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(nextTs), + changes: [nextChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(nextTx) + + let shadow = await getMatchingShadowEntries(adapter) + const expectedShadow = [ + { + namespace: 'main', + tablename: 'parent', + primaryKey: '{"id":1}', + tags: encodeTags([ + generateTag(clientId, new Date(txDate1)), + generateTag('remote', new Date(prevTs)), + ]), + }, + { + namespace: 'main', + tablename: 'parent', + primaryKey: '{"id":2}', + tags: encodeTags([ + generateTag(clientId, new Date(txDate1)), + generateTag('remote', new Date(nextTs)), + ]), + }, + ] + t.deepEqual(shadow, expectedShadow) + + let entries = await satellite._getEntries() + + // Given that Insert and Update happen within the same transaction clear should not + // contain itself + t.is(entries[0].clearTags, encodeTags([])) + t.is(entries[1].clearTags, encodeTags([])) + t.is(entries[2].clearTags, encodeTags([])) + t.is(entries[3].clearTags, encodeTags([])) + + let userTable = await adapter.query({ sql: `SELECT * FROM main.parent;` }) + + // In both cases insert wins over delete, but + // for id = 1 CR picks local data before delete, while + // for id = 2 CR picks remote data + const expectedUserTable = [ + { id: 1, value: 'local', other: 999 }, + { id: 2, value: 'remote', other: 2 }, + ] + t.deepEqual(expectedUserTable, userTable) +}) + +test('origin tx (INSERT) concurrently with local txses (INSERT -> DELETE)', async (t) => { + // + const { adapter, runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) + const clientId = satellite._authState?.clientId ?? 
'test_id' + + let stmts: Statement[] = [] + + // For this key we will choose remote Tx, such that: Local TM > Remote TX + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, + args: ['1', 'local', null], + }) + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, + args: ['2', 'local', null], + }) + await adapter.runInTransaction(...stmts) + const txDate1 = await satellite._performSnapshot() + + stmts = [] + // For this key we will choose remote Tx, such that: Local TM < Remote TX + stmts.push({ sql: `DELETE FROM main.parent WHERE id = 1` }) + stmts.push({ sql: `DELETE FROM main.parent WHERE id = 2` }) + await adapter.runInTransaction(...stmts) + await satellite._performSnapshot() + + let entries = await satellite._getEntries() + t.assert(entries[0].newRow) + t.assert(entries[1]) + t.assert(entries[1].newRow) + + // For this key we receive transaction which was older + const electricEntrySameTs = new Date(entries[0].timestamp).getTime() + let electricEntrySame = generateRemoteOplogEntry( + tableInfo, + entries[0].namespace, + entries[0].tablename, + OPTYPES.insert, + electricEntrySameTs, + genEncodedTags(clientId, [txDate1]), + JSON.parse(entries[0].newRow!), + undefined + ) + + // For this key we had concurrent insert transaction from another node `remote` + // with same timestamp + const electricEntryConflictTs = new Date(entries[1].timestamp).getTime() + let electricEntryConflict = generateRemoteOplogEntry( + tableInfo, + entries[1].namespace, + entries[1].tablename, + OPTYPES.insert, + electricEntryConflictTs, + encodeTags([ + generateTag(clientId, txDate1), + generateTag('remote', txDate1), + ]), + JSON.parse(entries[1].newRow!), + undefined + ) + + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s in `_applyTransaction` + + const electricEntrySameChange = opLogEntryToChange( + electricEntrySame, + relations + ) + const electricEntryConflictChange = opLogEntryToChange( + electricEntryConflict, + relations + ) + const tx = { + origin: clientId, + commit_timestamp: Long.fromNumber(new Date().getTime()), // commit_timestamp doesn't matter for this test, it is only used to GC the oplog + changes: [electricEntrySameChange, electricEntryConflictChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(tx) + + let shadow = await getMatchingShadowEntries(adapter) + const expectedShadow = [ + { + namespace: 'main', + tablename: 'parent', + primaryKey: '{"id":2}', + tags: genEncodedTags('remote', [txDate1]), + }, + ] + t.deepEqual(shadow, expectedShadow) + + let userTable = await adapter.query({ sql: `SELECT * FROM main.parent;` }) + const expectedUserTable = [{ id: 2, value: 'local', other: null }] + t.deepEqual(expectedUserTable, userTable) +}) + +test('local (INSERT -> UPDATE -> DELETE) with remote equivalent', async (t) => { + const { runMigrations, satellite, tableInfo, authState, adapter } = t.context + await runMigrations() + await satellite._setAuthState(authState) + const clientId = satellite._authState?.clientId ?? 
'test_id' + let txDate1 = new Date().getTime() + + const insertEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.update, + txDate1, + genEncodedTags('remote', [txDate1]), + { + id: 1, + value: 'local', + }, + undefined + ) + + const deleteDate = txDate1 + 1 + const deleteEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.delete, + deleteDate, + genEncodedTags('remote', []), + { + id: 1, + value: 'local', + }, + undefined + ) + + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s in `_applyTransaction` + + const insertChange = opLogEntryToChange(insertEntry, relations) + const insertTx = { + origin: clientId, + commit_timestamp: Long.fromNumber(txDate1), + changes: [insertChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(insertTx) + + let shadow = await getMatchingShadowEntries(adapter) + const expectedShadow = [ + { + namespace: 'main', + tablename: 'parent', + primaryKey: '{"id":1}', + tags: genEncodedTags('remote', [txDate1]), + }, + ] + t.deepEqual(shadow, expectedShadow) + + const deleteChange = opLogEntryToChange(deleteEntry, relations) + const deleteTx = { + origin: clientId, + commit_timestamp: Long.fromNumber(deleteDate), + changes: [deleteChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(deleteTx) + + shadow = await getMatchingShadowEntries(adapter) + t.deepEqual([], shadow) + + let entries = await satellite._getEntries(0) + t.deepEqual([], entries) +}) diff --git a/clients/typescript/test/satellite/process.tags.test.ts b/clients/typescript/test/satellite/process.tags.test.ts index e96dfbb596..0286f1a232 100644 --- a/clients/typescript/test/satellite/process.tags.test.ts +++ b/clients/typescript/test/satellite/process.tags.test.ts @@ -429,10 +429,7 @@ test('remote tx (INSERT) concurrently with local tx (INSERT -> DELETE)', async ( ] t.deepEqual(shadow, expectedShadow) - //let entries= await satellite._getEntries() - //console.log(entries) const userTable = await adapter.query({ sql: `SELECT * FROM parent;` }) - //console.log(table) // In both cases insert wins over delete, but // for id = 1 CR picks local data before delete, while @@ -539,10 +536,7 @@ test('remote tx (INSERT) concurrently with 2 local txses (INSERT -> DELETE)', as ] t.deepEqual(shadow, expectedShadow) - //let entries= await satellite._getEntries() - //console.log(entries) let userTable = await adapter.query({ sql: `SELECT * FROM parent;` }) - //console.log(table) // In both cases insert wins over delete, but // for id = 1 CR picks local data before delete, while @@ -660,7 +654,6 @@ test('remote tx (INSERT) concurrently with local tx (INSERT -> UPDATE)', async ( t.deepEqual(shadow, expectedShadow) let entries = await satellite._getEntries() - //console.log(entries) // Given that Insert and Update happen within the same transaction clear should not // contain itself From d708255821400b0bb7174a9369196061ba338c4e Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 7 Feb 2024 15:17:05 +0100 Subject: [PATCH 009/156] Modify migrator and initial migration to work with both SQLite and Postgres. 
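
The migrator is now split into dialect-specific subclasses of a shared
BundleMigratorBase: SqliteBundleMigrator (still the default picked by
electrify()) and PgBundleMigrator. Both take a driver adapter plus the
bundled migrations, and up() resolves to the number of newly applied
migrations. A rough usage sketch for the Postgres path, mirroring the new
tests; import paths, the database name and the port are illustrative
placeholders, and the snippet assumes a top-level-await context:

    import { DatabaseAdapter } from '../drivers/node-postgres'
    import { PgBundleMigrator } from '../migrators/bundle'
    import migrations from '../../test/support/migrations/pg-migrations.js'
    import { makePgDatabase } from '../../test/support/node-postgres'

    // Illustrative wiring only; mirrors test/migrators/postgres/bundle.test.ts.
    // Spin up an embedded Postgres, apply the bundled migrations, tear down.
    const { db, stop } = await makePgDatabase('migrator-sketch', 5432)
    const adapter = new DatabaseAdapter(db)
    const migrator = new PgBundleMigrator(adapter, migrations)
    console.log(await migrator.up()) // number of migrations applied on first run
    console.log(await migrator.up()) // 0 on a second run, nothing left to apply
    await stop()

The SQLite path is unchanged for existing apps: electrify() defaults to
SqliteBundleMigrator when no custom migrator is passed in.
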
--- clients/typescript/package.json | 1 + clients/typescript/src/electric/index.ts | 11 +- clients/typescript/src/migrators/bundle.ts | 137 ++++-- clients/typescript/src/migrators/index.ts | 6 +- clients/typescript/src/migrators/schema.ts | 96 +++- clients/typescript/src/satellite/config.ts | 84 +++- clients/typescript/src/util/index.ts | 1 - clients/typescript/src/util/options.ts | 14 - .../test/client/model/shapes.test.ts | 2 +- .../test/drivers/node-postgres.test.ts | 14 +- .../test/migrators/postgres/bundle.test.ts | 61 +++ .../test/migrators/postgres/schema.test.ts | 56 +++ .../migrators/{ => sqlite}/builder.test.ts | 12 +- .../migrators/{ => sqlite}/bundle.test.ts | 10 +- .../migrators/{ => sqlite}/schema.test.ts | 10 +- .../migrators/{ => sqlite}/triggers.test.ts | 6 +- clients/typescript/test/satellite/common.ts | 5 +- .../test/support/migrations/pg-migrations.js | 452 ++++++++++++++++++ .../typescript/test/support/node-postgres.ts | 20 + pnpm-lock.yaml | 8 + 20 files changed, 882 insertions(+), 124 deletions(-) delete mode 100644 clients/typescript/src/util/options.ts create mode 100644 clients/typescript/test/migrators/postgres/bundle.test.ts create mode 100644 clients/typescript/test/migrators/postgres/schema.test.ts rename clients/typescript/test/migrators/{ => sqlite}/builder.test.ts (97%) rename clients/typescript/test/migrators/{ => sqlite}/bundle.test.ts (81%) rename clients/typescript/test/migrators/{ => sqlite}/schema.test.ts (76%) rename clients/typescript/test/migrators/{ => sqlite}/triggers.test.ts (97%) create mode 100644 clients/typescript/test/support/migrations/pg-migrations.js create mode 100644 clients/typescript/test/support/node-postgres.ts diff --git a/clients/typescript/package.json b/clients/typescript/package.json index e85e5cd097..750ab9f3f1 100644 --- a/clients/typescript/package.json +++ b/clients/typescript/package.json @@ -183,6 +183,7 @@ "frame-stream": "^3.0.1", "get-port": "^7.0.0", "jose": "^4.14.4", + "kysely": "^0.27.2", "lodash.flow": "^3.5.0", "lodash.groupby": "^4.6.0", "lodash.isequal": "^4.5.0", diff --git a/clients/typescript/src/electric/index.ts b/clients/typescript/src/electric/index.ts index 4d7d02e0a3..3274a6d7f4 100644 --- a/clients/typescript/src/electric/index.ts +++ b/clients/typescript/src/electric/index.ts @@ -1,6 +1,6 @@ import { ElectricConfigWithDialect, hydrateConfig } from '../config/index' -import { DatabaseAdapter } from './adapter' -import { BundleMigrator, Migrator } from '../migrators/index' +import { DatabaseAdapter } from '../electric/adapter' +import { Migrator } from '../migrators/index' import { EventNotifier, Notifier } from '../notifiers/index' import { globalRegistry, Registry } from '../satellite/index' import { SocketFactory } from '../sockets/index' @@ -9,6 +9,7 @@ import { setLogLevel } from '../util/debug' import { ElectricNamespace } from './namespace' import { ElectricClient } from '../client/model/client' import { DbSchema } from '../client/model/schema' +import { SqliteBundleMigrator } from '../migrators/bundle' export { ElectricNamespace } export type * from './adapter' @@ -18,6 +19,9 @@ export type * from './adapter' // implementations to be passed in to facilitate testing. export interface ElectrifyOptions { adapter?: DatabaseAdapter + /** + * Defaults to the migrator for SQLite. 
+ */ migrator?: Migrator notifier?: Notifier socketFactory?: SocketFactory @@ -56,7 +60,8 @@ export const electrify = async >( const configWithDefaults = hydrateConfig(config) const migrator = - opts?.migrator || new BundleMigrator(adapter, dbDescription.migrations) + opts?.migrator || + new SqliteBundleMigrator(adapter, dbDescription.migrations) const notifier = opts?.notifier || new EventNotifier(dbName) const registry = opts?.registry || globalRegistry diff --git a/clients/typescript/src/migrators/bundle.ts b/clients/typescript/src/migrators/bundle.ts index f56ad9326a..48c06c1614 100644 --- a/clients/typescript/src/migrators/bundle.ts +++ b/clients/typescript/src/migrators/bundle.ts @@ -3,44 +3,67 @@ import { Migration, MigrationRecord, Migrator, - MigratorOptions, StmtMigration, } from './index' import { DatabaseAdapter } from '../electric/adapter' -import { overrideDefined } from '../util/options' -import { data as baseMigration } from './schema' +import { getData as makeBaseMigration } from './schema' import Log from 'loglevel' -import { SatelliteError, SatelliteErrorCode } from '../util' +import { + SatelliteError, + SatelliteErrorCode, + SqlValue, + Statement, +} from '../util' +import { ElectricSchema } from './schema' +import { + Kysely, + KyselyConfig, + sql as raw, + DummyDriver, + PostgresAdapter, + PostgresIntrospector, + PostgresQueryCompiler, + SqliteAdapter, + SqliteIntrospector, + SqliteQueryCompiler, + expressionBuilder, + ExpressionBuilder, +} from 'kysely' +import { _electric_migrations } from '../satellite/config' export const SCHEMA_VSN_ERROR_MSG = `Local schema doesn't match server's. Clear local state through developer tools and retry connection manually. If error persists, re-generate the client. Check documentation (https://electric-sql.com/docs/reference/roadmap) to learn more.` -const DEFAULTS: MigratorOptions = { - tableName: '_electric_migrations', -} - const VALID_VERSION_EXP = new RegExp('^[0-9_]+') -export class BundleMigrator implements Migrator { +abstract class BundleMigratorBase implements Migrator { adapter: DatabaseAdapter migrations: StmtMigration[] - tableName: string + readonly tableName = _electric_migrations + queryBuilder: Kysely + eb: ExpressionBuilder constructor( adapter: DatabaseAdapter, migrations: Migration[] = [], - tableName?: string + queryBuilderConfig: KyselyConfig, + dialect: 'SQLite' | 'PG' ) { - const overrides = { tableName: tableName } - const opts = overrideDefined(DEFAULTS, overrides) as MigratorOptions - this.adapter = adapter + const baseMigration = makeBaseMigration(dialect) this.migrations = [...baseMigration.migrations, ...migrations].map( makeStmtMigration ) - this.tableName = opts.tableName + this.queryBuilder = new Kysely(queryBuilderConfig) + this.eb = expressionBuilder() } + /** + * Returns a SQL statement that checks if the given table exists. + * @param tableName The name of the table to check for existence. + */ + abstract createTableExistsStatement(tableName: string): Statement + async up(): Promise { const existing = await this.queryApplied() const unapplied = await this.validateApplied(this.migrations, existing) @@ -58,16 +81,8 @@ export class BundleMigrator implements Migrator { async migrationsTableExists(): Promise { // If this is the first time we're running migrations, then the // migrations table won't exist. - const tableExists = ` - SELECT 1 FROM sqlite_master - WHERE type = 'table' - AND name = ? 
- ` - const tables = await this.adapter.query({ - sql: tableExists, - args: [this.tableName], - }) - + const tableExists = this.createTableExistsStatement(this.tableName) + const tables = await this.adapter.query(tableExists) return tables.length > 0 } @@ -77,9 +92,10 @@ export class BundleMigrator implements Migrator { } const existingRecords = ` - SELECT version FROM ${this.tableName} + SELECT version FROM main.${this.tableName} ORDER BY id ASC ` + const rows = await this.adapter.query({ sql: existingRecords }) return rows as unknown as MigrationRecord[] } @@ -93,7 +109,7 @@ export class BundleMigrator implements Migrator { // The hard-coded version '0' below corresponds to the version of the internal migration defined in `schema.ts`. // We're ignoring it because this function is supposed to return the application schema version. const schemaVersion = ` - SELECT version FROM ${this.tableName} + SELECT version FROM main.${this.tableName} WHERE version != '0' ORDER BY version DESC LIMIT 1 @@ -144,13 +160,15 @@ export class BundleMigrator implements Migrator { ) } - const applied = `INSERT INTO ${this.tableName} - ('version', 'applied_at') VALUES (?, ?) - ` + const { sql, parameters } = raw` + INSERT INTO main.${this.eb.table( + this.tableName + )} (version, applied_at) VALUES (${version}, ${Date.now().toString()}) + `.compile(this.queryBuilder) await this.adapter.runInTransaction(...statements, { - sql: applied, - args: [version, Date.now()], + sql, + args: parameters as SqlValue[], }) } @@ -161,13 +179,14 @@ export class BundleMigrator implements Migrator { * that indicates if the migration was applied. */ async applyIfNotAlready(migration: StmtMigration): Promise { - const versionExists = ` - SELECT 1 FROM ${this.tableName} - WHERE version = ? - ` + const { sql, parameters } = raw` + SELECT 1 FROM main.${this.eb.table(this.tableName)} + WHERE version = ${migration.version} + `.compile(this.queryBuilder) + const rows = await this.adapter.query({ - sql: versionExists, - args: [migration.version], + sql, + args: parameters as SqlValue[], }) const shouldApply = rows.length === 0 @@ -181,3 +200,45 @@ export class BundleMigrator implements Migrator { return shouldApply } } + +export class SqliteBundleMigrator extends BundleMigratorBase { + constructor(adapter: DatabaseAdapter, migrations: Migration[] = []) { + const config: KyselyConfig = { + dialect: { + createAdapter: () => new SqliteAdapter(), + createDriver: () => new DummyDriver(), + createIntrospector: (db) => new SqliteIntrospector(db), + createQueryCompiler: () => new SqliteQueryCompiler(), + }, + } + super(adapter, migrations, config, 'SQLite') + } + + createTableExistsStatement(tableName: string): Statement { + return { + sql: `SELECT 1 FROM sqlite_master WHERE type = 'table' AND name = ?`, + args: [tableName], + } + } +} + +export class PgBundleMigrator extends BundleMigratorBase { + constructor(adapter: DatabaseAdapter, migrations: Migration[] = []) { + const config: KyselyConfig = { + dialect: { + createAdapter: () => new PostgresAdapter(), + createDriver: () => new DummyDriver(), + createIntrospector: (db) => new PostgresIntrospector(db), + createQueryCompiler: () => new PostgresQueryCompiler(), + }, + } + super(adapter, migrations, config, 'PG') + } + + createTableExistsStatement(tableName: string): Statement { + return { + sql: `SELECT 1 FROM information_schema.tables WHERE table_name = $1`, + args: [tableName], + } + } +} diff --git a/clients/typescript/src/migrators/index.ts b/clients/typescript/src/migrators/index.ts index 
d0799aee96..48f765e559 100644 --- a/clients/typescript/src/migrators/index.ts +++ b/clients/typescript/src/migrators/index.ts @@ -1,6 +1,6 @@ import { Statement } from '../util' -export { BundleMigrator } from './bundle' +export { SqliteBundleMigrator, PgBundleMigrator } from './bundle' export { MockMigrator } from './mock' export { parseMetadata, makeMigration } from './builder' export type { MetaData } from './builder' @@ -32,7 +32,3 @@ export interface Migrator { applyIfNotAlready(migration: StmtMigration): Promise querySchemaVersion(): Promise } - -export interface MigratorOptions { - tableName: string -} diff --git a/clients/typescript/src/migrators/schema.ts b/clients/typescript/src/migrators/schema.ts index 61e72a8ea8..7bef26eae1 100644 --- a/clients/typescript/src/migrators/schema.ts +++ b/clients/typescript/src/migrators/schema.ts @@ -1,30 +1,78 @@ import { satelliteDefaults } from '../satellite/config' +import { QualifiedTablename } from '../util' +export type { ElectricSchema } from '../satellite/config' const { metaTable, migrationsTable, oplogTable, triggersTable, shadowTable } = satelliteDefaults -export const data = { - migrations: [ - { - statements: [ - //`-- The ops log table\n`, - `CREATE TABLE IF NOT EXISTS ${oplogTable} (\n rowid INTEGER PRIMARY KEY AUTOINCREMENT,\n namespace TEXT NOT NULL,\n tablename TEXT NOT NULL,\n optype TEXT NOT NULL,\n primaryKey TEXT NOT NULL,\n newRow TEXT,\n oldRow TEXT,\n timestamp TEXT, clearTags TEXT DEFAULT "[]" NOT NULL\n);`, - // Add an index for the oplog - `CREATE INDEX IF NOT EXISTS ${oplogTable.namespace}._electric_table_pk_reference ON ${oplogTable.tablename} (namespace, tablename, primaryKey)`, - `CREATE INDEX IF NOT EXISTS ${oplogTable.namespace}._electric_timestamp ON ${oplogTable.tablename} (timestamp)`, - //`-- Somewhere to keep our metadata\n`, - `CREATE TABLE IF NOT EXISTS ${metaTable} (\n key TEXT PRIMARY KEY,\n value BLOB\n);`, - //`-- Somewhere to track migrations\n`, - `CREATE TABLE IF NOT EXISTS ${migrationsTable} (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n version TEXT NOT NULL UNIQUE,\n applied_at TEXT NOT NULL\n);`, - //`-- Initialisation of the metadata table\n`, - `INSERT INTO ${metaTable} (key, value) VALUES ('compensations', 1), ('lsn', ''), ('clientId', ''), ('subscriptions', ''), ('seenAdditionalData', '');`, - //`-- These are toggles for turning the triggers on and off\n`, - `DROP TABLE IF EXISTS ${triggersTable};`, - `CREATE TABLE ${triggersTable} (tablename TEXT PRIMARY KEY, flag INTEGER);`, - //`-- Somewhere to keep dependency tracking information\n`, - `CREATE TABLE ${shadowTable} (\n namespace TEXT NOT NULL,\n tablename TEXT NOT NULL,\n primaryKey TEXT NOT NULL,\n tags TEXT NOT NULL,\n PRIMARY KEY (namespace, tablename, primaryKey));`, - ], - version: '0', - }, - ], +export const getData = (dialect: 'SQLite' | 'PG') => { + const pgOnly = (query: string) => { + if (dialect === 'PG') { + return query + } + return '' + } + const pgOnlyQuery = (query: string) => { + if (dialect === 'PG') { + return [query] + } + return [] + } + + const AUTOINCREMENT_PK = + dialect === 'SQLite' + ? 'INTEGER PRIMARY KEY AUTOINCREMENT' + : 'SERIAL PRIMARY KEY' + const BLOB = dialect === 'SQLite' ? 
'BLOB' : 'TEXT' + const create_index = ( + indexName: string, + onTable: QualifiedTablename, + columns: string[] + ) => { + const namespace = onTable.namespace + const tablename = onTable.tablename + if (dialect === 'SQLite') { + return `CREATE INDEX IF NOT EXISTS ${namespace}.${indexName} ON ${tablename} (${columns.join( + ', ' + )})` + } + return `CREATE INDEX IF NOT EXISTS ${indexName} ON ${namespace}.${tablename} (${columns.join( + ', ' + )})` + } + + const data = { + migrations: [ + { + statements: [ + // The main schema, + ...pgOnlyQuery(`CREATE SCHEMA IF NOT EXISTS "main"`), + //`-- The ops log table\n`, + `CREATE TABLE IF NOT EXISTS ${oplogTable} (\n rowid ${AUTOINCREMENT_PK},\n namespace TEXT NOT NULL,\n tablename TEXT NOT NULL,\n optype TEXT NOT NULL,\n primaryKey TEXT NOT NULL,\n newRow TEXT,\n oldRow TEXT,\n timestamp TEXT, clearTags TEXT DEFAULT '[]' NOT NULL\n);`, + // Add an index for the oplog + create_index('_electric_table_pk_reference', oplogTable, [ + 'namespace', + 'tablename', + 'primaryKey', + ]), + create_index('_electric_timestamp', oplogTable, ['timestamp']), + //`-- Somewhere to keep our metadata\n`, + `CREATE TABLE IF NOT EXISTS ${metaTable} (\n key TEXT PRIMARY KEY,\n value ${BLOB}\n);`, + //`-- Somewhere to track migrations\n`, + `CREATE TABLE IF NOT EXISTS ${migrationsTable} (\n id ${AUTOINCREMENT_PK},\n version TEXT NOT NULL UNIQUE,\n applied_at TEXT NOT NULL\n);`, + //`-- Initialisation of the metadata table\n`, + `INSERT INTO ${metaTable} (key, value) VALUES ('compensations', 1), ('lsn', ''), ('clientId', ''), ('subscriptions', ''), ('seenAdditionalData', '');`, + //`-- These are toggles for turning the triggers on and off\n`, + `DROP TABLE IF EXISTS ${triggersTable};`, + `CREATE TABLE ${triggersTable} (tablename TEXT PRIMARY KEY, flag INTEGER);`, + //`-- Somewhere to keep dependency tracking information\n`, + `CREATE TABLE ${shadowTable} (\n ${pgOnly( + 'rowid SERIAL,' + )} namespace TEXT NOT NULL,\n tablename TEXT NOT NULL,\n primaryKey TEXT NOT NULL,\n tags TEXT NOT NULL,\n PRIMARY KEY (namespace, tablename, primaryKey));`, + ], + version: '0', + }, + ], + } + return data } diff --git a/clients/typescript/src/satellite/config.ts b/clients/typescript/src/satellite/config.ts index ba112602af..a9626d4e78 100644 --- a/clients/typescript/src/satellite/config.ts +++ b/clients/typescript/src/satellite/config.ts @@ -1,5 +1,6 @@ import { IBackOffOptions } from 'exponential-backoff' import { QualifiedTablename } from '../util/tablename' +import { Insertable, Selectable, Updateable, Generated } from 'kysely' export type ConnectionBackoffOptions = Omit export interface SatelliteOpts { @@ -34,12 +35,85 @@ export interface SatelliteOverrides { minSnapshotWindow?: number } +// Describe the schema of the database for use with Kysely +// The names of the properties in this interface +// must be kept consistent with the names of the tables + +export const _electric_oplog = '_electric_oplog' +export const _electric_meta = '_electric_meta' +export const _electric_migrations = '_electric_migrations' +export const _electric_trigger_settings = '_electric_trigger_settings' +export const _electric_shadow = '_electric_shadow' + +export interface ElectricSchema { + [_electric_oplog]: OplogTable + [_electric_meta]: MetaTable + [_electric_migrations]: MigrationsTable + [_electric_trigger_settings]: TriggersTable + [_electric_shadow]: ShadowTable +} + +interface OplogTable { + rowid: number + namespace: string + tablename: string + optype: string + primaryKey: string + newRow: 
string | null + oldRow: string | null + timestamp: string + clearTags: string +} + +export type Oplog = Selectable +export type NewOplog = Insertable +export type OplogUpdate = Updateable + +interface MetaTable { + key: string + value: Buffer +} + +export type Meta = Selectable +export type NewMeta = Insertable +export type MetaUpdate = Updateable + +export interface MigrationsTable { + id: Generated + version: string + applied_at: string +} + +export type Migration = Selectable +export type NewMigration = Insertable +export type MigrationUpdate = Updateable + +interface TriggersTable { + tablename: string + flag: number +} + +export type Trigger = Selectable +export type NewTrigger = Insertable +export type TriggerUpdate = Updateable + +interface ShadowTable { + namespace: string + tablename: string + primaryKey: string + tags: string +} + +export type Shadow = Selectable +export type NewShadow = Insertable +export type ShadowUpdate = Updateable + export const satelliteDefaults: SatelliteOpts = { - metaTable: new QualifiedTablename('main', '_electric_meta'), - migrationsTable: new QualifiedTablename('main', '_electric_migrations'), - oplogTable: new QualifiedTablename('main', '_electric_oplog'), - triggersTable: new QualifiedTablename('main', '_electric_trigger_settings'), - shadowTable: new QualifiedTablename('main', '_electric_shadow'), + metaTable: new QualifiedTablename('main', _electric_meta), + migrationsTable: new QualifiedTablename('main', _electric_migrations), + oplogTable: new QualifiedTablename('main', _electric_oplog), + triggersTable: new QualifiedTablename('main', _electric_trigger_settings), + shadowTable: new QualifiedTablename('main', _electric_shadow), pollingInterval: 2000, minSnapshotWindow: 40, clearOnBehindWindow: true, diff --git a/clients/typescript/src/util/index.ts b/clients/typescript/src/util/index.ts index 15604c826e..5405b180ae 100644 --- a/clients/typescript/src/util/index.ts +++ b/clients/typescript/src/util/index.ts @@ -1,7 +1,6 @@ export * from './common' export * from './hex' export * from './keys' -export * from './options' export * from './parser' export * from './proto' export * from './random' diff --git a/clients/typescript/src/util/options.ts b/clients/typescript/src/util/options.ts deleted file mode 100644 index fe1070c972..0000000000 --- a/clients/typescript/src/util/options.ts +++ /dev/null @@ -1,14 +0,0 @@ -export const overrideDefined = ( - defaults: object = {}, - overrides: object = {} -): object => { - const filteredOverrides: { [key: string | symbol]: any } = {} - - for (const [k, v] of Object.entries(overrides)) { - if (v !== undefined) { - filteredOverrides[k] = v - } - } - - return Object.assign({}, defaults, filteredOverrides) -} diff --git a/clients/typescript/test/client/model/shapes.test.ts b/clients/typescript/test/client/model/shapes.test.ts index eafbd916e9..baa5c08070 100644 --- a/clients/typescript/test/client/model/shapes.test.ts +++ b/clients/typescript/test/client/model/shapes.test.ts @@ -5,7 +5,7 @@ import { schema } from '../generated' import { DatabaseAdapter } from '../../../src/drivers/better-sqlite3' import { SatelliteProcess } from '../../../src/satellite/process' import { MockRegistry, MockSatelliteClient } from '../../../src/satellite/mock' -import { BundleMigrator } from '../../../src/migrators' +import { SqliteBundleMigrator as BundleMigrator } from '../../../src/migrators' import { MockNotifier } from '../../../src/notifiers' import { RelationsCache, randomValue } from '../../../src/util' import { ElectricClient 
} from '../../../src/client/model/client' diff --git a/clients/typescript/test/drivers/node-postgres.test.ts b/clients/typescript/test/drivers/node-postgres.test.ts index 7438cdb829..2f667927a1 100644 --- a/clients/typescript/test/drivers/node-postgres.test.ts +++ b/clients/typescript/test/drivers/node-postgres.test.ts @@ -1,9 +1,8 @@ import test from 'ava' -import { ElectricDatabase } from '../../src/drivers/node-postgres' import { MockDatabase } from '../../src/drivers/node-postgres/mock' import { DatabaseAdapter } from '../../src/drivers/node-postgres' -import fs from 'fs/promises' +import { makePgDatabase } from '../support/node-postgres' test('database adapter run works', async (t) => { const db = new MockDatabase('test.db') @@ -34,20 +33,11 @@ test('database adapter query works', async (t) => { // Test with an actual embedded-postgres DB async function makeAdapter() { - const db = await ElectricDatabase.init({ - name: 'driver-test', - databaseDir: './tmp/pg/db', - persistent: false, - }) - + const { db, stop } = await makePgDatabase('driver-test') const adapter = new DatabaseAdapter(db) const createTableSql = 'CREATE TABLE IF NOT EXISTS Post(id TEXT PRIMARY KEY, title TEXT, contents TEXT, nbr integer);' await adapter.run({ sql: createTableSql }) - const stop = async () => { - await db.stop() - await fs.rm('./tmp', { recursive: true, force: true }) - } return { adapter, stop } } diff --git a/clients/typescript/test/migrators/postgres/bundle.test.ts b/clients/typescript/test/migrators/postgres/bundle.test.ts new file mode 100644 index 0000000000..c65484e768 --- /dev/null +++ b/clients/typescript/test/migrators/postgres/bundle.test.ts @@ -0,0 +1,61 @@ +import test from 'ava' + +import { DatabaseAdapter } from '../../../src/drivers/node-postgres' +import { PgBundleMigrator as BundleMigrator } from '../../../src/migrators/bundle' +import { makeStmtMigration } from '../../../src/migrators' + +import { randomValue } from '../../../src/util/random' + +import migrations from '../../support/migrations/pg-migrations.js' +import { makePgDatabase } from '../../support/node-postgres' + +let port = 5532 +test.beforeEach(async (t) => { + const dbName = `bundle-migrator-${randomValue()}` + const { db, stop } = await makePgDatabase(dbName, port++) + const adapter = new DatabaseAdapter(db) + + t.context = { + adapter, + dbName, + stopPG: stop, + } +}) + +test.afterEach.always(async (t) => { + const { stopPG } = t.context as any + await stopPG() +}) + +test('run the bundle migrator', async (t) => { + const { adapter } = t.context as any + + const migrator = new BundleMigrator(adapter, migrations) + t.is(await migrator.up(), 3) + t.is(await migrator.up(), 0) +}) + +test('applyIfNotAlready applies new migrations', async (t) => { + const { adapter } = t.context as any + + const allButLastMigrations = migrations.slice(0, -1) + const lastMigration = makeStmtMigration(migrations[migrations.length - 1]) + + const migrator = new BundleMigrator(adapter, allButLastMigrations) + t.is(await migrator.up(), 2) + + const wasApplied = await migrator.applyIfNotAlready(lastMigration) + t.assert(wasApplied) +}) + +test('applyIfNotAlready ignores already applied migrations', async (t) => { + const { adapter } = t.context as any + + const migrator = new BundleMigrator(adapter, migrations) + t.is(await migrator.up(), 3) + + const wasApplied = await migrator.applyIfNotAlready( + makeStmtMigration(migrations[0]) + ) + t.assert(!wasApplied) +}) diff --git a/clients/typescript/test/migrators/postgres/schema.test.ts 
b/clients/typescript/test/migrators/postgres/schema.test.ts new file mode 100644 index 0000000000..818a11e26b --- /dev/null +++ b/clients/typescript/test/migrators/postgres/schema.test.ts @@ -0,0 +1,56 @@ +import test from 'ava' + +import { AnyDatabase } from '../../../src/drivers' +import { DatabaseAdapter } from '../../../src/drivers/node-postgres' +import { PgBundleMigrator as BundleMigrator } from '../../../src/migrators/bundle' +import { satelliteDefaults } from '../../../src/satellite/config' + +import { randomValue } from '../../../src/util/random' + +import migrations from '../../support/migrations/pg-migrations.js' +import { makePgDatabase } from '../../support/node-postgres' + +type Context = { + dbName: string + adapter: DatabaseAdapter + db: AnyDatabase + stopPG: () => Promise +} + +test.beforeEach(async (t) => { + const dbName = `schema-migrations-${randomValue()}` + const { db, stop } = await makePgDatabase(dbName, 5432) + const adapter = new DatabaseAdapter(db) + + t.context = { + adapter, + dbName, + stopPG: stop, + } +}) + +test.afterEach.always(async (t) => { + const { stopPG } = t.context as Context + await stopPG() +}) + +test('check schema keys are unique', async (t) => { + const { adapter } = t.context as Context + + const migrator = new BundleMigrator(adapter, migrations) + await migrator.up() + + await adapter.run({ + sql: `INSERT INTO ${satelliteDefaults.metaTable}(key, value) values ('key', 'value')`, + }) + try { + await adapter.run({ + sql: `INSERT INTO ${satelliteDefaults.metaTable}(key, value) values ('key', 'value')`, + }) + t.fail() + } catch (err) { + const castError = err as { code: string; detail: string } + t.is(castError.code, '23505') + t.is(castError.detail, 'Key (key)=(key) already exists.') + } +}) diff --git a/clients/typescript/test/migrators/builder.test.ts b/clients/typescript/test/migrators/sqlite/builder.test.ts similarity index 97% rename from clients/typescript/test/migrators/builder.test.ts rename to clients/typescript/test/migrators/sqlite/builder.test.ts index 3af3f17ab3..fb961bb131 100644 --- a/clients/typescript/test/migrators/builder.test.ts +++ b/clients/typescript/test/migrators/sqlite/builder.test.ts @@ -1,6 +1,6 @@ import test from 'ava' -import { makeMigration, parseMetadata } from '../../src/migrators/builder' -import { loadMigrations } from '../../src/cli/migrations/builder' +import { makeMigration, parseMetadata } from '../../../src/migrators/builder' +import { loadMigrations } from '../../../src/cli/migrations/builder' import { SatOpMigrate, SatOpMigrate_Table, @@ -9,13 +9,13 @@ import { SatOpMigrate_Column, SatOpMigrate_PgColumnType, SatOpMigrate_ForeignKey, -} from '../../src/_generated/protocol/satellite' +} from '../../../src/_generated/protocol/satellite' import _m0 from 'protobufjs/minimal.js' import Database from 'better-sqlite3' -import { electrify } from '../../src/drivers/better-sqlite3' +import { electrify } from '../../../src/drivers/better-sqlite3' import path from 'path' -import { DbSchema } from '../../src/client/model' -import { MockSocket } from '../../src/sockets/mock' +import { DbSchema } from '../../../src/client/model' +import { MockSocket } from '../../../src/sockets/mock' function encodeSatOpMigrateMsg(request: SatOpMigrate) { return ( diff --git a/clients/typescript/test/migrators/bundle.test.ts b/clients/typescript/test/migrators/sqlite/bundle.test.ts similarity index 81% rename from clients/typescript/test/migrators/bundle.test.ts rename to clients/typescript/test/migrators/sqlite/bundle.test.ts index 
8057a8c13c..8408667068 100644 --- a/clients/typescript/test/migrators/bundle.test.ts +++ b/clients/typescript/test/migrators/sqlite/bundle.test.ts @@ -3,13 +3,13 @@ import Database from 'better-sqlite3' import { rm as removeFile } from 'node:fs/promises' -import { DatabaseAdapter } from '../../src/drivers/better-sqlite3/adapter' -import { BundleMigrator } from '../../src/migrators/bundle' -import { makeStmtMigration } from '../../src/migrators' +import { DatabaseAdapter } from '../../../src/drivers/better-sqlite3/adapter' +import { SqliteBundleMigrator as BundleMigrator } from '../../../src/migrators/bundle' +import { makeStmtMigration } from '../../../src/migrators' -import { randomValue } from '../../src/util/random' +import { randomValue } from '../../../src/util/random' -import migrations from '../support/migrations/migrations.js' +import migrations from '../../support/migrations/migrations.js' test.beforeEach((t) => { const dbName = `bundle-migrator-${randomValue()}.db` diff --git a/clients/typescript/test/migrators/schema.test.ts b/clients/typescript/test/migrators/sqlite/schema.test.ts similarity index 76% rename from clients/typescript/test/migrators/schema.test.ts rename to clients/typescript/test/migrators/sqlite/schema.test.ts index 4b943c59b5..2af7df7f10 100644 --- a/clients/typescript/test/migrators/schema.test.ts +++ b/clients/typescript/test/migrators/sqlite/schema.test.ts @@ -3,13 +3,13 @@ import Database from 'better-sqlite3' import { rm as removeFile } from 'node:fs/promises' -import { DatabaseAdapter } from '../../src/drivers/better-sqlite3/adapter' -import { BundleMigrator } from '../../src/migrators/bundle' -import { satelliteDefaults } from '../../src/satellite/config' +import { DatabaseAdapter } from '../../../src/drivers/better-sqlite3/adapter' +import { SqliteBundleMigrator as BundleMigrator } from '../../../src/migrators/bundle' +import { satelliteDefaults } from '../../../src/satellite/config' -import { randomValue } from '../../src/util/random' +import { randomValue } from '../../../src/util/random' -import migrations from '../support/migrations/migrations.js' +import migrations from '../../support/migrations/migrations.js' type Context = { dbName: string diff --git a/clients/typescript/test/migrators/triggers.test.ts b/clients/typescript/test/migrators/sqlite/triggers.test.ts similarity index 97% rename from clients/typescript/test/migrators/triggers.test.ts rename to clients/typescript/test/migrators/sqlite/triggers.test.ts index dfb93b7fa3..6e47ca38a7 100644 --- a/clients/typescript/test/migrators/triggers.test.ts +++ b/clients/typescript/test/migrators/sqlite/triggers.test.ts @@ -1,10 +1,10 @@ import { dedent } from 'ts-dedent' import Database from 'better-sqlite3' import testAny, { TestFn } from 'ava' -import { generateTableTriggers } from '../../src/migrators/triggers' +import { generateTableTriggers } from '../../../src/migrators/triggers' import type { Database as SqliteDB } from 'better-sqlite3' -import { satelliteDefaults } from '../../src/satellite/config' -import { migrateDb, personTable } from '../satellite/common' +import { satelliteDefaults } from '../../../src/satellite/config' +import { migrateDb, personTable } from '../../satellite/common' type Context = { db: SqliteDB; migrateDb: () => void } const test = testAny as TestFn diff --git a/clients/typescript/test/satellite/common.ts b/clients/typescript/test/satellite/common.ts index 4fd64b6561..ecd7763c53 100644 --- a/clients/typescript/test/satellite/common.ts +++ 
b/clients/typescript/test/satellite/common.ts @@ -3,14 +3,14 @@ import { RelationsCache, randomValue } from '../../src/util' import Database from 'better-sqlite3' import type { Database as SqliteDB } from 'better-sqlite3' import { DatabaseAdapter } from '../../src/drivers/better-sqlite3' -import { BundleMigrator } from '../../src/migrators' +import { SqliteBundleMigrator as BundleMigrator } from '../../src/migrators' import { EventNotifier, MockNotifier } from '../../src/notifiers' import { MockSatelliteClient } from '../../src/satellite/mock' import { GlobalRegistry, Registry, SatelliteProcess } from '../../src/satellite' import { TableInfo, initTableInfo } from '../support/satellite-helpers' import { satelliteDefaults, SatelliteOpts } from '../../src/satellite/config' import { Table, generateTableTriggers } from '../../src/migrators/triggers' -import { data as initialMigration } from '../../src/migrators/schema' +import { getData as makeInitialMigration } from '../../src/migrators/schema' export const dbDescription = new DbSchema( { @@ -345,6 +345,7 @@ export function migrateDb(db: SqliteDB, table: Table) { db.exec(createTableSQL) // Apply the initial migration on the database + const initialMigration = makeInitialMigration('SQLite') const migration = initialMigration.migrations[0].statements migration.forEach((stmt) => { db.exec(stmt) diff --git a/clients/typescript/test/support/migrations/pg-migrations.js b/clients/typescript/test/support/migrations/pg-migrations.js new file mode 100644 index 0000000000..26d97875c5 --- /dev/null +++ b/clients/typescript/test/support/migrations/pg-migrations.js @@ -0,0 +1,452 @@ +/* + Autogenerated ElectricSQL config file. Don't edit this + file directly. Instead, use the `electric` CLI tool + to manage your config and migrations. + + See https://electric-sql.com/docs for more information. 
+*/ + +export default [ + { + statements: [ + 'DROP TABLE IF EXISTS main._electric_trigger_settings;', + 'CREATE TABLE main._electric_trigger_settings(tablename TEXT PRIMARY KEY, flag INTEGER);', + ], + version: '1', + }, + { + statements: [ + 'CREATE TABLE IF NOT EXISTS main.items (\n value TEXT PRIMARY KEY NOT NULL\n);', + 'CREATE TABLE IF NOT EXISTS main.parent (\n id INTEGER PRIMARY KEY NOT NULL,\n value TEXT,\n other INTEGER DEFAULT 0\n);', + 'CREATE TABLE IF NOT EXISTS main.child (\n id INTEGER PRIMARY KEY NOT NULL,\n parent INTEGER NOT NULL,\n FOREIGN KEY(parent) REFERENCES main.parent(id)\n);', + 'DROP TABLE IF EXISTS main._electric_trigger_settings;', + 'CREATE TABLE main._electric_trigger_settings(tablename TEXT PRIMARY KEY, flag INTEGER);', + "INSERT INTO main._electric_trigger_settings(tablename,flag) VALUES ('main.child', 1);", + "INSERT INTO main._electric_trigger_settings(tablename,flag) VALUES ('main.items', 1);", + "INSERT INTO main._electric_trigger_settings(tablename,flag) VALUES ('main.parent', 1);", + + 'DROP TRIGGER IF EXISTS update_ensure_main_child_primarykey ON main.child;', + ` + CREATE OR REPLACE FUNCTION update_ensure_main_child_primarykey_function() + RETURNS TRIGGER AS $$ + BEGIN + IF old.id != new.id THEN + RAISE EXCEPTION 'cannot change the value of column id as it belongs to the primary key'; + END IF; + RETURN NEW; + END; + $$ LANGUAGE plpgsql;`, + ` + CREATE TRIGGER update_ensure_main_child_primarykey + BEFORE UPDATE ON main.child + FOR EACH ROW + EXECUTE FUNCTION update_ensure_main_child_primarykey_function(); + `, + + 'DROP TRIGGER IF EXISTS insert_main_child_into_oplog ON main.child', + + ` + CREATE OR REPLACE FUNCTION insert_main_child_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.child'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ('main', 'child', 'INSERT', jsonb_build_object('id', NEW.id), jsonb_build_object('id', NEW.id, 'parent', NEW.parent), NULL, NULL); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + `, + + ` + CREATE TRIGGER insert_main_child_into_oplog + AFTER INSERT ON main.child + FOR EACH ROW + EXECUTE FUNCTION insert_main_child_into_oplog_function(); + `, + + 'DROP TRIGGER IF EXISTS update_main_child_into_oplog ON main.child;', + ` + CREATE OR REPLACE FUNCTION update_main_child_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.child'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ('main', 'child', 'UPDATE', jsonb_build_object('id', NEW.id), jsonb_build_object('id', NEW.id, 'parent', NEW.parent), jsonb_build_object('id', OLD.id, 'parent', OLD.parent), NULL); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + `, + ` + CREATE TRIGGER update_main_child_into_oplog + AFTER UPDATE ON main.child + FOR EACH ROW + EXECUTE FUNCTION update_main_child_into_oplog_function(); + `, + + 'DROP TRIGGER IF EXISTS delete_main_child_into_oplog ON main.child;', + ` + CREATE OR REPLACE 
FUNCTION delete_main_child_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.child'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ('main', 'child', 'DELETE', jsonb_build_object('id', OLD.id), NULL, jsonb_build_object('id', OLD.id, 'parent', OLD.parent), NULL); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + `, + ` + CREATE TRIGGER delete_main_child_into_oplog + AFTER DELETE ON main.child + FOR EACH ROW + EXECUTE FUNCTION delete_main_child_into_oplog_function(); + `, + + 'DROP TRIGGER IF EXISTS compensation_insert_main_child_parent_into_oplog ON main.child;', + ` + CREATE OR REPLACE FUNCTION compensation_insert_main_child_parent_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + meta_value TEXT; + BEGIN + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.parent'; + + SELECT value INTO meta_value FROM main._electric_meta WHERE key = 'compensations'; + + IF flag_value = 1 AND meta_value = '1' THEN + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + SELECT 'main', 'parent', 'INSERT', jsonb_build_object('id', id), + jsonb_build_object('id', id, 'value', value, 'other', other), NULL, NULL + FROM main.parent WHERE id = NEW."parent"; + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + `, + ` + CREATE TRIGGER compensation_insert_main_child_parent_into_oplog + AFTER INSERT ON main.child + FOR EACH ROW + EXECUTE FUNCTION compensation_insert_main_child_parent_into_oplog_function(); + `, + + 'DROP TRIGGER IF EXISTS compensation_update_main_child_parent_into_oplog ON main.parent;', + ` + CREATE OR REPLACE FUNCTION compensation_update_main_child_parent_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + meta_value TEXT; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.parent'; + + -- Get the 'compensations' value from _electric_meta + SELECT value INTO meta_value FROM main._electric_meta WHERE key = 'compensations'; + + IF flag_value = 1 AND meta_value = '1' THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + SELECT 'main', 'parent', 'UPDATE', jsonb_build_object('id', id), + jsonb_build_object('id', id, 'value', value, 'other', other), NULL, NULL + FROM main.parent WHERE id = NEW."parent"; + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + `, + ` + CREATE TRIGGER compensation_update_main_child_parent_into_oplog + AFTER UPDATE ON main.parent + FOR EACH ROW + EXECUTE FUNCTION compensation_update_main_child_parent_into_oplog_function(); + `, + + 'DROP TRIGGER IF EXISTS update_ensure_main_items_primarykey ON main.items;', + ` + CREATE OR REPLACE FUNCTION update_ensure_main_items_primarykey_function() + RETURNS TRIGGER AS $$ + BEGIN + IF old.value != new.value THEN + RAISE EXCEPTION 'cannot change the value of column value as it belongs to the primary key'; + END IF; + RETURN NEW; + END; + $$ LANGUAGE plpgsql;`, + ` + CREATE TRIGGER update_ensure_main_items_primarykey + 
BEFORE UPDATE ON main.items + FOR EACH ROW + EXECUTE FUNCTION update_ensure_main_items_primarykey_function(); + `, + + 'DROP TRIGGER IF EXISTS insert_main_items_into_oplog ON main.items;', + ` + CREATE OR REPLACE FUNCTION insert_main_items_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.items'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ('main', 'items', 'INSERT', jsonb_build_object('value', NEW.value), jsonb_build_object('value', NEW.value), NULL, NULL); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + `, + + ` + -- Attach the trigger function to the table + CREATE TRIGGER insert_main_items_into_oplog + AFTER INSERT ON main.items + FOR EACH ROW + EXECUTE FUNCTION insert_main_items_into_oplog_function(); + `, + + 'DROP TRIGGER IF EXISTS update_main_items_into_oplog ON main.items;', + ` + CREATE OR REPLACE FUNCTION update_main_items_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.items'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ('main', 'items', 'UPDATE', jsonb_build_object('value', NEW.value), jsonb_build_object('value', NEW.value), jsonb_build_object('value', OLD.value), NULL); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql;`, + + ` + -- Attach the trigger function to the table + CREATE TRIGGER update_main_items_into_oplog + AFTER UPDATE ON main.items + FOR EACH ROW + EXECUTE FUNCTION update_main_items_into_oplog_function(); + `, + + 'DROP TRIGGER IF EXISTS delete_main_items_into_oplog ON main.items;', + ` + CREATE OR REPLACE FUNCTION delete_main_items_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.items'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ('main', 'items', 'DELETE', jsonb_build_object('value', OLD.value), NULL, jsonb_build_object('value', OLD.value), NULL); + END IF; + + RETURN OLD; + END; + END; + $$ LANGUAGE plpgsql;`, + ` + -- Attach the trigger function to the table + CREATE TRIGGER delete_main_items_into_oplog + AFTER DELETE ON main.items + FOR EACH ROW + EXECUTE FUNCTION delete_main_items_into_oplog_function(); + `, + 'DROP TRIGGER IF EXISTS update_ensure_main_parent_primarykey ON main.parent;', + + ` + CREATE OR REPLACE FUNCTION update_ensure_main_parent_primarykey_function() + RETURNS TRIGGER AS $$ + BEGIN + IF OLD.id != NEW.id THEN + RAISE EXCEPTION 'cannot change the value of column id as it belongs to the primary key'; + END IF; + RETURN NEW; + END; + $$ LANGUAGE plpgsql; + `, + + ` + -- Attach the trigger function to the table + CREATE TRIGGER update_ensure_main_parent_primarykey + BEFORE UPDATE ON main.parent + FOR EACH ROW + EXECUTE FUNCTION 
update_ensure_main_parent_primarykey_function(); + `, + + 'DROP TRIGGER IF EXISTS insert_main_parent_into_oplog ON main.parent;', + ` + CREATE OR REPLACE FUNCTION insert_main_parent_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.parent'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ( + 'main', + 'parent', + 'INSERT', + jsonb_build_object('id', NEW.id), + jsonb_build_object('id', NEW.id, 'value', NEW.value, 'other', NEW.other), + NULL, + NULL + ); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + `, + + ` + -- Attach the trigger function to the table + CREATE TRIGGER insert_main_parent_into_oplog + AFTER INSERT ON main.parent + FOR EACH ROW + EXECUTE FUNCTION insert_main_parent_into_oplog_function(); + `, + + 'DROP TRIGGER IF EXISTS update_main_parent_into_oplog ON main.parent;', + + ` + CREATE OR REPLACE FUNCTION update_main_parent_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.parent'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ( + 'main', + 'parent', + 'UPDATE', + jsonb_build_object('id', NEW.id), + jsonb_build_object('id', NEW.id, 'value', NEW.value, 'other', NEW.other), + jsonb_build_object('id', OLD.id, 'value', OLD.value, 'other', OLD.other), + NULL + ); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + `, + + ` + -- Attach the trigger function to the table + CREATE TRIGGER update_main_parent_into_oplog + AFTER UPDATE ON main.parent + FOR EACH ROW + EXECUTE FUNCTION update_main_parent_into_oplog_function(); + `, + + 'DROP TRIGGER IF EXISTS delete_main_parent_into_oplog ON main.parent;', + + ` + CREATE OR REPLACE FUNCTION delete_main_parent_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.parent'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ( + 'main', + 'parent', + 'DELETE', + jsonb_build_object('id', OLD.id), + NULL, + jsonb_build_object('id', OLD.id, 'value', OLD.value, 'other', OLD.other), + NULL + ); + END IF; + + RETURN OLD; + END; + END; + $$ LANGUAGE plpgsql; + `, + + ` + -- Attach the trigger function to the table + CREATE TRIGGER delete_main_parent_into_oplog + AFTER DELETE ON main.parent + FOR EACH ROW + EXECUTE FUNCTION delete_main_parent_into_oplog_function(); + `, + ], + version: '2', + }, +] diff --git a/clients/typescript/test/support/node-postgres.ts b/clients/typescript/test/support/node-postgres.ts new file mode 100644 index 0000000000..73f28a42d2 --- /dev/null +++ b/clients/typescript/test/support/node-postgres.ts @@ -0,0 +1,20 @@ +import fs from 'fs/promises' +import { ElectricDatabase } from '../../src/drivers/node-postgres' + +export async function makePgDatabase( 
+ name: string, + port: number +): Promise<{ db: ElectricDatabase; stop: () => Promise }> { + const db = await ElectricDatabase.init({ + name, + databaseDir: `./tmp-${name}`, + persistent: false, + port, + }) + + const stop = async () => { + await db.stop() + await fs.rm(`./tmp-${name}`, { recursive: true, force: true }) + } + return { db, stop } +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index bc81515326..7e0143a784 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -59,6 +59,9 @@ importers: jose: specifier: ^4.14.4 version: 4.14.4 + kysely: + specifier: ^0.27.2 + version: 0.27.2 lodash.flow: specifier: ^3.5.0 version: 3.5.0 @@ -13014,6 +13017,11 @@ packages: resolution: {integrity: sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==} engines: {node: '>=6'} + /kysely@0.27.2: + resolution: {integrity: sha512-DmRvEfiR/NLpgsTbSxma2ldekhsdcd65+MNiKXyd/qj7w7X5e3cLkXxcj+MypsRDjPhHQ/CD5u3Eq1sBYzX0bw==} + engines: {node: '>=14.0.0'} + dev: false + /latest-version@7.0.0: resolution: {integrity: sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==} engines: {node: '>=14.16'} From c0e9ae00afd8ba55c640705d57b92b4b125b097e Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 8 Feb 2024 17:49:15 +0100 Subject: [PATCH 010/156] Port triggers to also support Postgres --- .../typescript/src/cli/migrations/builder.ts | 11 +- .../typescript/src/cli/migrations/migrate.ts | 7 +- .../src/drivers/better-sqlite3/adapter.ts | 5 + .../src/drivers/node-postgres/database.ts | 1 + .../src/drivers/node-postgres/index.ts | 3 + .../src/drivers/node-postgres/mock.ts | 4 + .../src/drivers/tauri-postgres/index.ts | 3 + .../src/drivers/tauri-postgres/mock.ts | 4 + clients/typescript/src/migrators/builder.ts | 8 +- clients/typescript/src/migrators/bundle.ts | 35 +-- .../src/migrators/query-builder/builder.ts | 205 +++++++++++++++++ .../src/migrators/query-builder/index.ts | 5 + .../src/migrators/query-builder/pgBuilder.ts | 207 ++++++++++++++++++ .../migrators/query-builder/sqliteBuilder.ts | 144 ++++++++++++ clients/typescript/src/migrators/schema.ts | 65 ++---- clients/typescript/src/migrators/triggers.ts | 180 +++++++-------- clients/typescript/src/satellite/process.ts | 143 +++++++----- clients/typescript/src/util/tablename.ts | 19 +- .../test/cli/migrations/builder.test.ts | 3 +- .../test/drivers/node-postgres.test.ts | 99 +++++---- .../test/migrators/postgres/schema.test.ts | 5 +- .../test/migrators/sqlite/builder.test.ts | 14 +- .../test/migrators/sqlite/schema.test.ts | 5 +- .../test/migrators/sqlite/triggers.test.ts | 18 +- clients/typescript/test/satellite/common.ts | 7 +- .../typescript/test/satellite/merge.test.ts | 30 ++- .../typescript/test/satellite/process.test.ts | 51 +++-- .../test/support/migrations/migrations.js | 48 ++-- .../test/support/migrations/pg-migrations.js | 32 +-- 29 files changed, 994 insertions(+), 367 deletions(-) create mode 100644 clients/typescript/src/migrators/query-builder/builder.ts create mode 100644 clients/typescript/src/migrators/query-builder/index.ts create mode 100644 clients/typescript/src/migrators/query-builder/pgBuilder.ts create mode 100644 clients/typescript/src/migrators/query-builder/sqliteBuilder.ts diff --git a/clients/typescript/src/cli/migrations/builder.ts b/clients/typescript/src/cli/migrations/builder.ts index fe52e73e50..24d4e49276 100644 --- a/clients/typescript/src/cli/migrations/builder.ts +++ b/clients/typescript/src/cli/migrations/builder.ts @@ -8,6 +8,7 @@ 
import {
   makeMigration,
 } from '../../migrators'
 import { isObject } from '../../util'
+import { QueryBuilder } from '../../migrators/query-builder'
 
 /*
  * This file defines functions to build migrations
@@ -35,10 +36,11 @@ import { isObject } from '../../util'
  */
 export async function buildMigrations(
   migrationsFolder: string,
-  migrationsFile: string
+  migrationsFile: string,
+  builder: QueryBuilder
 ) {
   try {
-    const migrations = await loadMigrations(migrationsFolder)
+    const migrations = await loadMigrations(migrationsFolder, builder)
     // Update the configuration file
     await fs.writeFile(
       migrationsFile,
@@ -73,7 +75,8 @@ export async function getMigrationNames(
  * @returns An array of migrations.
  */
 export async function loadMigrations(
-  migrationsFolder: string
+  migrationsFolder: string,
+  builder: QueryBuilder
 ): Promise<Migration[]> {
   const dirNames = await getMigrationNames(migrationsFolder)
   const migrationPaths = dirNames.map((dirName) =>
@@ -82,7 +85,7 @@
   const migrationMetaDatas = await Promise.all(
     migrationPaths.map(readMetadataFile)
   )
-  return migrationMetaDatas.map(makeMigration)
+  return migrationMetaDatas.map((data) => makeMigration(data, builder))
 }
 
 /**
diff --git a/clients/typescript/src/cli/migrations/migrate.ts b/clients/typescript/src/cli/migrations/migrate.ts
index 7a81a507f2..fe13e416eb 100644
--- a/clients/typescript/src/cli/migrations/migrate.ts
+++ b/clients/typescript/src/cli/migrations/migrate.ts
@@ -16,6 +16,7 @@ import { getConfig, type Config } from '../config'
 import { start } from '../docker-commands/command-start'
 import { stop } from '../docker-commands/command-stop'
 import { withConfig } from '../configure/command-with-config'
+import { sqliteBuilder } from '../../migrators/query-builder'
 
 // Rather than run `npx prisma` we resolve the path to the prisma binary so that
 // we can be sure we are using the same version of Prisma that is a dependency of
@@ -230,6 +231,10 @@ async function getLatestMigration(
  * @param configFolder Absolute path to the configuration folder.
*/ async function _generate(opts: Omit) { + // TODO: introduce an option for the generator which indicates + // whether the app runs on Sqlite or PG + // and then here use the right query builder + const builder = sqliteBuilder const config = opts.config // Create a unique temporary folder in which to save // intermediate files without risking collisions @@ -260,7 +265,7 @@ async function _generate(opts: Omit) { // Build the migrations console.log('Building migrations...') - await buildMigrations(migrationsFolder, migrationsFile) + await buildMigrations(migrationsFolder, migrationsFile, builder) console.log('Successfully built migrations') if ( diff --git a/clients/typescript/src/drivers/better-sqlite3/adapter.ts b/clients/typescript/src/drivers/better-sqlite3/adapter.ts index 78d79b2680..eb888dc9b0 100644 --- a/clients/typescript/src/drivers/better-sqlite3/adapter.ts +++ b/clients/typescript/src/drivers/better-sqlite3/adapter.ts @@ -26,6 +26,7 @@ export class DatabaseAdapter } async runInTransaction(...statements: DbStatement[]): Promise { + console.log(`runInTransaction: ${JSON.stringify(statements)}`) const txn = this.db.transaction((stmts: DbStatement[]) => { let rowsAffected = 0 for (const stmt of stmts) { @@ -51,6 +52,7 @@ export class DatabaseAdapter // Promise interface, but impl not actually async async run({ sql, args }: DbStatement): Promise { + console.log(`RUN: ${sql} - ${JSON.stringify(args)}`) const prep = this.db.prepare(sql) const res = prep.run(...wrapBindParams(args)) return { @@ -60,6 +62,7 @@ export class DatabaseAdapter // This `query` function does not enforce that the query is read-only async query({ sql, args }: DbStatement): Promise { + console.log(`QUERY: ${sql} - ${JSON.stringify(args)}`) const stmt = this.db.prepare(sql) return stmt.all(...wrapBindParams(args)) } @@ -83,6 +86,7 @@ class WrappedTx implements Tx { successCallback?: (tx: WrappedTx, res: RunResult) => void, errorCallback?: (error: any) => void ): void { + console.log(`wrapped tx run: ${sql} - ${JSON.stringify(args)}`) try { const prep = this.db.prepare(sql) const res = prep.run(...wrapBindParams(args)) @@ -99,6 +103,7 @@ class WrappedTx implements Tx { successCallback: (tx: WrappedTx, res: Row[]) => void, errorCallback?: (error: any) => void ): void { + console.log(`wrapped tx query: ${sql} - ${JSON.stringify(args)}`) try { const stmt = this.db.prepare(sql) const rows = stmt.all(...wrapBindParams(args)) diff --git a/clients/typescript/src/drivers/node-postgres/database.ts b/clients/typescript/src/drivers/node-postgres/database.ts index d9fb618d3d..6c981854a4 100644 --- a/clients/typescript/src/drivers/node-postgres/database.ts +++ b/clients/typescript/src/drivers/node-postgres/database.ts @@ -32,6 +32,7 @@ export class ElectricDatabase implements Database { ) {} async exec(statement: Statement): Promise { + console.log(`EXEC: ${statement.sql} - ${JSON.stringify(statement.args)}`) const { rows, rowCount } = await this.db.query( statement.sql, statement.args diff --git a/clients/typescript/src/drivers/node-postgres/index.ts b/clients/typescript/src/drivers/node-postgres/index.ts index 1c2558b693..98b39bd978 100644 --- a/clients/typescript/src/drivers/node-postgres/index.ts +++ b/clients/typescript/src/drivers/node-postgres/index.ts @@ -1,3 +1,4 @@ +import { DatabaseAdapter as DatabaseAdapterI } from '../../electric/adapter' import { DatabaseAdapter } from './adapter' import { Database, ElectricDatabase } from './database' import { ElectricConfig } from '../../config' @@ -25,6 +26,7 @@ export const 
electrify = async >( const migrator = opts?.migrator || new PgBundleMigrator(adapter, dbDescription.migrations) const socketFactory = opts?.socketFactory || WebSocketWeb + const prepare = async (_connection: DatabaseAdapterI) => {} const client = await baseElectrify( dbName, @@ -34,6 +36,7 @@ export const electrify = async >( config, { migrator, + prepare, ...opts, } ) diff --git a/clients/typescript/src/drivers/node-postgres/mock.ts b/clients/typescript/src/drivers/node-postgres/mock.ts index 99d7e162e5..95899754de 100644 --- a/clients/typescript/src/drivers/node-postgres/mock.ts +++ b/clients/typescript/src/drivers/node-postgres/mock.ts @@ -18,4 +18,8 @@ export class MockDatabase implements Database { rowsModified: 0, } } + + async stop(): Promise { + return + } } diff --git a/clients/typescript/src/drivers/tauri-postgres/index.ts b/clients/typescript/src/drivers/tauri-postgres/index.ts index 6151f00144..afd6ab908c 100644 --- a/clients/typescript/src/drivers/tauri-postgres/index.ts +++ b/clients/typescript/src/drivers/tauri-postgres/index.ts @@ -1,3 +1,4 @@ +import { DatabaseAdapter as DatabaseAdapterI } from '../../electric/adapter' import { DatabaseAdapter } from './adapter' import { Database, ElectricDatabase } from './database' import { ElectricConfig } from '../../config' @@ -24,6 +25,7 @@ export const electrify = async >( const migrator = opts?.migrator || new PgBundleMigrator(adapter, dbDescription.migrations) const socketFactory = opts?.socketFactory || WebSocketWeb + const prepare = async (_connection: DatabaseAdapterI) => {} const client = await baseElectrify( dbName, @@ -33,6 +35,7 @@ export const electrify = async >( config, { migrator, + prepare, ...opts, } ) diff --git a/clients/typescript/src/drivers/tauri-postgres/mock.ts b/clients/typescript/src/drivers/tauri-postgres/mock.ts index 99d7e162e5..95899754de 100644 --- a/clients/typescript/src/drivers/tauri-postgres/mock.ts +++ b/clients/typescript/src/drivers/tauri-postgres/mock.ts @@ -18,4 +18,8 @@ export class MockDatabase implements Database { rowsModified: 0, } } + + async stop(): Promise { + return + } } diff --git a/clients/typescript/src/migrators/builder.ts b/clients/typescript/src/migrators/builder.ts index 3f25b68996..dc30b83dae 100644 --- a/clients/typescript/src/migrators/builder.ts +++ b/clients/typescript/src/migrators/builder.ts @@ -3,6 +3,7 @@ import { SatOpMigrate } from '../_generated/protocol/satellite' import { base64, getProtocolVersion } from '../util' import { Migration } from './index' import { generateTriggersForTable } from '../satellite/process' +import { QueryBuilder } from './query-builder' const metaDataSchema = z .object({ @@ -69,7 +70,10 @@ export function parseMetadata(data: object): MetaData { * @param migration The migration's meta data. * @returns The corresponding migration. 
*/ -export function makeMigration(migration: MetaData): Migration { +export function makeMigration( + migration: MetaData, + builder: QueryBuilder +): Migration { const statements = migration.ops .map((op) => op.stmts.map((stmt) => stmt.sql)) .flat() @@ -84,7 +88,7 @@ export function makeMigration(migration: MetaData): Migration { }) const triggers = tables - .map(generateTriggersForTable) + .map((tbl) => generateTriggersForTable(tbl, builder)) .flat() .map((stmt) => stmt.sql) diff --git a/clients/typescript/src/migrators/bundle.ts b/clients/typescript/src/migrators/bundle.ts index 48c06c1614..bf561dacca 100644 --- a/clients/typescript/src/migrators/bundle.ts +++ b/clients/typescript/src/migrators/bundle.ts @@ -6,7 +6,7 @@ import { StmtMigration, } from './index' import { DatabaseAdapter } from '../electric/adapter' -import { getData as makeBaseMigration } from './schema' +import { buildInitialMigration as makeBaseMigration } from './schema' import Log from 'loglevel' import { SatelliteError, @@ -30,6 +30,7 @@ import { ExpressionBuilder, } from 'kysely' import { _electric_migrations } from '../satellite/config' +import { pgBuilder, QueryBuilder, sqliteBuilder } from './query-builder' export const SCHEMA_VSN_ERROR_MSG = `Local schema doesn't match server's. Clear local state through developer tools and retry connection manually. If error persists, re-generate the client. Check documentation (https://electric-sql.com/docs/reference/roadmap) to learn more.` @@ -47,10 +48,10 @@ abstract class BundleMigratorBase implements Migrator { adapter: DatabaseAdapter, migrations: Migration[] = [], queryBuilderConfig: KyselyConfig, - dialect: 'SQLite' | 'PG' + electricQueryBuilder: QueryBuilder ) { this.adapter = adapter - const baseMigration = makeBaseMigration(dialect) + const baseMigration = makeBaseMigration(electricQueryBuilder) this.migrations = [...baseMigration.migrations, ...migrations].map( makeStmtMigration ) @@ -60,9 +61,13 @@ abstract class BundleMigratorBase implements Migrator { /** * Returns a SQL statement that checks if the given table exists. + * @param namespace The namespace where to check. * @param tableName The name of the table to check for existence. */ - abstract createTableExistsStatement(tableName: string): Statement + abstract createTableExistsStatement( + namespace: string, + tableName: string + ): Statement async up(): Promise { const existing = await this.queryApplied() @@ -81,7 +86,7 @@ abstract class BundleMigratorBase implements Migrator { async migrationsTableExists(): Promise { // If this is the first time we're running migrations, then the // migrations table won't exist. - const tableExists = this.createTableExistsStatement(this.tableName) + const tableExists = this.createTableExistsStatement('main', this.tableName) const tables = await this.adapter.query(tableExists) return tables.length > 0 } @@ -92,7 +97,7 @@ abstract class BundleMigratorBase implements Migrator { } const existingRecords = ` - SELECT version FROM main.${this.tableName} + SELECT version FROM "main"."${this.tableName}" ORDER BY id ASC ` @@ -109,7 +114,7 @@ abstract class BundleMigratorBase implements Migrator { // The hard-coded version '0' below corresponds to the version of the internal migration defined in `schema.ts`. // We're ignoring it because this function is supposed to return the application schema version. 
const schemaVersion = ` - SELECT version FROM main.${this.tableName} + SELECT version FROM "main"."${this.tableName}" WHERE version != '0' ORDER BY version DESC LIMIT 1 @@ -161,7 +166,7 @@ abstract class BundleMigratorBase implements Migrator { } const { sql, parameters } = raw` - INSERT INTO main.${this.eb.table( + INSERT INTO "main".${this.eb.table( this.tableName )} (version, applied_at) VALUES (${version}, ${Date.now().toString()}) `.compile(this.queryBuilder) @@ -180,7 +185,7 @@ abstract class BundleMigratorBase implements Migrator { */ async applyIfNotAlready(migration: StmtMigration): Promise { const { sql, parameters } = raw` - SELECT 1 FROM main.${this.eb.table(this.tableName)} + SELECT 1 FROM "main".${this.eb.table(this.tableName)} WHERE version = ${migration.version} `.compile(this.queryBuilder) @@ -211,10 +216,10 @@ export class SqliteBundleMigrator extends BundleMigratorBase { createQueryCompiler: () => new SqliteQueryCompiler(), }, } - super(adapter, migrations, config, 'SQLite') + super(adapter, migrations, config, sqliteBuilder) } - createTableExistsStatement(tableName: string): Statement { + createTableExistsStatement(_namespace: string, tableName: string): Statement { return { sql: `SELECT 1 FROM sqlite_master WHERE type = 'table' AND name = ?`, args: [tableName], @@ -232,13 +237,13 @@ export class PgBundleMigrator extends BundleMigratorBase { createQueryCompiler: () => new PostgresQueryCompiler(), }, } - super(adapter, migrations, config, 'PG') + super(adapter, migrations, config, pgBuilder) } - createTableExistsStatement(tableName: string): Statement { + createTableExistsStatement(namespace: string, tableName: string): Statement { return { - sql: `SELECT 1 FROM information_schema.tables WHERE table_name = $1`, - args: [tableName], + sql: `SELECT 1 FROM information_schema.tables WHERE table_schema = $1 AND table_name = $2`, + args: [namespace, tableName], } } } diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts new file mode 100644 index 0000000000..0e0c18a255 --- /dev/null +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -0,0 +1,205 @@ +import { ForeignKey } from '../triggers' +import { QualifiedTablename } from '../../util' + +export abstract class QueryBuilder { + /** + * The autoincrementing integer primary key type for the current SQL dialect. + */ + abstract readonly AUTOINCREMENT_PK: string + + /** + * The type to use for BLOB for the current SQL dialect. + */ + abstract readonly BLOB: string + + /** + * Returns the given query if the current SQL dialect is PostgreSQL. + */ + abstract pgOnly(query: string): string + + /** + * Returns an array containing the given query if the current SQL dialect is PostgreSQL. + */ + abstract pgOnlyQuery(query: string): string[] + + /** + * Returns the given query if the current SQL dialect is SQLite. + */ + abstract sqliteOnly(query: string): string + + /** + * Returns an array containing the given query if the current SQL dialect is SQLite. + */ + abstract sqliteOnlyQuery(query: string): string[] + + /** + * Create an index on a table. + */ + abstract createIndex( + indexName: string, + onTable: QualifiedTablename, + columns: string[] + ): string + + /** + * Insert a row into a table, ignoring it if it already exists. + */ + abstract insertOrIgnore( + schema: string, + table: string, + columns: string[], + values: string[] + ): string + + /** + * Drop a trigger if it exists. 
+ */ + abstract dropTriggerIfExists( + triggerName: string, + namespace: string, + tablename: string + ): string + + /** + * Create a trigger that prevents updates to the primary key. + */ + abstract createNoFkUpdateTrigger( + namespace: string, + tablename: string, + pk: string[] + ): string[] + + /** + * Creates or replaces a trigger that prevents updates to the primary key. + */ + createOrReplaceNoFkUpdateTrigger( + namespace: string, + tablename: string, + pk: string[] + ): string[] { + return [ + this.dropTriggerIfExists( + `update_ensure_${namespace}_${tablename}_primarykey`, + namespace, + tablename + ), + ...this.createNoFkUpdateTrigger(namespace, tablename, pk), + ] + } + + /** + * Create a trigger that logs operations into the oplog. + */ + abstract createOplogTrigger( + opType: 'INSERT' | 'UPDATE' | 'DELETE', + namespace: string, + tableName: string, + newPKs: string, + newRows: string, + oldRows: string + ): string[] + + createOrReplaceOplogTrigger( + opType: 'INSERT' | 'UPDATE' | 'DELETE', + namespace: string, + tableName: string, + newPKs: string, + newRows: string, + oldRows: string + ): string[] { + return [ + this.dropTriggerIfExists( + `${opType.toLowerCase()}_${namespace}_${tableName}_into_oplog`, + namespace, + tableName + ), + ...this.createOplogTrigger( + opType, + namespace, + tableName, + newPKs, + newRows, + oldRows + ), + ] + } + + /** + * Creates or replaces a trigger that logs insertions into the oplog. + */ + createOrReplaceInsertTrigger = this.createOrReplaceOplogTrigger.bind( + this, + 'INSERT' + ) + + /** + * Creates or replaces a trigger that logs updates into the oplog. + */ + createOrReplaceUpdateTrigger = this.createOrReplaceOplogTrigger.bind( + this, + 'UPDATE' + ) + + /** + * Creates or replaces a trigger that logs deletions into the oplog. + */ + createOrReplaceDeleteTrigger = this.createOrReplaceOplogTrigger.bind( + this, + 'DELETE' + ) + + /** + * Creates a trigger that logs compensations for operations into the oplog. + */ + abstract createFkCompensationTrigger( + opType: 'INSERT' | 'UPDATE', + namespace: string, + tableName: string, + childKey: string, + fkTableNamespace: string, + fkTableName: string, + joinedFkPKs: string, + foreignKey: ForeignKey + ): string[] + + createOrReplaceFkCompensationTrigger( + opType: 'INSERT' | 'UPDATE', + namespace: string, + tableName: string, + childKey: string, + fkTableNamespace: string, + fkTableName: string, + joinedFkPKs: string, + foreignKey: ForeignKey + ): string[] { + return [ + this.dropTriggerIfExists( + `compensation_${opType.toLowerCase()}_${namespace}_${tableName}_${childKey}_into_oplog`, + namespace, + tableName + ), + ...this.createFkCompensationTrigger( + opType, + namespace, + tableName, + childKey, + fkTableNamespace, + fkTableName, + joinedFkPKs, + foreignKey + ), + ] + } + + /** + * Creates a trigger that logs compensations for insertions into the oplog. + */ + createOrReplaceInsertCompensationTrigger = + this.createOrReplaceFkCompensationTrigger.bind(this, 'INSERT') + + /** + * Creates a trigger that logs compensations for updates into the oplog. 
+ */ + createOrReplaceUpdateCompensationTrigger = + this.createOrReplaceFkCompensationTrigger.bind(this, 'UPDATE') +} diff --git a/clients/typescript/src/migrators/query-builder/index.ts b/clients/typescript/src/migrators/query-builder/index.ts new file mode 100644 index 0000000000..f1122f0844 --- /dev/null +++ b/clients/typescript/src/migrators/query-builder/index.ts @@ -0,0 +1,5 @@ +import sqliteBuilder from './sqliteBuilder' +import pgBuilder from './pgBuilder' +import { QueryBuilder as QueryBuilder } from './builder' + +export { sqliteBuilder, pgBuilder, QueryBuilder } diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts new file mode 100644 index 0000000000..b095f1d14d --- /dev/null +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -0,0 +1,207 @@ +import { dedent } from 'ts-dedent' +import { QualifiedTablename } from '../../util' +import { QueryBuilder } from './builder' +import { ForeignKey } from '../triggers' + +const quote = (col: string) => `"${col}"` + +class PgBuilder extends QueryBuilder { + readonly AUTOINCREMENT_PK = 'SERIAL PRIMARY KEY' + readonly BLOB = 'TEXT' + + pgOnly(query: string) { + return query + } + + pgOnlyQuery(query: string) { + return [query] + } + + sqliteOnly(_query: string) { + return '' + } + + sqliteOnlyQuery(_query: string) { + return [] + } + + createIndex( + indexName: string, + onTable: QualifiedTablename, + columns: string[] + ) { + const namespace = onTable.namespace + const tablename = onTable.tablename + return `CREATE INDEX IF NOT EXISTS ${indexName} ON "${namespace}"."${tablename}" (${columns + .map(quote) + .join(', ')})` + } + + insertOrIgnore( + schema: string, + table: string, + columns: string[], + values: string[] + ) { + return dedent` + INSERT INTO "${schema}"."${table}" (${columns.map(quote).join(', ')}) + VALUES (${values.join(', ')}) + ON CONFLICT DO NOTHING; + ` + } + + dropTriggerIfExists( + triggerName: string, + namespace: string, + tablename: string + ) { + return `DROP TRIGGER IF EXISTS ${triggerName} ON "${namespace}"."${tablename}";` + } + + createNoFkUpdateTrigger( + namespace: string, + tablename: string, + pk: string[] + ): string[] { + return [ + dedent` + CREATE OR REPLACE FUNCTION update_ensure_${namespace}_${tablename}_primarykey_function() + RETURNS TRIGGER AS $$ + BEGIN + ${pk + .map( + (col) => + dedent`IF OLD."${col}" IS DISTINCT FROM NEW."${col}" THEN + RAISE EXCEPTION 'Cannot change the value of column ${col} as it belongs to the primary key'; + END IF;` + ) + .join('\n')} + RETURN NEW; + END; + $$ LANGUAGE plpgsql; + `, + dedent` + CREATE TRIGGER update_ensure_${namespace}_${tablename}_primarykey + BEFORE UPDATE ON "${namespace}"."${tablename}" + FOR EACH ROW + EXECUTE FUNCTION update_ensure_${namespace}_${tablename}_primarykey_function(); + `, + ] + } + + createJsonObject(rows: string) { + return `jsonb_build_object(${rows})` + } + + createOplogTrigger( + opType: 'INSERT' | 'UPDATE' | 'DELETE', + namespace: string, + tableName: string, + newPKs: string, + newRows: string, + oldRows: string + ): string[] { + const opTypeLower = opType.toLowerCase() + const pk = this.createJsonObject(newPKs) + // Update has both the old and the new row + // Delete only has the old row + const newRecord = + opType === 'DELETE' ? 'NULL' : this.createJsonObject(newRows) + // Insert only has the new row + const oldRecord = + opType === 'INSERT' ? 
'NULL' : this.createJsonObject(oldRows) + + return [ + dedent` + CREATE OR REPLACE FUNCTION ${opTypeLower}_${namespace}_${tableName}_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${tableName}'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ( + '${namespace}', + '${tableName}', + '${opType}', + ${pk}, + ${newRecord}, + ${oldRecord}, + NULL + ); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + `, + dedent` + CREATE TRIGGER ${opTypeLower}_${namespace}_${tableName}_into_oplog + AFTER ${opType} ON "${namespace}"."${tableName}" + FOR EACH ROW + EXECUTE FUNCTION ${opTypeLower}_${namespace}_${tableName}_into_oplog_function(); + `, + ] + } + + createFkCompensationTrigger( + opType: 'INSERT' | 'UPDATE', + namespace: string, + tableName: string, + childKey: string, + fkTableNamespace: string, + fkTableName: string, + joinedFkPKs: string, + foreignKey: ForeignKey + ): string[] { + const opTypeLower = opType.toLowerCase() + + return [ + dedent` + CREATE OR REPLACE FUNCTION compensation_${opTypeLower}_${namespace}_${tableName}_${childKey}_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + meta_value INTEGER; + BEGIN + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = '${fkTableNamespace}' AND tablename = '${fkTableName}'; + + SELECT value INTO meta_value FROM main._electric_meta WHERE key = 'compensations'; + + IF flag_value = 1 AND meta_value = 1 THEN + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + SELECT + '${fkTableNamespace}', + '${fkTableName}', + 'UPDATE', + jsonb_build_object(${joinedFkPKs}), + jsonb_build_object(${joinedFkPKs}), + NULL, + NULL + FROM "${fkTableNamespace}"."${fkTableName}" + WHERE "${foreignKey.parentKey}" = NEW."${foreignKey.childKey}"; + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + `, + dedent` + CREATE TRIGGER compensation_${opTypeLower}_${namespace}_${tableName}_${childKey}_into_oplog + AFTER ${opType} ON "${namespace}"."${tableName}" + FOR EACH ROW + EXECUTE FUNCTION compensation_${opTypeLower}_${namespace}_${tableName}_${childKey}_into_oplog_function(); + `, + ] + } +} + +export default new PgBuilder() diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts new file mode 100644 index 0000000000..c12f53f876 --- /dev/null +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -0,0 +1,144 @@ +import { dedent } from 'ts-dedent' +import { QualifiedTablename } from '../../util' +import { QueryBuilder } from './builder' +import { ForeignKey } from '../triggers' + +class SqliteBuilder extends QueryBuilder { + readonly AUTOINCREMENT_PK = 'INTEGER PRIMARY KEY AUTOINCREMENT' + readonly BLOB = 'BLOB' + + pgOnly(_query: string) { + return '' + } + + pgOnlyQuery(_query: string) { + return [] + } + + sqliteOnly(query: string) { + return query + } + + sqliteOnlyQuery(query: string) { + return [query] + } + + createIndex( + indexName: string, + onTable: QualifiedTablename, + columns: string[] + ) { + const namespace = onTable.namespace + const tablename = onTable.tablename 
+ return `CREATE INDEX IF NOT EXISTS ${namespace}.${indexName} ON ${tablename} (${columns.join( + ', ' + )})` + } + + insertOrIgnore( + schema: string, + table: string, + columns: string[], + values: string[] + ) { + return dedent` + INSERT OR IGNORE INTO ${schema}.${table} (${columns.join(', ')}) + VALUES (${values.join(', ')}); + ` + } + + dropTriggerIfExists( + triggerName: string, + _namespace: string, + _tablename: string + ) { + return `DROP TRIGGER IF EXISTS ${triggerName};` + } + + createNoFkUpdateTrigger( + namespace: string, + tablename: string, + pk: string[] + ): string[] { + return [ + dedent` + CREATE TRIGGER update_ensure_${namespace}_${tablename}_primarykey + BEFORE UPDATE ON "${namespace}"."${tablename}" + BEGIN + SELECT + CASE + ${pk + .map( + (col) => + `WHEN old."${col}" != new."${col}" THEN\n\t\tRAISE (ABORT, 'cannot change the value of column ${col} as it belongs to the primary key')` + ) + .join('\n')} + END; + END; + `, + ] + } + + createJsonObject(rows: string) { + return `json_object(${rows})` + } + + createOplogTrigger( + opType: 'INSERT' | 'UPDATE' | 'DELETE', + namespace: string, + tableName: string, + newPKs: string, + newRows: string, + oldRows: string + ): string[] { + const opTypeLower = opType.toLowerCase() + const pk = this.createJsonObject(newPKs) + // Update has both the old and the new row + // Delete only has the old row + const newRecord = + opType === 'DELETE' ? 'NULL' : this.createJsonObject(newRows) + // Insert only has the new row + const oldRecord = + opType === 'INSERT' ? 'NULL' : this.createJsonObject(oldRows) + + return [ + dedent` + CREATE TRIGGER ${opTypeLower}_${namespace}_${tableName}_into_oplog + AFTER ${opType} ON "${namespace}"."${tableName}" + WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${tableName}') + BEGIN + INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) + VALUES ('${namespace}', '${tableName}', '${opType}', ${pk}, ${newRecord}, ${oldRecord}, NULL); + END; + `, + ] + } + + createFkCompensationTrigger( + opType: 'INSERT' | 'UPDATE', + namespace: string, + tableName: string, + childKey: string, + fkTableNamespace: string, + fkTableName: string, + joinedFkPKs: string, + foreignKey: ForeignKey + ): string[] { + const opTypeLower = opType.toLowerCase() + return [ + dedent` + CREATE TRIGGER compensation_${opTypeLower}_${namespace}_${tableName}_${childKey}_into_oplog + AFTER ${opType} ON "${namespace}"."${tableName}" + WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${fkTableNamespace}.${fkTableName}') AND + 1 = (SELECT value from _electric_meta WHERE key = 'compensations') + BEGIN + INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) + SELECT '${fkTableNamespace}', '${fkTableName}', 'COMPENSATION', json_object(${joinedFkPKs}), json_object(${joinedFkPKs}), NULL, NULL + FROM "${fkTableNamespace}"."${fkTableName}" WHERE "${foreignKey.parentKey}" = new."${foreignKey.childKey}"; + END; + `, + ] + } +} + +export default new SqliteBuilder() diff --git a/clients/typescript/src/migrators/schema.ts b/clients/typescript/src/migrators/schema.ts index 7bef26eae1..82a5681524 100644 --- a/clients/typescript/src/migrators/schema.ts +++ b/clients/typescript/src/migrators/schema.ts @@ -1,74 +1,41 @@ import { satelliteDefaults } from '../satellite/config' -import { QualifiedTablename } from '../util' +import { QueryBuilder } from './query-builder' 
export type { ElectricSchema } from '../satellite/config' const { metaTable, migrationsTable, oplogTable, triggersTable, shadowTable } = satelliteDefaults -export const getData = (dialect: 'SQLite' | 'PG') => { - const pgOnly = (query: string) => { - if (dialect === 'PG') { - return query - } - return '' - } - const pgOnlyQuery = (query: string) => { - if (dialect === 'PG') { - return [query] - } - return [] - } - - const AUTOINCREMENT_PK = - dialect === 'SQLite' - ? 'INTEGER PRIMARY KEY AUTOINCREMENT' - : 'SERIAL PRIMARY KEY' - const BLOB = dialect === 'SQLite' ? 'BLOB' : 'TEXT' - const create_index = ( - indexName: string, - onTable: QualifiedTablename, - columns: string[] - ) => { - const namespace = onTable.namespace - const tablename = onTable.tablename - if (dialect === 'SQLite') { - return `CREATE INDEX IF NOT EXISTS ${namespace}.${indexName} ON ${tablename} (${columns.join( - ', ' - )})` - } - return `CREATE INDEX IF NOT EXISTS ${indexName} ON ${namespace}.${tablename} (${columns.join( - ', ' - )})` - } - +export const buildInitialMigration = (builder: QueryBuilder) => { const data = { migrations: [ { statements: [ // The main schema, - ...pgOnlyQuery(`CREATE SCHEMA IF NOT EXISTS "main"`), + ...builder.pgOnlyQuery(`CREATE SCHEMA IF NOT EXISTS "main"`), //`-- The ops log table\n`, - `CREATE TABLE IF NOT EXISTS ${oplogTable} (\n rowid ${AUTOINCREMENT_PK},\n namespace TEXT NOT NULL,\n tablename TEXT NOT NULL,\n optype TEXT NOT NULL,\n primaryKey TEXT NOT NULL,\n newRow TEXT,\n oldRow TEXT,\n timestamp TEXT, clearTags TEXT DEFAULT '[]' NOT NULL\n);`, + `CREATE TABLE IF NOT EXISTS "${oplogTable.namespace}"."${oplogTable.tablename}" (\n "rowid" ${builder.AUTOINCREMENT_PK},\n "namespace" TEXT NOT NULL,\n "tablename" TEXT NOT NULL,\n "optype" TEXT NOT NULL,\n "primaryKey" TEXT NOT NULL,\n "newRow" TEXT,\n "oldRow" TEXT,\n "timestamp" TEXT, "clearTags" TEXT DEFAULT '[]' NOT NULL\n);`, // Add an index for the oplog - create_index('_electric_table_pk_reference', oplogTable, [ + builder.createIndex('_electric_table_pk_reference', oplogTable, [ 'namespace', 'tablename', 'primaryKey', ]), - create_index('_electric_timestamp', oplogTable, ['timestamp']), + builder.createIndex('_electric_timestamp', oplogTable, ['timestamp']), //`-- Somewhere to keep our metadata\n`, - `CREATE TABLE IF NOT EXISTS ${metaTable} (\n key TEXT PRIMARY KEY,\n value ${BLOB}\n);`, + `CREATE TABLE IF NOT EXISTS "${metaTable.namespace}"."${metaTable.tablename}" (\n "key" TEXT PRIMARY KEY,\n "value" ${builder.BLOB}\n);`, //`-- Somewhere to track migrations\n`, - `CREATE TABLE IF NOT EXISTS ${migrationsTable} (\n id ${AUTOINCREMENT_PK},\n version TEXT NOT NULL UNIQUE,\n applied_at TEXT NOT NULL\n);`, + `CREATE TABLE IF NOT EXISTS "${migrationsTable.namespace}"."${migrationsTable.tablename}" (\n "id" ${builder.AUTOINCREMENT_PK},\n "version" TEXT NOT NULL UNIQUE,\n "applied_at" TEXT NOT NULL\n);`, //`-- Initialisation of the metadata table\n`, - `INSERT INTO ${metaTable} (key, value) VALUES ('compensations', 1), ('lsn', ''), ('clientId', ''), ('subscriptions', ''), ('seenAdditionalData', '');`, + `INSERT INTO "${metaTable.namespace}"."${metaTable.tablename}" (key, value) VALUES ('compensations', 1), ('lsn', ''), ('clientId', ''), ('subscriptions', ''), ('seenAdditionalData', '');`, //`-- These are toggles for turning the triggers on and off\n`, - `DROP TABLE IF EXISTS ${triggersTable};`, - `CREATE TABLE ${triggersTable} (tablename TEXT PRIMARY KEY, flag INTEGER);`, + `DROP TABLE IF EXISTS 
"${triggersTable.namespace}"."${triggersTable.tablename}";`, + `CREATE TABLE "${triggersTable.namespace}"."${triggersTable.tablename}" ("namespace" TEXT, "tablename" TEXT, "flag" INTEGER, PRIMARY KEY ("namespace", "tablename"));`, //`-- Somewhere to keep dependency tracking information\n`, - `CREATE TABLE ${shadowTable} (\n ${pgOnly( - 'rowid SERIAL,' - )} namespace TEXT NOT NULL,\n tablename TEXT NOT NULL,\n primaryKey TEXT NOT NULL,\n tags TEXT NOT NULL,\n PRIMARY KEY (namespace, tablename, primaryKey));`, + `CREATE TABLE "${shadowTable.namespace}"."${ + shadowTable.tablename + }" (\n ${builder.pgOnly( + '"rowid" SERIAL,' + )} "namespace" TEXT NOT NULL,\n "tablename" TEXT NOT NULL,\n "primaryKey" TEXT NOT NULL,\n "tags" TEXT NOT NULL,\n PRIMARY KEY ("namespace", "tablename", "primaryKey"));`, ], version: '0', }, diff --git a/clients/typescript/src/migrators/triggers.ts b/clients/typescript/src/migrators/triggers.ts index 6e0098a1d3..76fd65736d 100644 --- a/clients/typescript/src/migrators/triggers.ts +++ b/clients/typescript/src/migrators/triggers.ts @@ -1,7 +1,8 @@ import { Statement } from '../util' import { dedent } from 'ts-dedent' +import { QueryBuilder } from './query-builder' -type ForeignKey = { +export type ForeignKey = { table: string childKey: string parentKey: string @@ -30,7 +31,6 @@ function mkStatement(sql: string): Statement { /** * Generates the triggers Satellite needs for the given table. * Assumes that the necessary meta tables already exist. - * @param tableFullName - Full name of the table for which to generate triggers. * @param table - A new or existing table for which to create/update the triggers. * @returns An array of SQLite statements that add the necessary oplog triggers. * @@ -39,8 +39,8 @@ function mkStatement(sql: string): Statement { * do not accept queries containing more than one SQL statement. 
*/ export function generateOplogTriggers( - tableFullName: TableFullName, - table: Omit + table: Omit, + builder: QueryBuilder ): Statement[] { const { tableName, namespace, columns, primary, columnTypes } = table @@ -49,68 +49,48 @@ export function generateOplogTriggers( const newRows = joinColsForJSON(columns, columnTypes, 'new') const oldRows = joinColsForJSON(columns, columnTypes, 'old') + const [dropFkTrigger, ...createFkTrigger] = + builder.createOrReplaceNoFkUpdateTrigger(namespace, tableName, primary) + const [dropInsertTrigger, ...createInsertTrigger] = + builder.createOrReplaceInsertTrigger( + namespace, + tableName, + newPKs, + newRows, + oldRows + ) + return [ - //`-- Toggles for turning the triggers on and off\n`, - dedent` - INSERT OR IGNORE INTO _electric_trigger_settings(tablename,flag) VALUES ('${tableFullName}', 1); - `, - //`\* Triggers for table ${tableName} *\\n - //`-- ensures primary key is immutable\n` - dedent` - DROP TRIGGER IF EXISTS update_ensure_${namespace}_${tableName}_primarykey; - `, - dedent` - CREATE TRIGGER update_ensure_${namespace}_${tableName}_primarykey - BEFORE UPDATE ON "${namespace}"."${tableName}" - BEGIN - SELECT - CASE - ${primary - .map( - (col) => - `WHEN old."${col}" != new."${col}" THEN\n\t\tRAISE (ABORT, 'cannot change the value of column ${col} as it belongs to the primary key')` - ) - .join('\n')} - END; - END; - `, - //`-- Triggers that add INSERT, UPDATE, DELETE operation to the _opslog table\n` - dedent` - DROP TRIGGER IF EXISTS insert_${namespace}_${tableName}_into_oplog; - `, - dedent` - CREATE TRIGGER insert_${namespace}_${tableName}_into_oplog - AFTER INSERT ON "${namespace}"."${tableName}" - WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == '${tableFullName}') - BEGIN - INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) - VALUES ('${namespace}', '${tableName}', 'INSERT', json_object(${newPKs}), json_object(${newRows}), NULL, NULL); - END; - `, - dedent` - DROP TRIGGER IF EXISTS update_${namespace}_${tableName}_into_oplog; - `, - dedent` - CREATE TRIGGER update_${namespace}_${tableName}_into_oplog - AFTER UPDATE ON "${namespace}"."${tableName}" - WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == '${tableFullName}') - BEGIN - INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) - VALUES ('${namespace}', '${tableName}', 'UPDATE', json_object(${newPKs}), json_object(${newRows}), json_object(${oldRows}), NULL); - END; - `, + // Toggles for turning the triggers on and off dedent` - DROP TRIGGER IF EXISTS delete_${namespace}_${tableName}_into_oplog; - `, - dedent` - CREATE TRIGGER delete_${namespace}_${tableName}_into_oplog - AFTER DELETE ON "${namespace}"."${tableName}" - WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == '${tableFullName}') - BEGIN - INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) - VALUES ('${namespace}', '${tableName}', 'DELETE', json_object(${oldPKs}), NULL, json_object(${oldRows}), NULL); - END; + ${builder.insertOrIgnore( + 'main', + '_electric_trigger_settings', + ['namespace', 'tablename', 'flag'], + [`'${namespace}'`, `'${tableName}'`, '1'] + )} `, + // Triggers for table ${tableName} + // ensures primary key is immutable + dropFkTrigger, + ...createFkTrigger, + // Triggers that add INSERT, UPDATE, DELETE operation to the _opslog table + dropInsertTrigger, + ...createInsertTrigger, + 
...builder.createOrReplaceUpdateTrigger( + namespace, + tableName, + newPKs, + newRows, + oldRows + ), + ...builder.createOrReplaceDeleteTrigger( + namespace, + tableName, + oldPKs, + newRows, + oldRows + ), ].map(mkStatement) } @@ -128,7 +108,10 @@ export function generateOplogTriggers( * @param tables Map of all tables (needed to look up the tables that are pointed at by FKs). * @returns An array of SQLite statements that add the necessary compensation triggers. */ -function generateCompensationTriggers(table: Table): Statement[] { +function generateCompensationTriggers( + table: Table, + builder: QueryBuilder +): Statement[] { const { tableName, namespace, foreignKeys, columnTypes } = table const makeTriggers = (foreignKey: ForeignKey) => { @@ -148,37 +131,33 @@ function generateCompensationTriggers(table: Table): Statement[] { [fkTablePK]: columnTypes[foreignKey.childKey], }) + const [dropInsertTrigger, ...createInsertTrigger] = + builder.createOrReplaceInsertCompensationTrigger( + namespace, + tableName, + childKey, + fkTableNamespace, + fkTableName, + joinedFkPKs, + foreignKey + ) + return [ - //`-- Triggers for foreign key compensations\n`, - dedent` - DROP TRIGGER IF EXISTS compensation_insert_${namespace}_${tableName}_${childKey}_into_oplog;`, // The compensation trigger inserts a row in `_electric_oplog` if the row pointed at by the FK exists // The way how this works is that the values for the row are passed to the nested SELECT // which will return those values for every record that matches the query // which can be at most once since we filter on the foreign key which is also the primary key and thus is unique. - dedent` - CREATE TRIGGER compensation_insert_${namespace}_${tableName}_${childKey}_into_oplog - AFTER INSERT ON "${namespace}"."${tableName}" - WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == '${fkTableNamespace}.${fkTableName}') AND - 1 == (SELECT value from _electric_meta WHERE key == 'compensations') - BEGIN - INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) - SELECT '${fkTableNamespace}', '${fkTableName}', 'COMPENSATION', json_object(${joinedFkPKs}), json_object(${joinedFkPKs}), NULL, NULL - FROM "${fkTableNamespace}"."${fkTableName}" WHERE "${foreignKey.parentKey}" = new."${foreignKey.childKey}"; - END; - `, - dedent`DROP TRIGGER IF EXISTS compensation_update_${namespace}_${tableName}_${foreignKey.childKey}_into_oplog;`, - dedent` - CREATE TRIGGER compensation_update_${namespace}_${tableName}_${foreignKey.childKey}_into_oplog - AFTER UPDATE ON "${namespace}"."${tableName}" - WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == '${fkTableNamespace}.${fkTableName}') AND - 1 == (SELECT value from _electric_meta WHERE key == 'compensations') - BEGIN - INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) - SELECT '${fkTableNamespace}', '${fkTableName}', 'COMPENSATION', json_object(${joinedFkPKs}), json_object(${joinedFkPKs}), NULL, NULL - FROM "${fkTableNamespace}"."${fkTableName}" WHERE "${foreignKey.parentKey}" = new."${foreignKey.childKey}"; - END; - `, + dropInsertTrigger, + ...createInsertTrigger, + ...builder.createOrReplaceUpdateCompensationTrigger( + namespace, + tableName, + foreignKey.childKey, + fkTableNamespace, + fkTableName, + joinedFkPKs, + foreignKey + ), ].map(mkStatement) } const fkTriggers = foreignKeys.map((fk) => makeTriggers(fk)) @@ -193,11 +172,11 @@ function generateCompensationTriggers(table: Table): 
Statement[] { * @returns An array of SQLite statements that add the necessary oplog and compensation triggers. */ export function generateTableTriggers( - tableFullName: TableFullName, - table: Table + table: Table, + builder: QueryBuilder ): Statement[] { - const oplogTriggers = generateOplogTriggers(tableFullName, table) - const fkTriggers = generateCompensationTriggers(table) + const oplogTriggers = generateOplogTriggers(table, builder) + const fkTriggers = generateCompensationTriggers(table, builder) return oplogTriggers.concat(fkTriggers) } @@ -206,17 +185,20 @@ export function generateTableTriggers( * @param tables - Dictionary mapping full table names to the corresponding tables. * @returns An array of SQLite statements that add the necessary oplog and compensation triggers for all tables. */ -export function generateTriggers(tables: Tables): Statement[] { +export function generateTriggers( + tables: Tables, + builder: QueryBuilder +): Statement[] { const tableTriggers: Statement[] = [] - tables.forEach((table, tableFullName) => { - const triggers = generateTableTriggers(tableFullName, table) + tables.forEach((table) => { + const triggers = generateTableTriggers(table, builder) tableTriggers.push(...triggers) }) const stmts = [ - { sql: 'DROP TABLE IF EXISTS _electric_trigger_settings;' }, + { sql: 'DROP TABLE IF EXISTS main._electric_trigger_settings;' }, { - sql: 'CREATE TABLE _electric_trigger_settings(tablename TEXT PRIMARY KEY, flag INTEGER);', + sql: 'CREATE TABLE main._electric_trigger_settings(namespace TEXT, tablename TEXT, flag INTEGER, PRIMARY KEY(namespace, tablename));', }, ...tableTriggers, ] diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index 51f95429cb..ae6b099d1e 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -82,6 +82,8 @@ import { inferRelationsFromSQLite } from '../util/relations' import { decodeUserIdFromToken } from '../auth/secure' import { InvalidArgumentError } from '../client/validation/errors/invalidArgumentError' import Long from 'long' +import { QueryBuilder } from '../migrators/query-builder' +import { sqliteBuilder } from '../migrators/query-builder' type ChangeAccumulator = { [key: string]: Change @@ -165,7 +167,9 @@ export class SatelliteProcess implements Satellite { migrator: Migrator, notifier: Notifier, client: Client, - opts: SatelliteOpts + opts: SatelliteOpts, + // TODO: turn `builder` into an abstract readonly field when introducing subclasses of the process + private builder: QueryBuilder = sqliteBuilder ) { this.dbName = dbName this.adapter = adapter @@ -300,18 +304,15 @@ export class SatelliteProcess implements Satellite { const tables = uniqWith(allTables, (a, b) => a.isEqual(b)) // TODO: table and schema warrant escaping here too, but they aren't in the triggers table. 
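For context on the trigger toggling just below: with `_electric_trigger_settings` now keyed by `(namespace, tablename)`, the UPDATE built by the reworked `_updateTriggerSettings` further down in this file should come out roughly as follows for two affected tables. This is a sketch of the expected output only, assuming the default `"main"."_electric_trigger_settings"` triggers table; the table names are illustrative.

// Sketch only, not part of the patch: the statement _updateTriggerSettings is expected to build.
const disableParentAndChild = {
  sql:
    'UPDATE "main"."_electric_trigger_settings" SET flag = ? ' +
    'WHERE (namespace = ? AND tablename = ?) OR (namespace = ? AND tablename = ?)',
  args: [0, 'main', 'parent', 'main', 'child'], // flag 0 disables the triggers, 1 re-enables them
}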
- const tablenames = tables.map((x) => x.toString()) - const deleteStmts = tables.map((x) => ({ - sql: `DELETE FROM ${x.toString()}`, + sql: `DELETE FROM "${x.namespace}".""${x.table}`, })) const stmtsWithTriggers = [ - // reverts to off on commit/abort { sql: 'PRAGMA defer_foreign_keys = ON' }, - ...this._disableTriggers(tablenames), + ...this._disableTriggers(tables), ...deleteStmts, - ...this._enableTriggers(tablenames), + ...this._enableTriggers(tables), ] await this.adapter.runInTransaction(...stmtsWithTriggers) @@ -489,9 +490,9 @@ export class SatelliteProcess implements Satellite { const groupedChanges = new Map< string, { - relation: Relation - dataChanges: InitialDataChange[] - tableName: QualifiedTablename + columns: string[] + records: InitialDataChange['record'][] + table: QualifiedTablename } >() @@ -508,10 +509,10 @@ export class SatelliteProcess implements Satellite { const changeGroup = groupedChanges.get(tableNameString)! changeGroup.dataChanges.push(op) } else { - groupedChanges.set(tableNameString, { - relation: op.relation, - dataChanges: [op], - tableName: tableName, + groupedChanges.set(tableName.toString(), { + columns: op.relation.columns.map((x) => x.name), + records: [op.record], + table: tableName, }) } @@ -530,14 +531,21 @@ export class SatelliteProcess implements Satellite { }) } + const qualifiedTableNames = [ + ...Array.from(groupedChanges.values()).map((chg) => chg.table), + ] + + console.log(`Apply subs data: ${JSON.stringify(qualifiedTableNames)}`) + // Disable trigger for all affected tables - stmts.push(...this._disableTriggers([...groupedChanges.keys()])) + stmts.push(...this._disableTriggers(qualifiedTableNames)) // For each table, do a batched insert - for (const [table, { relation, dataChanges }] of groupedChanges) { + for (const [_table, { relation, dataChanges, table }] of groupedChanges) { const records = dataChanges.map((change) => change.record) const columnNames = relation.columns.map((col) => col.name) - const sqlBase = `INSERT OR IGNORE INTO ${table} (${columnNames.join( + const qualifiedTableName = `"${table.namespace}"."${table.tablename}"` + const sqlBase = `INSERT OR IGNORE INTO ${qualifiedTableName} (${columnNames.join( ', ' )}) VALUES ` @@ -552,10 +560,11 @@ export class SatelliteProcess implements Satellite { } // And re-enable the triggers for all of them - stmts.push(...this._enableTriggers([...groupedChanges.keys()])) + stmts.push(...this._enableTriggers(qualifiedTableNames)) // Then do a batched insert for the shadow table - const upsertShadowStmt = `INSERT or REPLACE INTO ${this.opts.shadowTable.toString()} (namespace, tablename, primaryKey, tags) VALUES ` + const qualifiedShadowTable = `"${this.opts.shadowTable.namespace}"."${this.opts.shadowTable.tablename}"` + const upsertShadowStmt = `INSERT or REPLACE INTO ${qualifiedShadowTable} (namespace, tablename, primaryKey, tags) VALUES ` stmts.push( ...prepareInsertBatchedStatements( upsertShadowStmt, @@ -968,8 +977,8 @@ export class SatelliteProcess implements Satellite { } try { - const oplog = this.opts.oplogTable - const shadow = this.opts.shadowTable + const oplog = `"${this.opts.oplogTable.namespace}"."${this.opts.oplogTable.tablename}"` + const shadow = `"${this.opts.shadowTable.namespace}"."${this.opts.shadowTable.tablename}"` const timestamp = new Date() const newTag = this._generateTag(timestamp) @@ -1176,15 +1185,19 @@ export class SatelliteProcess implements Satellite { tags: encodeTags(entryChanges.tags), } + const qualifiedTableName = 
QualifiedTablename.parse(tablenameStr) + switch (entryChanges.optype) { case OPTYPES.gone: case OPTYPES.delete: - stmts.push(_applyDeleteOperation(entryChanges, tablenameStr)) + stmts.push(_applyDeleteOperation(entryChanges, qualifiedTableName)) stmts.push(this._deleteShadowTagsStatement(shadowEntry)) break default: - stmts.push(_applyNonDeleteOperation(entryChanges, tablenameStr)) + stmts.push( + _applyNonDeleteOperation(entryChanges, qualifiedTableName) + ) stmts.push(this._updateShadowTagsStatement(shadowEntry)) } } @@ -1200,7 +1213,7 @@ export class SatelliteProcess implements Satellite { async _getEntries(since?: number): Promise { // `rowid` is never below 0, so -1 means "everything" since ??= -1 - const oplog = this.opts.oplogTable.toString() + const oplog = `"${this.opts.oplogTable.namespace}"."${this.opts.oplogTable.tablename}"` const selectEntries = ` SELECT * FROM ${oplog} @@ -1213,7 +1226,7 @@ export class SatelliteProcess implements Satellite { } _deleteShadowTagsStatement(shadow: ShadowEntry): Statement { - const shadowTable = this.opts.shadowTable.toString() + const shadowTable = `"${this.opts.shadowTable.namespace}"."${this.opts.shadowTable.tablename}"` const deleteRow = ` DELETE FROM ${shadowTable} WHERE namespace = ? AND @@ -1227,7 +1240,7 @@ export class SatelliteProcess implements Satellite { } _updateShadowTagsStatement(shadow: ShadowEntry): Statement { - const shadowTable = this.opts.shadowTable.toString() + const shadowTable = `"${this.opts.shadowTable.namespace}"."${this.opts.shadowTable.tablename}"` const updateTags = ` INSERT or REPLACE INTO ${shadowTable} (namespace, tablename, primaryKey, tags) VALUES (?, ?, ?, ?); @@ -1345,7 +1358,10 @@ export class SatelliteProcess implements Satellite { // We will create/update triggers for this new/updated table // so store it in `tablenamesSet` such that those // triggers can be disabled while executing the transaction - const affectedTable = change.table.name + const affectedTable = new QualifiedTablename( + 'main', + change.table.name + ).toString() // store the table information to generate the triggers after this `forEach` affectedTables.set(affectedTable, change.table) tablenamesSet.add(affectedTable) @@ -1358,14 +1374,18 @@ export class SatelliteProcess implements Satellite { // Also add statements to create the necessary triggers for the created/updated table affectedTables.forEach((table) => { - const triggers = generateTriggersForTable(table) + const triggers = generateTriggersForTable(table, this.builder) stmts.push(...triggers) txStmts.push(...triggers) }) // Disable the newly created triggers // during the processing of this transaction - stmts.push(...this._disableTriggers([...createdTables])) + const createdQualifiedTables = Array.from(createdTables).map( + QualifiedTablename.parse + ) + console.log(`createdTablenames IN TRANSACTION: ${createdTables}`) + stmts.push(...this._disableTriggers(createdQualifiedTables)) newTables = new Set([...newTables, ...createdTables]) } @@ -1386,11 +1406,15 @@ export class SatelliteProcess implements Satellite { // Now run the DML and DDL statements in-order in a transaction const tablenames = Array.from(tablenamesSet) + console.log(`tablenames IN TRANSACTION: ${tablenames}`) + const qualifiedTables = tablenames.map(QualifiedTablename.parse) const notNewTableNames = tablenames.filter((t) => !newTables.has(t)) + console.log(`notNewTablenames IN TRANSACTION: ${notNewTableNames}`) + const notNewQualifiedTables = notNewTableNames.map(QualifiedTablename.parse) - const allStatements 
= this._disableTriggers(notNewTableNames) + const allStatements = this._disableTriggers(notNewQualifiedTables) .concat(stmts) - .concat(this._enableTriggers(tablenames)) + .concat(this._enableTriggers(qualifiedTables)) if (transaction.migrationVersion) { // If a migration version is specified @@ -1435,23 +1459,29 @@ export class SatelliteProcess implements Satellite { } } - _disableTriggers(tablenames: string[]): Statement[] { - return this._updateTriggerSettings(tablenames, 0) + _disableTriggers(tables: QualifiedTablename[]): Statement[] { + return this._updateTriggerSettings(tables, 0) } - _enableTriggers(tablenames: string[]): Statement[] { - return this._updateTriggerSettings(tablenames, 1) + _enableTriggers(tables: QualifiedTablename[]): Statement[] { + return this._updateTriggerSettings(tables, 1) } - _updateTriggerSettings(tablenames: string[], flag: 0 | 1): Statement[] { - const triggers = this.opts.triggersTable.toString() - if (tablenames.length > 0) + _updateTriggerSettings( + tables: QualifiedTablename[], + flag: 0 | 1 + ): Statement[] { + const triggers = `"${this.opts.triggersTable.namespace}"."${this.opts.triggersTable.tablename}"` + const namespacesAndTableNames = tables + .map((tbl) => [tbl.namespace, tbl.tablename]) + .flat() + if (tables.length > 0) return [ { - sql: `UPDATE ${triggers} SET flag = ? WHERE ${tablenames - .map(() => 'tablename = ?') + sql: `UPDATE ${triggers} SET flag = ? WHERE ${tables + .map(() => '(namespace = ? AND tablename = ?)') .join(' OR ')}`, - args: [flag, ...tablenames], + args: [flag, ...namespacesAndTableNames], }, ] else return [] @@ -1479,7 +1509,7 @@ export class SatelliteProcess implements Satellite { ): Statement _setMetaStatement(key: Uuid, value: string | null): Statement _setMetaStatement(key: string, value: SqlValue) { - const meta = this.opts.metaTable.toString() + const meta = `"${this.opts.metaTable.namespace}"."${this.opts.metaTable.tablename}"` const sql = `UPDATE ${meta} SET value = ? 
WHERE key = ?` const args = [value, key] @@ -1502,7 +1532,7 @@ export class SatelliteProcess implements Satellite { async _getMeta(key: Uuid): Promise async _getMeta(key: K): Promise async _getMeta(key: string) { - const meta = this.opts.metaTable.toString() + const meta = `"${this.opts.metaTable.namespace}"."${this.opts.metaTable.tablename}"` const sql = `SELECT value from ${meta} WHERE key = ?` const args = [key] @@ -1538,7 +1568,7 @@ export class SatelliteProcess implements Satellite { async _garbageCollectOplog(commitTimestamp: Date): Promise { const isoString = commitTimestamp.toISOString() - const oplog = this.opts.oplogTable.tablename.toString() + const oplog = `"${this.opts.oplogTable.namespace}"."${this.opts.oplogTable.tablename}"` await this.adapter.run({ sql: `DELETE FROM ${oplog} WHERE timestamp = ?`, @@ -1582,7 +1612,7 @@ export class SatelliteProcess implements Satellite { function _applyDeleteOperation( entryChanges: ShadowEntryChanges, - tablenameStr: string + qualifiedTableName: QualifiedTablename ): Statement { const pkEntries = Object.entries(entryChanges.primaryKeyCols) if (pkEntries.length === 0) @@ -1599,20 +1629,24 @@ function _applyDeleteOperation( ) return { - sql: `DELETE FROM ${tablenameStr} WHERE ${params.where.join(' AND ')}`, + sql: `DELETE FROM "${qualifiedTableName.namespace}"."${ + qualifiedTableName.tablename + }" WHERE ${params.where.join(' AND ')}`, args: params.values, } } function _applyNonDeleteOperation( { fullRow, primaryKeyCols }: ShadowEntryChanges, - tablenameStr: string + qualifiedTableName: QualifiedTablename ): Statement { const columnNames = Object.keys(fullRow) const columnValues = Object.values(fullRow) - let insertStmt = `INTO ${tablenameStr}(${columnNames.join( - ', ' - )}) VALUES (${columnValues.map((_) => '?').join(',')})` + let insertStmt = `INTO "${qualifiedTableName.namespace}"."${ + qualifiedTableName.tablename + }" (${columnNames.join(', ')}) VALUES (${columnValues + .map((_) => '?') + .join(',')})` const updateColumnStmts = columnNames .filter((c) => !(c in primaryKeyCols)) @@ -1639,7 +1673,10 @@ function _applyNonDeleteOperation( return { sql: insertStmt, args: columnValues } } -export function generateTriggersForTable(tbl: MigrationTable): Statement[] { +export function generateTriggersForTable( + tbl: MigrationTable, + builder: QueryBuilder +): Statement[] { const table = { tableName: tbl.name, namespace: 'main', @@ -1658,6 +1695,6 @@ export function generateTriggersForTable(tbl: MigrationTable): Statement[] { tbl.columns.map((col) => [col.name, col.pgType!.name.toUpperCase()]) ), } - const fullTableName = table.namespace + '.' + table.tableName - return generateTableTriggers(fullTableName, table) + + return generateTableTriggers(table, builder) } diff --git a/clients/typescript/src/util/tablename.ts b/clients/typescript/src/util/tablename.ts index 837c6c0f29..ba5641911b 100644 --- a/clients/typescript/src/util/tablename.ts +++ b/clients/typescript/src/util/tablename.ts @@ -14,7 +14,24 @@ export class QualifiedTablename { } toString(): string { - return `${this.namespace}.${this.tablename}` + // Don't collapse it to "." because that can lead to clashes + // since both `QualifiedTablename("foo", "bar.baz")` and `QualifiedTablename("foo.bar", "baz")` + // would be collapsed to "foo.bar.baz". 
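+ // For illustration: both `new QualifiedTablename('foo', 'bar.baz')` and
+ // `new QualifiedTablename('foo.bar', 'baz')` used to collapse to 'foo.bar.baz';
+ // the JSON form keeps them apart, e.g. '{"namespace":"foo","tablename":"bar.baz"}'
+ // vs '{"namespace":"foo.bar","tablename":"baz"}', and `parse` below restores the original.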
+ return JSON.stringify({ + namespace: this.namespace, + tablename: this.tablename, + }) + } + + static parse(json: string): QualifiedTablename { + try { + const { namespace, tablename } = JSON.parse(json) + return new QualifiedTablename(namespace, tablename) + } catch (_e) { + throw new Error( + 'Could not parse string into a qualified table name: ' + json + ) + } } } diff --git a/clients/typescript/test/cli/migrations/builder.test.ts b/clients/typescript/test/cli/migrations/builder.test.ts index 31b7a15a65..56ebe64fe9 100644 --- a/clients/typescript/test/cli/migrations/builder.test.ts +++ b/clients/typescript/test/cli/migrations/builder.test.ts @@ -2,6 +2,7 @@ import test from 'ava' import fs from 'fs/promises' import path from 'path' import { buildMigrations } from '../../../src/cli/migrations/builder' +import { sqliteBuilder } from '../../../src/migrators/query-builder' const migrationsFolder = path.join('./test/migrators/support/migrations') @@ -31,7 +32,7 @@ test('write migration to configuration file', async (t) => { const ogMigrations = await importMigrations() t.deepEqual(ogMigrations, []) - await buildMigrations(migrationsFolder, testMigrationsFile) + await buildMigrations(migrationsFolder, testMigrationsFile, sqliteBuilder) const newMigrations = await importMigrations() const versions = newMigrations.map((m: any) => m.version) t.deepEqual(versions, ['20230613112725_814', '20230613112735_992']) diff --git a/clients/typescript/test/drivers/node-postgres.test.ts b/clients/typescript/test/drivers/node-postgres.test.ts index 2f667927a1..090deaf170 100644 --- a/clients/typescript/test/drivers/node-postgres.test.ts +++ b/clients/typescript/test/drivers/node-postgres.test.ts @@ -32,8 +32,10 @@ test('database adapter query works', async (t) => { }) // Test with an actual embedded-postgres DB +let port = 5321 +let i = 1 async function makeAdapter() { - const { db, stop } = await makePgDatabase('driver-test') + const { db, stop } = await makePgDatabase(`driver-test-${i++}`, port++) const adapter = new DatabaseAdapter(db) const createTableSql = 'CREATE TABLE IF NOT EXISTS Post(id TEXT PRIMARY KEY, title TEXT, contents TEXT, nbr integer);' @@ -41,7 +43,7 @@ async function makeAdapter() { return { adapter, stop } } -test.serial('adapter run works on real DB', async (t) => { +test('adapter run works on real DB', async (t) => { const { adapter, stop } = await makeAdapter() const insertRecordSql = "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)" @@ -50,7 +52,7 @@ test.serial('adapter run works on real DB', async (t) => { await stop() }) -test.serial('adapter query works on real DB', async (t) => { +test('adapter query works on real DB', async (t) => { const { adapter, stop } = await makeAdapter() const insertRecordSql = "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)" @@ -63,7 +65,7 @@ test.serial('adapter query works on real DB', async (t) => { await stop() }) -test.serial('adapter runInTransaction works on real DB', async (t) => { +test('adapter runInTransaction works on real DB', async (t) => { const { adapter, stop } = await makeAdapter() const insertRecord1Sql = "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)" @@ -84,7 +86,7 @@ test.serial('adapter runInTransaction works on real DB', async (t) => { await stop() }) -test.serial('adapter runInTransaction rolls back on conflict', async (t) => { +test('adapter runInTransaction rolls back on conflict', async (t) => { const { adapter, stop } = await makeAdapter() 
const insertRecord1Sql = "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)" @@ -110,53 +112,50 @@ test.serial('adapter runInTransaction rolls back on conflict', async (t) => { await stop() }) -test.serial( - 'adapter supports dependent queries in transaction on real DB', - async (t) => { - const { adapter, stop } = await makeAdapter() - const [txRes, rowsAffected] = (await adapter.transaction>( - (tx, setResult) => { - let rowsAffected = 0 - tx.run( - { - sql: "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)", - }, - (tx2, res) => { - rowsAffected += res.rowsAffected - const select = { sql: "SELECT nbr FROM Post WHERE id = 'i1'" } - tx2.query(select, (tx3, rows) => { - const [res] = rows as unknown as Array<{ nbr: number }> - const newNbr = res.nbr + 2 - tx3.run( - { - sql: `INSERT INTO Post (id, title, contents, nbr) VALUES ('i2', 't2', 'c2', ${newNbr})`, - }, - (_, res) => { - rowsAffected += res.rowsAffected - setResult([newNbr, rowsAffected]) - } - ) - }) - } - ) - } - )) as unknown as Array - - t.is(txRes, 20) - t.is(rowsAffected, 2) - - const selectAll = 'SELECT * FROM Post' - const res = await adapter.query({ sql: selectAll }) +test('adapter supports dependent queries in transaction on real DB', async (t) => { + const { adapter, stop } = await makeAdapter() + const [txRes, rowsAffected] = (await adapter.transaction>( + (tx, setResult) => { + let rowsAffected = 0 + tx.run( + { + sql: "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)", + }, + (tx2, res) => { + rowsAffected += res.rowsAffected + const select = { sql: "SELECT nbr FROM Post WHERE id = 'i1'" } + tx2.query(select, (tx3, rows) => { + const [res] = rows as unknown as Array<{ nbr: number }> + const newNbr = res.nbr + 2 + tx3.run( + { + sql: `INSERT INTO Post (id, title, contents, nbr) VALUES ('i2', 't2', 'c2', ${newNbr})`, + }, + (_, res) => { + rowsAffected += res.rowsAffected + setResult([newNbr, rowsAffected]) + } + ) + }) + } + ) + } + )) as unknown as Array + + t.is(txRes, 20) + t.is(rowsAffected, 2) + + const selectAll = 'SELECT * FROM Post' + const res = await adapter.query({ sql: selectAll }) - t.deepEqual(res, [ - { id: 'i1', title: 't1', contents: 'c1', nbr: 18 }, - { id: 'i2', title: 't2', contents: 'c2', nbr: 20 }, - ]) - await stop() - } -) + t.deepEqual(res, [ + { id: 'i1', title: 't1', contents: 'c1', nbr: 18 }, + { id: 'i2', title: 't2', contents: 'c2', nbr: 20 }, + ]) + await stop() +}) -test.serial('adapter rolls back dependent queries on conflict', async (t) => { +test('adapter rolls back dependent queries on conflict', async (t) => { const { adapter, stop } = await makeAdapter() try { await adapter.transaction((tx) => { diff --git a/clients/typescript/test/migrators/postgres/schema.test.ts b/clients/typescript/test/migrators/postgres/schema.test.ts index 818a11e26b..3dd24b5c85 100644 --- a/clients/typescript/test/migrators/postgres/schema.test.ts +++ b/clients/typescript/test/migrators/postgres/schema.test.ts @@ -39,13 +39,14 @@ test('check schema keys are unique', async (t) => { const migrator = new BundleMigrator(adapter, migrations) await migrator.up() + const metaTable = `"${satelliteDefaults.metaTable.namespace}"."${satelliteDefaults.metaTable.tablename}"` await adapter.run({ - sql: `INSERT INTO ${satelliteDefaults.metaTable}(key, value) values ('key', 'value')`, + sql: `INSERT INTO ${metaTable} (key, value) values ('key', 'value')`, }) try { await adapter.run({ - sql: `INSERT INTO ${satelliteDefaults.metaTable}(key, value) 
values ('key', 'value')`, + sql: `INSERT INTO ${metaTable} (key, value) values ('key', 'value')`, }) t.fail() } catch (err) { diff --git a/clients/typescript/test/migrators/sqlite/builder.test.ts b/clients/typescript/test/migrators/sqlite/builder.test.ts index fb961bb131..33710525f4 100644 --- a/clients/typescript/test/migrators/sqlite/builder.test.ts +++ b/clients/typescript/test/migrators/sqlite/builder.test.ts @@ -16,6 +16,7 @@ import { electrify } from '../../../src/drivers/better-sqlite3' import path from 'path' import { DbSchema } from '../../../src/client/model' import { MockSocket } from '../../../src/sockets/mock' +import { sqliteBuilder } from '../../../src/migrators/query-builder' function encodeSatOpMigrateMsg(request: SatOpMigrate) { return ( @@ -102,7 +103,7 @@ test('parse migration meta data', (t) => { test('generate migration from meta data', (t) => { const metaData = parseMetadata(migrationMetaData) - const migration = makeMigration(metaData) + const migration = makeMigration(metaData, sqliteBuilder) t.is(migration.version, migrationMetaData.version) t.is( migration.statements[0], @@ -270,7 +271,7 @@ test('make migration for table with FKs', (t) => { //const migrateMetaData = JSON.parse(`{"format":"SatOpMigrate","ops":["GjcKB3RlbmFudHMSEgoCaWQSBFRFWFQaBgoEdXVpZBIUCgRuYW1lEgRURVhUGgYKBHRleHQiAmlkCgExEooBEocBQ1JFQVRFIFRBQkxFICJ0ZW5hbnRzIiAoCiAgImlkIiBURVhUIE5PVCBOVUxMLAogICJuYW1lIiBURVhUIE5PVCBOVUxMLAogIENPTlNUUkFJTlQgInRlbmFudHNfcGtleSIgUFJJTUFSWSBLRVkgKCJpZCIpCikgV0lUSE9VVCBST1dJRDsK","GmsKBXVzZXJzEhIKAmlkEgRURVhUGgYKBHV1aWQSFAoEbmFtZRIEVEVYVBoGCgR0ZXh0EhUKBWVtYWlsEgRURVhUGgYKBHRleHQSHQoNcGFzc3dvcmRfaGFzaBIEVEVYVBoGCgR0ZXh0IgJpZAoBMRLAARK9AUNSRUFURSBUQUJMRSAidXNlcnMiICgKICAiaWQiIFRFWFQgTk9UIE5VTEwsCiAgIm5hbWUiIFRFWFQgTk9UIE5VTEwsCiAgImVtYWlsIiBURVhUIE5PVCBOVUxMLAogICJwYXNzd29yZF9oYXNoIiBURVhUIE5PVCBOVUxMLAogIENPTlNUUkFJTlQgInVzZXJzX3BrZXkiIFBSSU1BUlkgS0VZICgiaWQiKQopIFdJVEhPVVQgUk9XSUQ7Cg==","GoYBCgx0ZW5hbnRfdXNlcnMSGQoJdGVuYW50X2lkEgRURVhUGgYKBHV1aWQSFwoHdXNlcl9pZBIEVEVYVBoGCgR1dWlkGhgKCXRlbmFudF9pZBIHdGVuYW50cxoCaWQaFAoHdXNlcl9pZBIFdXNlcnMaAmlkIgl0ZW5hbnRfaWQiB3VzZXJfaWQKATESkgMSjwNDUkVBVEUgVEFCTEUgInRlbmFudF91c2VycyIgKAogICJ0ZW5hbnRfaWQiIFRFWFQgTk9UIE5VTEwsCiAgInVzZXJfaWQiIFRFWFQgTk9UIE5VTEwsCiAgQ09OU1RSQUlOVCAidGVuYW50X3VzZXJzX3RlbmFudF9pZF9ma2V5IiBGT1JFSUdOIEtFWSAoInRlbmFudF9pZCIpIFJFRkVSRU5DRVMgInRlbmFudHMiICgiaWQiKSBPTiBERUxFVEUgQ0FTQ0FERSwKICBDT05TVFJBSU5UICJ0ZW5hbnRfdXNlcnNfdXNlcl9pZF9ma2V5IiBGT1JFSUdOIEtFWSAoInVzZXJfaWQiKSBSRUZFUkVOQ0VTICJ1c2VycyIgKCJpZCIpIE9OIERFTEVURSBDQVNDQURFLAogIENPTlNUUkFJTlQgInRlbmFudF91c2Vyc19wa2V5IiBQUklNQVJZIEtFWSAoInRlbmFudF9pZCIsICJ1c2VyX2lkIikKKSBXSVRIT1VUIFJPV0lEOwo="],"protocol_version":"Electric.Satellite","version":"1"}`) const metaData = parseMetadata(migration) - makeMigration(metaData) + makeMigration(metaData, sqliteBuilder) t.pass() }) @@ -293,7 +294,7 @@ test('generate index creation migration from meta data', (t) => { protocol_version: 'Electric.Satellite', version: '20230613112725_814', }) - const migration = makeMigration(metaData) + const migration = makeMigration(metaData, sqliteBuilder) t.is(migration.version, migrationMetaData.version) t.deepEqual(migration.statements, [ 'CREATE INDEX idx_stars_username ON stars(username);', @@ -303,14 +304,17 @@ test('generate index creation migration from meta data', (t) => { const migrationsFolder = path.join('./test/migrators/support/migrations') test('read migration meta data', async (t) => { - const migrations = await loadMigrations(migrationsFolder) + const migrations = await 
loadMigrations(migrationsFolder, sqliteBuilder) const versions = migrations.map((m) => m.version) t.deepEqual(versions, ['20230613112725_814', '20230613112735_992']) }) test('load migration from meta data', async (t) => { const db = new Database(':memory:') - const migration = makeMigration(parseMetadata(migrationMetaData)) + const migration = makeMigration( + parseMetadata(migrationMetaData), + sqliteBuilder + ) const electric = await electrify( db, new DbSchema({}, [migration]), diff --git a/clients/typescript/test/migrators/sqlite/schema.test.ts b/clients/typescript/test/migrators/sqlite/schema.test.ts index 2af7df7f10..ad549ac1c1 100644 --- a/clients/typescript/test/migrators/sqlite/schema.test.ts +++ b/clients/typescript/test/migrators/sqlite/schema.test.ts @@ -39,13 +39,14 @@ test('check schema keys are unique', async (t) => { const migrator = new BundleMigrator(adapter, migrations) await migrator.up() + const metaTable = `"${satelliteDefaults.metaTable.namespace}"."${satelliteDefaults.metaTable.tablename}"` await adapter.run({ - sql: `INSERT INTO ${satelliteDefaults.metaTable}(key, value) values ('key', 'value')`, + sql: `INSERT INTO ${metaTable} (key, value) values ('key', 'value')`, }) try { await adapter.run({ - sql: `INSERT INTO ${satelliteDefaults.metaTable}(key, value) values ('key', 'value')`, + sql: `INSERT INTO ${metaTable} (key, value) values ('key', 'value')`, }) t.fail() } catch (err) { diff --git a/clients/typescript/test/migrators/sqlite/triggers.test.ts b/clients/typescript/test/migrators/sqlite/triggers.test.ts index 6e47ca38a7..0bba7a0c2b 100644 --- a/clients/typescript/test/migrators/sqlite/triggers.test.ts +++ b/clients/typescript/test/migrators/sqlite/triggers.test.ts @@ -5,9 +5,11 @@ import { generateTableTriggers } from '../../../src/migrators/triggers' import type { Database as SqliteDB } from 'better-sqlite3' import { satelliteDefaults } from '../../../src/satellite/config' import { migrateDb, personTable } from '../../satellite/common' +import { sqliteBuilder } from '../../../src/migrators/query-builder' type Context = { db: SqliteDB; migrateDb: () => void } const test = testAny as TestFn +const oplogTable = `"${satelliteDefaults.oplogTable.namespace}"."${satelliteDefaults.oplogTable.tablename}"` test.beforeEach(async (t) => { const db = new Database(':memory:') @@ -20,7 +22,7 @@ test.beforeEach(async (t) => { test('generateTableTriggers should create correct triggers for a table', (t) => { // Generate the oplog triggers - const triggers = generateTableTriggers(personTable.tableName, personTable) + const triggers = generateTableTriggers(personTable, sqliteBuilder) // Check that the oplog triggers are correct const triggersSQL = triggers.map((t) => t.sql).join('\n') @@ -29,7 +31,7 @@ test('generateTableTriggers should create correct triggers for a table', (t) => dedent` CREATE TRIGGER insert_main_personTable_into_oplog AFTER INSERT ON "main"."personTable" - WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'personTable') + WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'personTable') BEGIN INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) VALUES ('main', 'personTable', 'INSERT', json_object('id', cast(new."id" as TEXT)), json_object('age', new."age", 'blob', CASE WHEN new."blob" IS NOT NULL THEN hex(new."blob") ELSE NULL END, 'bmi', cast(new."bmi" as TEXT), 'id', cast(new."id" as TEXT), 'int8', cast(new."int8" as TEXT), 'name', new."name"), 
NULL, NULL); @@ -43,7 +45,7 @@ test('generateTableTriggers should create correct triggers for a table', (t) => dedent` CREATE TRIGGER update_main_personTable_into_oplog AFTER UPDATE ON "main"."personTable" - WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'personTable') + WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'personTable') BEGIN INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) VALUES ('main', 'personTable', 'UPDATE', json_object('id', cast(new."id" as TEXT)), json_object('age', new."age", 'blob', CASE WHEN new."blob" IS NOT NULL THEN hex(new."blob") ELSE NULL END, 'bmi', cast(new."bmi" as TEXT), 'id', cast(new."id" as TEXT), 'int8', cast(new."int8" as TEXT), 'name', new."name"), json_object('age', old."age", 'blob', CASE WHEN old."blob" IS NOT NULL THEN hex(old."blob") ELSE NULL END, 'bmi', cast(old."bmi" as TEXT), 'id', cast(old."id" as TEXT), 'int8', cast(old."int8" as TEXT), 'name', old."name"), NULL); @@ -57,7 +59,7 @@ test('generateTableTriggers should create correct triggers for a table', (t) => dedent` CREATE TRIGGER delete_main_personTable_into_oplog AFTER DELETE ON "main"."personTable" - WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'personTable') + WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'personTable') BEGIN INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) VALUES ('main', 'personTable', 'DELETE', json_object('id', cast(old."id" as TEXT)), NULL, json_object('age', old."age", 'blob', CASE WHEN old."blob" IS NOT NULL THEN hex(old."blob") ELSE NULL END, 'bmi', cast(old."bmi" as TEXT), 'id', cast(old."id" as TEXT), 'int8', cast(old."int8" as TEXT), 'name', old."name"), NULL); @@ -79,9 +81,7 @@ test('oplog insertion trigger should insert row into oplog table', (t) => { db.exec(insertRowSQL) // Check that the oplog table contains an entry for the inserted row - const oplogRows = db - .prepare(`SELECT * FROM ${satelliteDefaults.oplogTable}`) - .all() + const oplogRows = db.prepare(`SELECT * FROM ${oplogTable}`).all() t.is(oplogRows.length, 1) t.deepEqual(oplogRows[0], { namespace: 'main', @@ -122,9 +122,7 @@ test('oplog trigger should handle Infinity values correctly', (t) => { db.exec(insertRowSQL) // Check that the oplog table contains an entry for the inserted row - const oplogRows = db - .prepare(`SELECT * FROM ${satelliteDefaults.oplogTable}`) - .all() + const oplogRows = db.prepare(`SELECT * FROM ${oplogTable}`).all() t.is(oplogRows.length, 1) t.deepEqual(oplogRows[0], { namespace: 'main', diff --git a/clients/typescript/test/satellite/common.ts b/clients/typescript/test/satellite/common.ts index ecd7763c53..8906c48d9f 100644 --- a/clients/typescript/test/satellite/common.ts +++ b/clients/typescript/test/satellite/common.ts @@ -10,7 +10,7 @@ import { GlobalRegistry, Registry, SatelliteProcess } from '../../src/satellite' import { TableInfo, initTableInfo } from '../support/satellite-helpers' import { satelliteDefaults, SatelliteOpts } from '../../src/satellite/config' import { Table, generateTableTriggers } from '../../src/migrators/triggers' -import { getData as makeInitialMigration } from '../../src/migrators/schema' +import { buildInitialMigration as makeInitialMigration } from '../../src/migrators/schema' export const dbDescription = new DbSchema( { @@ -215,6 +215,7 @@ import { PgBasicType } from 
'../../src/client/conversions/types' import { HKT } from '../../src/client/util/hkt' import { ElectricClient } from '../../src/client/model' import EventEmitter from 'events' +import { sqliteBuilder } from '../../src/migrators/query-builder' // Speed up the intervals for testing. export const opts = Object.assign({}, satelliteDefaults, { @@ -345,14 +346,14 @@ export function migrateDb(db: SqliteDB, table: Table) { db.exec(createTableSQL) // Apply the initial migration on the database - const initialMigration = makeInitialMigration('SQLite') + const initialMigration = makeInitialMigration(sqliteBuilder) const migration = initialMigration.migrations[0].statements migration.forEach((stmt) => { db.exec(stmt) }) // Generate the table triggers - const triggers = generateTableTriggers(tableName, table) + const triggers = generateTableTriggers(table, sqliteBuilder) // Apply the triggers on the database triggers.forEach((trigger) => { diff --git a/clients/typescript/test/satellite/merge.test.ts b/clients/typescript/test/satellite/merge.test.ts index 3b23df9140..33d8eb969b 100644 --- a/clients/typescript/test/satellite/merge.test.ts +++ b/clients/typescript/test/satellite/merge.test.ts @@ -9,12 +9,18 @@ import { DEFAULT_LOG_POS, DataChangeType, DataTransaction, + QualifiedTablename, } from '../../src/util' import Long from 'long' import { relations, migrateDb, personTable } from './common' import Database from 'better-sqlite3' import { satelliteDefaults } from '../../src/satellite/config' +const qualifiedMergeTable = new QualifiedTablename( + 'main', + 'mergeTable' +).toString() + test('merging entries: local no-op updates should cancel incoming delete', (t) => { const pk = primaryKeyToStr({ id: 1 }) @@ -48,9 +54,10 @@ test('merging entries: local no-op updates should cancel incoming delete', (t) = const merged = mergeEntries('local', local, 'remote', remote, relations) // Merge should resolve into the UPSERT for this row, since the remote DELETE didn't observe this local update - t.like(merged, { 'main.parent': { [pk]: { optype: 'UPSERT' } } }) - t.deepEqual(merged['main.parent'][pk].tags, ['local@100001000']) - t.deepEqual(merged['main.parent'][pk].fullRow, { id: 1, value: 'TEST' }) + const qualifiedTableName = new QualifiedTablename('main', 'parent').toString() + t.like(merged, { [qualifiedTableName]: { [pk]: { optype: 'UPSERT' } } }) + t.deepEqual(merged[qualifiedTableName][pk].tags, ['local@100001000']) + t.deepEqual(merged[qualifiedTableName][pk].fullRow, { id: 1, value: 'TEST' }) }) test('merge can handle infinity values', (t) => { @@ -152,9 +159,9 @@ function _mergeTableTest( // tx2 should win because tx1 and tx2 happened concurrently // but the timestamp of tx2 > tx1 - t.like(merged, { 'main.mergeTable': { [pk]: { optype: 'UPSERT' } } }) + t.like(merged, { [qualifiedMergeTable]: { [pk]: { optype: 'UPSERT' } } }) - t.deepEqual(merged['main.mergeTable'][pk].fullRow, { + t.deepEqual(merged[qualifiedMergeTable][pk].fullRow, { ...opts.expected, id: pkId, }) @@ -171,9 +178,8 @@ test('merge works on oplog entries', (t) => { db.exec(insertRowSQL) // Fetch the oplog entry for the inserted row - const oplogRows = db - .prepare(`SELECT * FROM ${satelliteDefaults.oplogTable}`) - .all() + const oplogTable = `"${satelliteDefaults.oplogTable.namespace}"."${satelliteDefaults.oplogTable.tablename}"` + const oplogRows = db.prepare(`SELECT * FROM ${oplogTable}`).all() t.is(oplogRows.length, 1) @@ -214,10 +220,14 @@ test('merge works on oplog entries', (t) => { const pk = primaryKeyToStr({ id: 9e999 }) // the 
incoming transaction wins + const qualifiedTableName = new QualifiedTablename( + personTable.namespace, + personTable.tableName + ).toString() t.like(merged, { - [`main.${personTable.tableName}`]: { [pk]: { optype: 'UPSERT' } }, + [qualifiedTableName]: { [pk]: { optype: 'UPSERT' } }, }) - t.deepEqual(merged[`main.${personTable.tableName}`][pk].fullRow, { + t.deepEqual(merged[qualifiedTableName][pk].fullRow, { id: 9e999, name: 'John Doe', age: 30, diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index 3768799dca..fb00a642e9 100644 --- a/clients/typescript/test/satellite/process.test.ts +++ b/clients/typescript/test/satellite/process.test.ts @@ -80,6 +80,10 @@ const startSatellite = async ( const test = anyTest as TestFn test.beforeEach(makeContext) test.afterEach.always(cleanAndStopSatellite) +const qualifiedParentTableName = new QualifiedTablename( + 'main', + 'parent' +).toString() test('setup starts a satellite process', async (t) => { t.true(t.context.satellite instanceof SatelliteProcess) @@ -352,7 +356,7 @@ test('snapshot of INSERT after DELETE', async (t) => { }, relations ) - const [_, keyChanges] = merged['main.parent']['{"id":1}'] + const [_, keyChanges] = merged[qualifiedParentTableName]['{"id":1}'] const resultingValue = keyChanges.changes.value.value t.is(resultingValue, null) }) @@ -378,7 +382,11 @@ test('snapshot of INSERT with bigint', async (t) => { }, relations ) - const [_, keyChanges] = merged['main.bigIntTable']['{"value":"1"}'] + const qualifiedTableName = new QualifiedTablename( + 'main', + 'bigIntTable' + ).toString() + const [_, keyChanges] = merged[qualifiedTableName]['{"value":"1"}'] const resultingValue = keyChanges.changes.value.value t.is(resultingValue, 1n) }) @@ -447,7 +455,7 @@ test('take snapshot and merge local wins', async (t) => { [incomingEntry], relations ) - const item = merged['main.parent']['{"id":1}'] + const item = merged[qualifiedParentTableName]['{"id":1}'] t.deepEqual(item, { namespace: 'main', @@ -507,7 +515,7 @@ test('take snapshot and merge incoming wins', async (t) => { [incomingEntry], relations ) - const item = merged['main.parent']['{"id":1}'] + const item = merged[qualifiedParentTableName]['{"id":1}'] t.deepEqual(item, { namespace: 'main', @@ -852,7 +860,7 @@ test('INSERT wins over DELETE and restored deleted values', async (t) => { ] const merged = mergeEntries(clientId, local, 'remote', incoming, relations) - const item = merged['main.parent']['{"id":1}'] + const item = merged[qualifiedParentTableName]['{"id":1}'] t.deepEqual(item, { namespace: 'main', @@ -928,7 +936,7 @@ test('concurrent updates take all changed values', async (t) => { ] const merged = mergeEntries(clientId, local, 'remote', incoming, relations) - const item = merged['main.parent']['{"id":1}'] + const item = merged[qualifiedParentTableName]['{"id":1}'] // The incoming entry modified the value of the `value` column to `'remote'` // The local entry concurrently modified the value of the `other` column to 1. 
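With the QualifiedTablename serialization change above, the objects returned by `mergeEntries` are no longer indexed by a hand-written `'main.parent'` string; the tests derive the key from a QualifiedTablename instead. A minimal self-contained sketch of how that key looks:

import { QualifiedTablename } from '../../src/util'

// Key used to index the result of mergeEntries(); the JSON form comes from toString() above.
const qualifiedParentTableName = new QualifiedTablename('main', 'parent').toString()
// qualifiedParentTableName === '{"namespace":"main","tablename":"parent"}'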
@@ -980,7 +988,7 @@ test('merge incoming with empty local', async (t) => { const local: OplogEntry[] = [] const merged = mergeEntries(clientId, local, 'remote', incoming, relations) - const item = merged['main.parent']['{"id":1}'] + const item = merged[qualifiedParentTableName]['{"id":1}'] t.deepEqual(item, { namespace: 'main', @@ -1507,9 +1515,10 @@ test('apply shape data and persist subscription', async (t) => { }) // wait for process to apply shape data + const qualifiedTableName = `"${namespace}"."${tablename}"` try { const row = await adapter.query({ - sql: `SELECT id FROM ${qualified.toString()}`, + sql: `SELECT id FROM ${qualifiedTableName}`, }) t.is(row.length, 1) @@ -1606,7 +1615,7 @@ test('applied shape data will be acted upon correctly', async (t) => { const namespace = 'main' const tablename = 'parent' - const qualified = new QualifiedTablename(namespace, tablename).toString() + const qualified = `"${namespace}"."${tablename}"` // relations must be present at subscription delivery client.setRelations(relations) @@ -1740,7 +1749,6 @@ test('a subscription that failed to apply because of FK constraint triggers GC', const tablename = 'child' const namespace = 'main' - const qualified = new QualifiedTablename(namespace, tablename).toString() // relations must be present at subscription delivery client.setRelations(relations) @@ -1759,7 +1767,7 @@ test('a subscription that failed to apply because of FK constraint triggers GC', try { const row = await adapter.query({ - sql: `SELECT id FROM ${qualified}`, + sql: `SELECT id FROM "${namespace}"."${tablename}"`, }) t.is(row.length, 0) } catch (e) { @@ -1772,8 +1780,8 @@ test('a second successful subscription', async (t) => { t.context await runMigrations() + const namespace = 'main' const tablename = 'child' - const qualified = new QualifiedTablename('main', tablename).toString() // relations must be present at subscription delivery client.setRelations(relations) @@ -1797,7 +1805,7 @@ test('a second successful subscription', async (t) => { try { const row = await adapter.query({ - sql: `SELECT id FROM ${qualified}`, + sql: `SELECT id FROM "${namespace}"."${tablename}"`, }) t.is(row.length, 1) @@ -1819,8 +1827,6 @@ test('a single subscribe with multiple tables with FKs', async (t) => { t.context await runMigrations() - const qualifiedChild = new QualifiedTablename('main', 'child').toString() - // relations must be present at subscription delivery client.setRelations(relations) client.setRelationData('parent', parentRecord) @@ -1848,7 +1854,7 @@ test('a single subscribe with multiple tables with FKs', async (t) => { setTimeout(async () => { try { const row = await adapter.query({ - sql: `SELECT id FROM ${qualifiedChild}`, + sql: `SELECT id FROM "main"."child"`, }) t.is(row.length, 1) @@ -1872,6 +1878,9 @@ test.serial('a shape delivery that triggers garbage collection', async (t) => { t.context await runMigrations() + const namespace = 'main' + const tablename = 'parent' + // relations must be present at subscription delivery client.setRelations(relations) client.setRelationData('parent', parentRecord) @@ -1904,7 +1913,9 @@ test.serial('a shape delivery that triggers garbage collection', async (t) => { } catch (expected: any) { t.true(expected instanceof SatelliteError) try { - const row = await adapter.query({ sql: `SELECT id FROM main.parent` }) + const row = await adapter.query({ + sql: `SELECT id FROM "${namespace}"."${tablename}"`, + }) t.is(row.length, 0) const row1 = await adapter.query({ sql: `SELECT id FROM main.child` }) 
t.is(row1.length, 0) @@ -1930,8 +1941,8 @@ test('a subscription request failure does not clear the manager state', async (t await runMigrations() // relations must be present at subscription delivery + const namespace = 'main' const tablename = 'parent' - const qualified = new QualifiedTablename('main', tablename).toString() client.setRelations(relations) client.setRelationData(tablename, parentRecord) @@ -1952,7 +1963,7 @@ test('a subscription request failure does not clear the manager state', async (t try { const row = await adapter.query({ - sql: `SELECT id FROM ${qualified}`, + sql: `SELECT id FROM "${namespace}"."${tablename}"`, }) t.is(row.length, 1) } catch (e) { @@ -2054,7 +2065,7 @@ test('snapshots: generated oplog entries have the correct tags', async (t) => { const namespace = 'main' const tablename = 'parent' - const qualified = new QualifiedTablename(namespace, tablename).toString() + const qualified = `"${namespace}"."${tablename}"` // relations must be present at subscription delivery client.setRelations(relations) diff --git a/clients/typescript/test/support/migrations/migrations.js b/clients/typescript/test/support/migrations/migrations.js index 29fd599a9b..393315c968 100644 --- a/clients/typescript/test/support/migrations/migrations.js +++ b/clients/typescript/test/support/migrations/migrations.js @@ -10,7 +10,7 @@ export default [ { statements: [ 'DROP TABLE IF EXISTS _electric_trigger_settings;', - 'CREATE TABLE _electric_trigger_settings(tablename TEXT PRIMARY KEY, flag INTEGER);', + 'CREATE TABLE _electric_trigger_settings(namespace TEXT, tablename TEXT, flag INTEGER, PRIMARY KEY (namespace, tablename));', ], version: '1', }, @@ -22,56 +22,56 @@ export default [ 'CREATE TABLE IF NOT EXISTS parent (\n id INTEGER PRIMARY KEY NOT NULL,\n value TEXT,\n other INTEGER DEFAULT 0\n) WITHOUT ROWID;', 'CREATE TABLE IF NOT EXISTS child (\n id INTEGER PRIMARY KEY NOT NULL,\n parent INTEGER NOT NULL,\n FOREIGN KEY(parent) REFERENCES parent(id)\n) WITHOUT ROWID;', 'DROP TABLE IF EXISTS _electric_trigger_settings;', - 'CREATE TABLE _electric_trigger_settings(tablename TEXT PRIMARY KEY, flag INTEGER);', - "INSERT INTO _electric_trigger_settings(tablename,flag) VALUES ('main.child', 1);", - "INSERT INTO _electric_trigger_settings(tablename,flag) VALUES ('main.items', 1);", - "INSERT INTO _electric_trigger_settings(tablename,flag) VALUES ('main.bigIntTable', 1);", - "INSERT INTO _electric_trigger_settings(tablename,flag) VALUES ('main.blobTable', 1);", - "INSERT INTO _electric_trigger_settings(tablename,flag) VALUES ('main.parent', 1);", + 'CREATE TABLE _electric_trigger_settings(namespace TEXT, tablename TEXT, flag INTEGER, PRIMARY KEY (namespace, tablename));', + "INSERT INTO _electric_trigger_settings(namespace,tablename,flag) VALUES ('main', 'child', 1);", + "INSERT INTO _electric_trigger_settings(namespace,tablename,flag) VALUES ('main', 'items', 1);", + "INSERT INTO _electric_trigger_settings(namespace,tablename,flag) VALUES ('main', 'bigIntTable', 1);", + "INSERT INTO _electric_trigger_settings(namespace,tablename,flag) VALUES ('main', 'blobTable', 1);", + "INSERT INTO _electric_trigger_settings(namespace,tablename,flag) VALUES ('main', 'parent', 1);", 'DROP TRIGGER IF EXISTS update_ensure_main_child_primarykey;', "CREATE TRIGGER update_ensure_main_child_primarykey\n BEFORE UPDATE ON main.child\nBEGIN\n SELECT\n CASE\n WHEN old.id != new.id THEN\n RAISE (ABORT,'cannot change the value of column id as it belongs to the primary key')\n END;\nEND;", 'DROP TRIGGER IF EXISTS 
insert_main_child_into_oplog;', - "CREATE TRIGGER insert_main_child_into_oplog\n AFTER INSERT ON main.child\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.child')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'child', 'INSERT', json_object('id', new.id), json_object('id', new.id, 'parent', new.parent), NULL, NULL);\nEND;", + "CREATE TRIGGER insert_main_child_into_oplog\n AFTER INSERT ON main.child\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'child')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'child', 'INSERT', json_object('id', new.id), json_object('id', new.id, 'parent', new.parent), NULL, NULL);\nEND;", 'DROP TRIGGER IF EXISTS update_main_child_into_oplog;', - "CREATE TRIGGER update_main_child_into_oplog\n AFTER UPDATE ON main.child\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.child')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'child', 'UPDATE', json_object('id', new.id), json_object('id', new.id, 'parent', new.parent), json_object('id', old.id, 'parent', old.parent), NULL);\nEND;", + "CREATE TRIGGER update_main_child_into_oplog\n AFTER UPDATE ON main.child\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'child')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'child', 'UPDATE', json_object('id', new.id), json_object('id', new.id, 'parent', new.parent), json_object('id', old.id, 'parent', old.parent), NULL);\nEND;", 'DROP TRIGGER IF EXISTS delete_main_child_into_oplog;', - "CREATE TRIGGER delete_main_child_into_oplog\n AFTER DELETE ON main.child\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.child')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'child', 'DELETE', json_object('id', old.id), NULL, json_object('id', old.id, 'parent', old.parent), NULL);\nEND;", + "CREATE TRIGGER delete_main_child_into_oplog\n AFTER DELETE ON main.child\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'child')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'child', 'DELETE', json_object('id', old.id), NULL, json_object('id', old.id, 'parent', old.parent), NULL);\nEND;", 'DROP TRIGGER IF EXISTS compensation_insert_main_child_parent_into_oplog;', - "CREATE TRIGGER compensation_insert_main_child_parent_into_oplog\n AFTER INSERT ON main.child\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.parent') AND\n 1 == (SELECT value from _electric_meta WHERE key == 'compensations')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n SELECT 'main', 'parent', 'UPDATE', json_object('id', id), json_object('id', id, 'value', value, 'other', other), NULL, NULL\n FROM main.parent WHERE id = new.parent;\nEND;", + "CREATE TRIGGER compensation_insert_main_child_parent_into_oplog\n AFTER INSERT ON main.child\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent') AND\n 
1 == (SELECT value from _electric_meta WHERE key == 'compensations')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n SELECT 'main', 'parent', 'UPDATE', json_object('id', id), json_object('id', id, 'value', value, 'other', other), NULL, NULL\n FROM main.parent WHERE id = new.parent;\nEND;", 'DROP TRIGGER IF EXISTS compensation_update_main_child_parent_into_oplog;', - "CREATE TRIGGER compensation_update_main_child_parent_into_oplog\n AFTER UPDATE ON main.child\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.parent') AND\n 1 == (SELECT value from _electric_meta WHERE key == 'compensations')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n SELECT 'main', 'parent', 'UPDATE', json_object('id', id), json_object('id', id, 'value', value, 'other', other), NULL, NULL\n FROM main.parent WHERE id = new.parent;\nEND;", + "CREATE TRIGGER compensation_update_main_child_parent_into_oplog\n AFTER UPDATE ON main.child\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent') AND\n 1 == (SELECT value from _electric_meta WHERE key == 'compensations')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n SELECT 'main', 'parent', 'UPDATE', json_object('id', id), json_object('id', id, 'value', value, 'other', other), NULL, NULL\n FROM main.parent WHERE id = new.parent;\nEND;", 'DROP TRIGGER IF EXISTS update_ensure_main_items_primarykey;', "CREATE TRIGGER update_ensure_main_items_primarykey\n BEFORE UPDATE ON main.items\nBEGIN\n SELECT\n CASE\n WHEN old.value != new.value THEN\n RAISE (ABORT,'cannot change the value of column value as it belongs to the primary key')\n END;\nEND;", 'DROP TRIGGER IF EXISTS insert_main_items_into_oplog;', - "CREATE TRIGGER insert_main_items_into_oplog\n AFTER INSERT ON main.items\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.items')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'items', 'INSERT', json_object('value', new.value), json_object('value', new.value), NULL, NULL);\nEND;", + "CREATE TRIGGER insert_main_items_into_oplog\n AFTER INSERT ON main.items\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'items')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'items', 'INSERT', json_object('value', new.value), json_object('value', new.value), NULL, NULL);\nEND;", 'DROP TRIGGER IF EXISTS update_main_items_into_oplog;', - "CREATE TRIGGER update_main_items_into_oplog\n AFTER UPDATE ON main.items\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.items')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'items', 'UPDATE', json_object('value', new.value), json_object('value', new.value), json_object('value', old.value), NULL);\nEND;", + "CREATE TRIGGER update_main_items_into_oplog\n AFTER UPDATE ON main.items\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'items')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'items', 'UPDATE', json_object('value', new.value), 
json_object('value', new.value), json_object('value', old.value), NULL);\nEND;", 'DROP TRIGGER IF EXISTS delete_main_items_into_oplog;', - "CREATE TRIGGER delete_main_items_into_oplog\n AFTER DELETE ON main.items\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.items')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'items', 'DELETE', json_object('value', old.value), NULL, json_object('value', old.value), NULL);\nEND;", + "CREATE TRIGGER delete_main_items_into_oplog\n AFTER DELETE ON main.items\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'items')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'items', 'DELETE', json_object('value', old.value), NULL, json_object('value', old.value), NULL);\nEND;", 'DROP TRIGGER IF EXISTS update_ensure_main_bigIntTable_primarykey;', "CREATE TRIGGER update_ensure_main_bigIntTable_primarykey\n BEFORE UPDATE ON main.bigIntTable\nBEGIN\n SELECT\n CASE\n WHEN old.value != new.value THEN\n RAISE (ABORT,'cannot change the value of column value as it belongs to the primary key')\n END;\nEND;", 'DROP TRIGGER IF EXISTS insert_main_bigIntTable_into_oplog;', - "CREATE TRIGGER insert_main_bigIntTable_into_oplog\n AFTER INSERT ON main.bigIntTable\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.bigIntTable')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'bigIntTable', 'INSERT', json_object('value', new.value), json_object('value', new.value), NULL, NULL);\nEND;", + "CREATE TRIGGER insert_main_bigIntTable_into_oplog\n AFTER INSERT ON main.bigIntTable\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'bigIntTable')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'bigIntTable', 'INSERT', json_object('value', new.value), json_object('value', new.value), NULL, NULL);\nEND;", 'DROP TRIGGER IF EXISTS update_main_bigIntTable_into_oplog;', - "CREATE TRIGGER update_main_bigIntTable_into_oplog\n AFTER UPDATE ON main.bigIntTable\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.bigIntTable')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'bigIntTable', 'UPDATE', json_object('value', new.value), json_object('value', new.value), json_object('value', old.value), NULL);\nEND;", + "CREATE TRIGGER update_main_bigIntTable_into_oplog\n AFTER UPDATE ON main.bigIntTable\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'bigIntTable')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'bigIntTable', 'UPDATE', json_object('value', new.value), json_object('value', new.value), json_object('value', old.value), NULL);\nEND;", 'DROP TRIGGER IF EXISTS delete_main_bigIntTable_into_oplog;', - "CREATE TRIGGER delete_main_bigIntTable_into_oplog\n AFTER DELETE ON main.bigIntTable\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.bigIntTable')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 
'bigIntTable', 'DELETE', json_object('value', old.value), NULL, json_object('value', old.value), NULL);\nEND;", + "CREATE TRIGGER delete_main_bigIntTable_into_oplog\n AFTER DELETE ON main.bigIntTable\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'bigIntTable')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'bigIntTable', 'DELETE', json_object('value', old.value), NULL, json_object('value', old.value), NULL);\nEND;", 'DROP TRIGGER IF EXISTS update_ensure_main_blobTable_primarykey;', "CREATE TRIGGER update_ensure_main_blobTable_primarykey\n BEFORE UPDATE ON main.blobTable\nBEGIN\n SELECT\n CASE\n WHEN old.value != new.value THEN\n RAISE (ABORT,'cannot change the value of column value as it belongs to the primary key')\n END;\nEND;", 'DROP TRIGGER IF EXISTS insert_main_blobTable_into_oplog;', - "CREATE TRIGGER insert_main_blobTable_into_oplog\n AFTER INSERT ON main.blobTable\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.blobTable')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'blobTable', 'INSERT', json_object('value', CASE WHEN new.value IS NOT NULL THEN hex(new.value) ELSE NULL END), json_object('value', CASE WHEN new.value IS NOT NULL THEN hex(new.value) ELSE NULL END), NULL, NULL);\nEND;", + "CREATE TRIGGER insert_main_blobTable_into_oplog\n AFTER INSERT ON main.blobTable\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename == 'blobTable')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'blobTable', 'INSERT', json_object('value', CASE WHEN new.value IS NOT NULL THEN hex(new.value) ELSE NULL END), json_object('value', CASE WHEN new.value IS NOT NULL THEN hex(new.value) ELSE NULL END), NULL, NULL);\nEND;", 'DROP TRIGGER IF EXISTS update_main_blobTable_into_oplog;', - "CREATE TRIGGER update_main_blobTable_into_oplog\n AFTER UPDATE ON main.blobTable\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.blobTable')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'blobTable', 'UPDATE', json_object('value', CASE WHEN new.value IS NOT NULL THEN hex(new.value) ELSE NULL END), json_object('value', CASE WHEN new.value IS NOT NULL THEN hex(new.value) ELSE NULL END), json_object('value', old.value), NULL);\nEND;", + "CREATE TRIGGER update_main_blobTable_into_oplog\n AFTER UPDATE ON main.blobTable\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename == 'blobTable')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'blobTable', 'UPDATE', json_object('value', CASE WHEN new.value IS NOT NULL THEN hex(new.value) ELSE NULL END), json_object('value', CASE WHEN new.value IS NOT NULL THEN hex(new.value) ELSE NULL END), json_object('value', old.value), NULL);\nEND;", 'DROP TRIGGER IF EXISTS delete_main_blobTable_into_oplog;', - "CREATE TRIGGER delete_main_blobTable_into_oplog\n AFTER DELETE ON main.blobTable\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.blobTable')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 
'blobTable', 'DELETE', json_object('value', CASE WHEN old.value IS NOT NULL THEN hex(old.value) ELSE NULL END), NULL, json_object('value', CASE WHEN old.value IS NOT NULL THEN hex(old.value) ELSE NULL END), NULL);\nEND;", + "CREATE TRIGGER delete_main_blobTable_into_oplog\n AFTER DELETE ON main.blobTable\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename == 'blobTable')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'blobTable', 'DELETE', json_object('value', CASE WHEN old.value IS NOT NULL THEN hex(old.value) ELSE NULL END), NULL, json_object('value', CASE WHEN old.value IS NOT NULL THEN hex(old.value) ELSE NULL END), NULL);\nEND;", 'DROP TRIGGER IF EXISTS update_ensure_main_parent_primarykey;', "CREATE TRIGGER update_ensure_main_parent_primarykey\n BEFORE UPDATE ON main.parent\nBEGIN\n SELECT\n CASE\n WHEN old.id != new.id THEN\n RAISE (ABORT,'cannot change the value of column id as it belongs to the primary key')\n END;\nEND;", 'DROP TRIGGER IF EXISTS insert_main_parent_into_oplog;', - "CREATE TRIGGER insert_main_parent_into_oplog\n AFTER INSERT ON main.parent\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.parent')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'parent', 'INSERT', json_object('id', new.id), json_object('id', new.id, 'value', new.value, 'other', new.other), NULL, NULL);\nEND;", + "CREATE TRIGGER insert_main_parent_into_oplog\n AFTER INSERT ON main.parent\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'parent', 'INSERT', json_object('id', new.id), json_object('id', new.id, 'value', new.value, 'other', new.other), NULL, NULL);\nEND;", 'DROP TRIGGER IF EXISTS update_main_parent_into_oplog;', - "CREATE TRIGGER update_main_parent_into_oplog\n AFTER UPDATE ON main.parent\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.parent')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'parent', 'UPDATE', json_object('id', new.id), json_object('id', new.id, 'value', new.value, 'other', new.other), json_object('id', old.id, 'value', old.value, 'other', old.other), NULL);\nEND;", + "CREATE TRIGGER update_main_parent_into_oplog\n AFTER UPDATE ON main.parent\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'parent', 'UPDATE', json_object('id', new.id), json_object('id', new.id, 'value', new.value, 'other', new.other), json_object('id', old.id, 'value', old.value, 'other', old.other), NULL);\nEND;", 'DROP TRIGGER IF EXISTS delete_main_parent_into_oplog;', - "CREATE TRIGGER delete_main_parent_into_oplog\n AFTER DELETE ON main.parent\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE tablename == 'main.parent')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'parent', 'DELETE', json_object('id', old.id), NULL, json_object('id', old.id, 'value', old.value, 'other', old.other), NULL);\nEND;", + "CREATE 
TRIGGER delete_main_parent_into_oplog\n AFTER DELETE ON main.parent\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'parent', 'DELETE', json_object('id', old.id), NULL, json_object('id', old.id, 'value', old.value, 'other', old.other), NULL);\nEND;", ], version: '2', }, diff --git a/clients/typescript/test/support/migrations/pg-migrations.js b/clients/typescript/test/support/migrations/pg-migrations.js index 26d97875c5..167f8d4271 100644 --- a/clients/typescript/test/support/migrations/pg-migrations.js +++ b/clients/typescript/test/support/migrations/pg-migrations.js @@ -10,7 +10,7 @@ export default [ { statements: [ 'DROP TABLE IF EXISTS main._electric_trigger_settings;', - 'CREATE TABLE main._electric_trigger_settings(tablename TEXT PRIMARY KEY, flag INTEGER);', + 'CREATE TABLE main._electric_trigger_settings(namespace TEXT, tablename TEXT, flag INTEGER, PRIMARY KEY (namespace, tablename));', ], version: '1', }, @@ -20,10 +20,10 @@ export default [ 'CREATE TABLE IF NOT EXISTS main.parent (\n id INTEGER PRIMARY KEY NOT NULL,\n value TEXT,\n other INTEGER DEFAULT 0\n);', 'CREATE TABLE IF NOT EXISTS main.child (\n id INTEGER PRIMARY KEY NOT NULL,\n parent INTEGER NOT NULL,\n FOREIGN KEY(parent) REFERENCES main.parent(id)\n);', 'DROP TABLE IF EXISTS main._electric_trigger_settings;', - 'CREATE TABLE main._electric_trigger_settings(tablename TEXT PRIMARY KEY, flag INTEGER);', - "INSERT INTO main._electric_trigger_settings(tablename,flag) VALUES ('main.child', 1);", - "INSERT INTO main._electric_trigger_settings(tablename,flag) VALUES ('main.items', 1);", - "INSERT INTO main._electric_trigger_settings(tablename,flag) VALUES ('main.parent', 1);", + 'CREATE TABLE main._electric_trigger_settings(namespace TEXT, tablename TEXT, flag INTEGER, PRIMARY KEY (namespace, tablename));', + "INSERT INTO main._electric_trigger_settings(namespace, tablename,flag) VALUES ('main', 'child', 1);", + "INSERT INTO main._electric_trigger_settings(namespace, tablename,flag) VALUES ('main', 'items', 1);", + "INSERT INTO main._electric_trigger_settings(namespace, tablename,flag) VALUES ('main', 'parent', 1);", 'DROP TRIGGER IF EXISTS update_ensure_main_child_primarykey ON main.child;', ` @@ -53,7 +53,7 @@ export default [ flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.child'; + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'child'; IF flag_value = 1 THEN -- Insert into _electric_oplog @@ -83,7 +83,7 @@ export default [ flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.child'; + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'child'; IF flag_value = 1 THEN -- Insert into _electric_oplog @@ -112,7 +112,7 @@ export default [ flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.child'; + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'child'; IF flag_value = 1 THEN -- Insert into _electric_oplog @@ -141,7 
+141,7 @@ export default [ flag_value INTEGER; meta_value TEXT; BEGIN - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.parent'; + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent'; SELECT value INTO meta_value FROM main._electric_meta WHERE key = 'compensations'; @@ -174,7 +174,7 @@ export default [ meta_value TEXT; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.parent'; + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent'; -- Get the 'compensations' value from _electric_meta SELECT value INTO meta_value FROM main._electric_meta WHERE key = 'compensations'; @@ -226,7 +226,7 @@ export default [ flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.items'; + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'items'; IF flag_value = 1 THEN -- Insert into _electric_oplog @@ -257,7 +257,7 @@ export default [ flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.items'; + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'items'; IF flag_value = 1 THEN -- Insert into _electric_oplog @@ -287,7 +287,7 @@ export default [ flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.items'; + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'items'; IF flag_value = 1 THEN -- Insert into _electric_oplog @@ -337,7 +337,7 @@ export default [ flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.parent'; + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent'; IF flag_value = 1 THEN -- Insert into _electric_oplog @@ -377,7 +377,7 @@ export default [ flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.parent'; + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent'; IF flag_value = 1 THEN -- Insert into _electric_oplog @@ -417,7 +417,7 @@ export default [ flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE tablename = 'main.parent'; + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent'; IF flag_value = 1 THEN -- Insert into _electric_oplog From a97d0d486b47f66f60d517afdb415a643a4e2e22 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 12 Feb 2024 12:05:19 +0100 Subject: [PATCH 011/156] Added test for triggers in PG --- .../test/migrators/postgres/triggers.test.ts | 253 ++++++++++++++++++ .../test/migrators/sqlite/triggers.test.ts | 21 +- clients/typescript/test/satellite/common.ts | 61 +++-- 
.../typescript/test/satellite/merge.test.ts | 8 +- 4 files changed, 307 insertions(+), 36 deletions(-) create mode 100644 clients/typescript/test/migrators/postgres/triggers.test.ts diff --git a/clients/typescript/test/migrators/postgres/triggers.test.ts b/clients/typescript/test/migrators/postgres/triggers.test.ts new file mode 100644 index 0000000000..3c66b58a1f --- /dev/null +++ b/clients/typescript/test/migrators/postgres/triggers.test.ts @@ -0,0 +1,253 @@ +import { dedent } from 'ts-dedent' +import testAny, { TestFn } from 'ava' +import { generateTableTriggers } from '../../../src/migrators/triggers' +import { satelliteDefaults } from '../../../src/satellite/config' +import { migrateDb, personTable } from '../../satellite/common' +import { pgBuilder } from '../../../src/migrators/query-builder' +import { makePgDatabase } from '../../support/node-postgres' +import { Database } from '../../../src/drivers/node-postgres' + +type Context = { + db: Database + migrateDb: () => Promise + stopPG: () => Promise +} +const test = testAny as TestFn +const oplogTable = `"${satelliteDefaults.oplogTable.namespace}"."${satelliteDefaults.oplogTable.tablename}"` + +const personNamespace = personTable.namespace +const personTableName = personTable.tableName +const qualifiedPersonTable = `"${personNamespace}"."${personTableName}"` + +let i = 1 +let port = 5300 +test.beforeEach(async (t) => { + const dbName = `triggers-test-${i++}` + const { db, stop } = await makePgDatabase(dbName, port++) + + t.context = { + db, + migrateDb: migrateDb.bind(null, db, personTable, pgBuilder), + stopPG: stop, + } +}) + +test.afterEach.always(async (t) => { + const { stopPG } = t.context as any + await stopPG() +}) + +test('generateTableTriggers should create correct triggers for a table', (t) => { + // Generate the oplog triggers + const triggers = generateTableTriggers(personTable, pgBuilder) + + // Check that the oplog triggers are correct + const triggersSQL = triggers.map((t) => t.sql).join('\n') + t.assert( + triggersSQL.includes( + dedent` + CREATE TRIGGER insert_main_personTable_into_oplog + AFTER INSERT ON "main"."personTable" + FOR EACH ROW + EXECUTE FUNCTION insert_main_personTable_into_oplog_function(); + ` + ) + ) + + t.assert( + triggersSQL.includes( + dedent` + CREATE OR REPLACE FUNCTION insert_main_personTable_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'personTable'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ( + 'main', + 'personTable', + 'INSERT', + jsonb_build_object('id', cast(new."id" as TEXT)), + jsonb_build_object('age', new."age", 'bmi', cast(new."bmi" as TEXT), 'id', cast(new."id" as TEXT), 'int8', cast(new."int8" as TEXT), 'name', new."name"), + NULL, + NULL + ); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + ` + ) + ) + + t.assert( + triggersSQL.includes( + dedent` + CREATE TRIGGER update_main_personTable_into_oplog + AFTER UPDATE ON "main"."personTable" + FOR EACH ROW + EXECUTE FUNCTION update_main_personTable_into_oplog_function(); + ` + ) + ) + + t.assert( + triggersSQL.includes( + dedent` + CREATE OR REPLACE FUNCTION update_main_personTable_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + 
BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'personTable'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ( + 'main', + 'personTable', + 'UPDATE', + jsonb_build_object('id', cast(new."id" as TEXT)), + jsonb_build_object('age', new."age", 'bmi', cast(new."bmi" as TEXT), 'id', cast(new."id" as TEXT), 'int8', cast(new."int8" as TEXT), 'name', new."name"), + jsonb_build_object('age', old."age", 'bmi', cast(old."bmi" as TEXT), 'id', cast(old."id" as TEXT), 'int8', cast(old."int8" as TEXT), 'name', old."name"), + NULL + ); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + ` + ) + ) + + t.assert( + triggersSQL.includes( + dedent` + CREATE TRIGGER delete_main_personTable_into_oplog + AFTER DELETE ON "main"."personTable" + FOR EACH ROW + EXECUTE FUNCTION delete_main_personTable_into_oplog_function(); + ` + ) + ) + + t.assert( + triggersSQL.includes( + dedent` + CREATE OR REPLACE FUNCTION delete_main_personTable_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'personTable'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ( + 'main', + 'personTable', + 'DELETE', + jsonb_build_object('id', cast(old."id" as TEXT)), + NULL, + jsonb_build_object('age', old."age", 'bmi', cast(old."bmi" as TEXT), 'id', cast(old."id" as TEXT), 'int8', cast(old."int8" as TEXT), 'name', old."name"), + NULL + ); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + ` + ) + ) +}) + +test('oplog insertion trigger should insert row into oplog table', async (t) => { + const { db, migrateDb } = t.context + + // Migrate the DB with the necessary tables and triggers + await migrateDb() + + // Insert a row in the table + const insertRowSQL = `INSERT INTO ${qualifiedPersonTable} (id, name, age, bmi, int8) VALUES (1, 'John Doe', 30, 25.5, 7)` + await db.exec({ sql: insertRowSQL }) + + // Check that the oplog table contains an entry for the inserted row + const { rows: oplogRows } = await db.exec({ + sql: `SELECT * FROM ${oplogTable}`, + }) + t.is(oplogRows.length, 1) + t.deepEqual(oplogRows[0], { + namespace: 'main', + tablename: personTableName, + optype: 'INSERT', + // `id` and `bmi` values are stored as strings + // because we cast REAL values to text in the trigger + // to circumvent SQLite's bug in the `json_object` function + // that is used in the triggers. + // cf. 
`joinColsForJSON` function in `src/migrators/triggers.ts` + // These strings are then parsed back into real numbers + // by the `deserialiseRow` function in `src/satellite/oplog.ts` + primaryKey: '{"id": "1"}', + newRow: + '{"id": "1", "age": 30, "bmi": "25.5", "int8": "7", "name": "John Doe"}', // BigInts are serialized as strings in the oplog + oldRow: null, + timestamp: null, + rowid: 1, + clearTags: '[]', + }) +}) + +test('oplog trigger should handle Infinity values correctly', async (t) => { + const { db, migrateDb } = t.context + const tableName = personTable.tableName + + // Migrate the DB with the necessary tables and triggers + await migrateDb() + + // Insert a row in the table + const insertRowSQL = `INSERT INTO ${qualifiedPersonTable} (id, name, age, bmi, int8) VALUES ('-Infinity', 'John Doe', 30, 'Infinity', 7)` + await db.exec({ sql: insertRowSQL }) + + // Check that the oplog table contains an entry for the inserted row + const { rows: oplogRows } = await db.exec({ + sql: `SELECT * FROM ${oplogTable}`, + }) + t.is(oplogRows.length, 1) + t.deepEqual(oplogRows[0], { + namespace: 'main', + tablename: tableName, + optype: 'INSERT', + // `id` and `bmi` values are stored as strings + // because we cast REAL values to text in the trigger + // to circumvent SQLite's bug in the `json_object` function + // that is used in the triggers. + // cf. `joinColsForJSON` function in `src/migrators/triggers.ts` + // These strings are then parsed back into real numbers + // by the `deserialiseRow` function in `src/satellite/oplog.ts` + primaryKey: '{"id": "-Infinity"}', + newRow: + '{"id": "-Infinity", "age": 30, "bmi": "Infinity", "int8": "7", "name": "John Doe"}', // BigInts are serialized as strings in the oplog + oldRow: null, + timestamp: null, + rowid: 1, + clearTags: '[]', + }) +}) diff --git a/clients/typescript/test/migrators/sqlite/triggers.test.ts b/clients/typescript/test/migrators/sqlite/triggers.test.ts index 0bba7a0c2b..9760c68f21 100644 --- a/clients/typescript/test/migrators/sqlite/triggers.test.ts +++ b/clients/typescript/test/migrators/sqlite/triggers.test.ts @@ -1,22 +1,23 @@ import { dedent } from 'ts-dedent' -import Database from 'better-sqlite3' +import OriginalDatabase from 'better-sqlite3' +import { Database } from 'better-sqlite3' import testAny, { TestFn } from 'ava' import { generateTableTriggers } from '../../../src/migrators/triggers' -import type { Database as SqliteDB } from 'better-sqlite3' import { satelliteDefaults } from '../../../src/satellite/config' -import { migrateDb, personTable } from '../../satellite/common' +import { migrateDb, personTable, wrapDB } from '../../satellite/common' import { sqliteBuilder } from '../../../src/migrators/query-builder' -type Context = { db: SqliteDB; migrateDb: () => void } +type Context = { db: Database; migrateDb: () => Promise } const test = testAny as TestFn const oplogTable = `"${satelliteDefaults.oplogTable.namespace}"."${satelliteDefaults.oplogTable.tablename}"` test.beforeEach(async (t) => { - const db = new Database(':memory:') + const db = new OriginalDatabase(':memory:') + const wrappedDb = wrapDB(db) t.context = { db, - migrateDb: migrateDb.bind(null, db, personTable), + migrateDb: migrateDb.bind(null, wrappedDb, personTable, sqliteBuilder), } }) @@ -69,12 +70,12 @@ test('generateTableTriggers should create correct triggers for a table', (t) => ) }) -test('oplog insertion trigger should insert row into oplog table', (t) => { +test('oplog insertion trigger should insert row into oplog table', async (t) => { const 
{ db, migrateDb } = t.context const tableName = personTable.tableName // Migrate the DB with the necessary tables and triggers - migrateDb() + await migrateDb() // Insert a row in the table const insertRowSQL = `INSERT INTO ${tableName} (id, name, age, bmi, int8, blob) VALUES (1, 'John Doe', 30, 25.5, 7, x'0001ff')` @@ -110,12 +111,12 @@ test('oplog insertion trigger should insert row into oplog table', (t) => { }) }) -test('oplog trigger should handle Infinity values correctly', (t) => { +test('oplog trigger should handle Infinity values correctly', async (t) => { const { db, migrateDb } = t.context const tableName = personTable.tableName // Migrate the DB with the necessary tables and triggers - migrateDb() + await migrateDb() // Insert a row in the table const insertRowSQL = `INSERT INTO ${tableName} (id, name, age, bmi, int8, blob) VALUES (-9e999, 'John Doe', 30, 9e999, 7, x'0001ff')` diff --git a/clients/typescript/test/satellite/common.ts b/clients/typescript/test/satellite/common.ts index 8906c48d9f..4f1844d227 100644 --- a/clients/typescript/test/satellite/common.ts +++ b/clients/typescript/test/satellite/common.ts @@ -12,6 +12,20 @@ import { satelliteDefaults, SatelliteOpts } from '../../src/satellite/config' import { Table, generateTableTriggers } from '../../src/migrators/triggers' import { buildInitialMigration as makeInitialMigration } from '../../src/migrators/schema' +export type Database = { + exec(statement: { sql: string }): Promise +} + +export function wrapDB(db: SqliteDB): Database { + const wrappedDB = { + exec: async ({ sql }: { sql: string }) => { + console.log('EXECCC:\n' + sql) + db.exec(sql) + }, + } + return wrappedDB +} + export const dbDescription = new DbSchema( { child: { @@ -215,7 +229,7 @@ import { PgBasicType } from '../../src/client/conversions/types' import { HKT } from '../../src/client/util/hkt' import { ElectricClient } from '../../src/client/model' import EventEmitter from 'events' -import { sqliteBuilder } from '../../src/migrators/query-builder' +import { QueryBuilder } from '../../src/migrators/query-builder' // Speed up the intervals for testing. 
export const opts = Object.assign({}, satelliteDefaults, { @@ -339,26 +353,35 @@ export const cleanAndStopSatellite = async ( await clean(t) } -export function migrateDb(db: SqliteDB, table: Table) { +export async function migrateDb( + db: Database, + table: Table, + builder: QueryBuilder +) { + // First create the "main" schema (only when running on PG) + const initialMigration = makeInitialMigration(builder) + const migration = initialMigration.migrations[0].statements + const [createMainSchema, ...restMigration] = migration + await db.exec({ sql: createMainSchema }) + + const namespace = table.namespace const tableName = table.tableName - // Create the table in the database - const createTableSQL = `CREATE TABLE ${tableName} (id REAL PRIMARY KEY, name TEXT, age INTEGER, bmi REAL, int8 INTEGER, blob BLOB)` - db.exec(createTableSQL) + // Create the table in the database on the given namespace + const createTableSQL = `CREATE TABLE "${namespace}"."${tableName}" (id REAL PRIMARY KEY, name TEXT, age INTEGER, bmi REAL, int8 INTEGER, blob BLOB)` + await db.exec({ sql: createTableSQL }) // Apply the initial migration on the database - const initialMigration = makeInitialMigration(sqliteBuilder) - const migration = initialMigration.migrations[0].statements - migration.forEach((stmt) => { - db.exec(stmt) - }) + for (const stmt of restMigration) { + await db.exec({ sql: stmt }) + } // Generate the table triggers - const triggers = generateTableTriggers(table, sqliteBuilder) + const triggers = generateTableTriggers(table, builder) // Apply the triggers on the database - triggers.forEach((trigger) => { - db.exec(trigger.sql) - }) + for (const trigger of triggers) { + await db.exec({ sql: trigger.sql }) + } } export const personTable: Table = { @@ -368,19 +391,11 @@ export const personTable: Table = { primary: ['id'], foreignKeys: [], columnTypes: { -<<<<<<< HEAD - id: { sqliteType: 'REAL', pgType: PgBasicType.PG_REAL }, - name: { sqliteType: 'TEXT', pgType: PgBasicType.PG_TEXT }, - age: { sqliteType: 'INTEGER', pgType: PgBasicType.PG_INTEGER }, - bmi: { sqliteType: 'REAL', pgType: PgBasicType.PG_REAL }, - int8: { sqliteType: 'INTEGER', pgType: PgBasicType.PG_INT8 }, - blob: { sqliteType: 'BLOB', pgType: PgBasicType.PG_BYTEA }, -======= id: PgBasicType.PG_REAL, name: PgBasicType.PG_TEXT, age: PgBasicType.PG_INTEGER, bmi: PgBasicType.PG_REAL, int8: PgBasicType.PG_INT8, ->>>>>>> 2007ecb76 (Deprecate PgColumnType.sqlite_type) + blob: PgBasicType.PG_BYTEA, }, } diff --git a/clients/typescript/test/satellite/merge.test.ts b/clients/typescript/test/satellite/merge.test.ts index 33d8eb969b..7a07090cce 100644 --- a/clients/typescript/test/satellite/merge.test.ts +++ b/clients/typescript/test/satellite/merge.test.ts @@ -12,9 +12,10 @@ import { QualifiedTablename, } from '../../src/util' import Long from 'long' -import { relations, migrateDb, personTable } from './common' +import { relations, migrateDb, personTable, wrapDB } from './common' import Database from 'better-sqlite3' import { satelliteDefaults } from '../../src/satellite/config' +import { sqliteBuilder } from '../../src/migrators/query-builder' const qualifiedMergeTable = new QualifiedTablename( 'main', @@ -167,11 +168,12 @@ function _mergeTableTest( }) } -test('merge works on oplog entries', (t) => { +test('merge works on oplog entries', async (t) => { const db = new Database(':memory:') + const wrappedDb = wrapDB(db) // Migrate the DB with the necessary tables and triggers - migrateDb(db, personTable) + await migrateDb(wrappedDb, personTable, 
sqliteBuilder) // Insert a row in the table const insertRowSQL = `INSERT INTO ${personTable.tableName} (id, name, age, bmi, int8, blob) VALUES (9e999, 'John Doe', 30, 25.5, 7, x'0001ff')` From 23e076a2bbd7776f03d2d38a3a38830e3e523f65 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 15 Feb 2024 10:12:47 +0100 Subject: [PATCH 012/156] Port the Satellite process to Postgres and all unit tests. --- clients/typescript/src/migrators/bundle.ts | 4 +- clients/typescript/src/migrators/index.ts | 2 + clients/typescript/src/migrators/mock.ts | 3 + .../src/migrators/query-builder/builder.ts | 170 +- .../src/migrators/query-builder/pgBuilder.ts | 273 ++- clients/typescript/src/migrators/triggers.ts | 9 +- clients/typescript/src/satellite/process.ts | 286 ++- clients/typescript/src/util/relations.ts | 27 +- clients/typescript/src/util/statements.ts | 45 - .../test/migrators/postgres/triggers.test.ts | 10 +- .../test/migrators/sqlite/triggers.test.ts | 6 +- clients/typescript/test/satellite/common.ts | 104 +- .../postgres/process.migration.test.ts | 795 +++++++ .../test/satellite/postgres/process.test.ts | 1949 +++++++++++++++++ .../satellite/postgres/process.timing.test.ts | 39 + .../test/satellite/process.migration.test.ts | 2 +- .../typescript/test/satellite/process.test.ts | 7 +- .../test/satellite/serialization.test.ts | 135 +- .../test/support/migrations/pg-migrations.js | 167 +- .../test/support/satellite-helpers.ts | 26 +- .../typescript/test/util/statements.test.ts | 9 +- 21 files changed, 3719 insertions(+), 349 deletions(-) create mode 100644 clients/typescript/test/satellite/postgres/process.migration.test.ts create mode 100644 clients/typescript/test/satellite/postgres/process.test.ts create mode 100644 clients/typescript/test/satellite/postgres/process.timing.test.ts diff --git a/clients/typescript/src/migrators/bundle.ts b/clients/typescript/src/migrators/bundle.ts index bf561dacca..4217b0c214 100644 --- a/clients/typescript/src/migrators/bundle.ts +++ b/clients/typescript/src/migrators/bundle.ts @@ -36,7 +36,7 @@ export const SCHEMA_VSN_ERROR_MSG = `Local schema doesn't match server's. 
Clear const VALID_VERSION_EXP = new RegExp('^[0-9_]+') -abstract class BundleMigratorBase implements Migrator { +export abstract class BundleMigratorBase implements Migrator { adapter: DatabaseAdapter migrations: StmtMigration[] @@ -48,7 +48,7 @@ abstract class BundleMigratorBase implements Migrator { adapter: DatabaseAdapter, migrations: Migration[] = [], queryBuilderConfig: KyselyConfig, - electricQueryBuilder: QueryBuilder + public electricQueryBuilder: QueryBuilder ) { this.adapter = adapter const baseMigration = makeBaseMigration(electricQueryBuilder) diff --git a/clients/typescript/src/migrators/index.ts b/clients/typescript/src/migrators/index.ts index 48f765e559..65ce040aa1 100644 --- a/clients/typescript/src/migrators/index.ts +++ b/clients/typescript/src/migrators/index.ts @@ -1,4 +1,5 @@ import { Statement } from '../util' +import { QueryBuilder } from './query-builder' export { SqliteBundleMigrator, PgBundleMigrator } from './bundle' export { MockMigrator } from './mock' @@ -31,4 +32,5 @@ export interface Migrator { apply(migration: StmtMigration): Promise applyIfNotAlready(migration: StmtMigration): Promise querySchemaVersion(): Promise + electricQueryBuilder: QueryBuilder } diff --git a/clients/typescript/src/migrators/mock.ts b/clients/typescript/src/migrators/mock.ts index 313c080495..7a728e8c1d 100644 --- a/clients/typescript/src/migrators/mock.ts +++ b/clients/typescript/src/migrators/mock.ts @@ -1,6 +1,9 @@ import { Migrator, StmtMigration } from './index' +import { QueryBuilder } from './query-builder' export class MockMigrator implements Migrator { + electricQueryBuilder: QueryBuilder = null as any + async up(): Promise { return 1 } diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index 0e0c18a255..ae4118dae3 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -1,7 +1,10 @@ import { ForeignKey } from '../triggers' -import { QualifiedTablename } from '../../util' +import { QualifiedTablename, SqlValue, Statement } from '../../util' export abstract class QueryBuilder { + abstract readonly dialect: 'SQLite' | 'Postgres' + abstract readonly paramSign: '?' | '$' + /** * The autoincrementing integer primary key type for the current SQL dialect. */ @@ -12,6 +15,16 @@ export abstract class QueryBuilder { */ abstract readonly BLOB: string + /** + * Defers foreign key checks for the current transaction. + */ + abstract readonly deferForeignKeys: string + + /** + * Queries the version of SQLite/Postgres we are using. + */ + abstract readonly getVersion: string + /** * Returns the given query if the current SQL dialect is PostgreSQL. */ @@ -32,6 +45,19 @@ export abstract class QueryBuilder { */ abstract sqliteOnlyQuery(query: string): string[] + /** + * Makes the i-th positional parameter, + * e.g. '$3' For Postgres when `i` is 3 + * and always '?' for SQLite + */ + abstract makePositionalParam(i: number): string + + /** + * Counts tables whose name is included in `tables`. + * The count is returned as `countName`. + */ + abstract countTablesIn(countName: string, tables: string[]): Statement + /** * Create an index on a table. */ @@ -41,6 +67,18 @@ export abstract class QueryBuilder { columns: string[] ): string + /** + * Fetches the names of all tables that are not in `notIn`. + */ + abstract getLocalTableNames(notIn?: string[]): Statement + + /** + * Fetches information about the columns of a table. 
+ * The information includes all column names, their type, + * whether or not they are nullable, and whether they are part of the PK. + */ + abstract getTableInfo(tablename: string): Statement + /** * Insert a row into a table, ignoring it if it already exists. */ @@ -48,8 +86,48 @@ export abstract class QueryBuilder { schema: string, table: string, columns: string[], - values: string[] - ): string + values: SqlValue[] + ): Statement + + /** + * Insert a row into a table, replacing it if it already exists. + */ + abstract insertOrReplace( + schema: string, + table: string, + columns: string[], + values: Array, + conflictCols: string[], + updateCols: string[] + ): Statement + + /** + * Insert a row into a table. + * If it already exists we update the provided columns `updateCols` + * with the provided values `updateVals` + */ + abstract insertOrReplaceWith( + schema: string, + table: string, + columns: string[], + values: Array, + conflictCols: string[], + updateCols: string[], + updateVals: SqlValue[] + ): Statement + + /** + * Inserts a batch of rows into a table, replacing them if they already exist. + */ + abstract batchedInsertOrReplace( + schema: string, + table: string, + columns: string[], + records: Array>, + conflictCols: string[], + updateCols: string[], + maxSqlParameters: number + ): Statement[] /** * Drop a trigger if it exists. @@ -87,6 +165,15 @@ export abstract class QueryBuilder { ] } + /** + * Modifies the trigger setting for the table identified by its tablename and namespace. + */ + abstract setTriggerSetting( + namespace: string, + tableName: string, + value: 0 | 1 + ): string + /** * Create a trigger that logs operations into the oplog. */ @@ -202,4 +289,81 @@ export abstract class QueryBuilder { */ createOrReplaceUpdateCompensationTrigger = this.createOrReplaceFkCompensationTrigger.bind(this, 'UPDATE') + + /** + * For each first oplog entry per element, + * sets `clearTags` array to previous tags from the shadow table + */ + abstract setClearTagsForTimestamp( + oplogTable: QualifiedTablename, + shadowTable: QualifiedTablename + ): string + + /** + * For each affected shadow row, set new tag array, unless the last oplog operation was a DELETE + */ + abstract setTagsForShadowRows( + oplogTable: QualifiedTablename, + shadowTable: QualifiedTablename + ): string + + /** + * Deletes any shadow rows where the last oplog operation was a `DELETE` + */ + abstract removeDeletedShadowRows( + oplogTable: QualifiedTablename, + shadowTable: QualifiedTablename + ): string + + /** + * Prepare multiple batched insert statements for an array of records. + * + * Since SQLite only supports a limited amount of positional `?` parameters, + * we generate multiple insert statements with each one being filled as much + * as possible from the given data. All statements are derived from same `baseSql` - + * the positional parameters will be appended to this string. 
+ * + * @param baseSql base SQL string to which inserts should be appended + * @param columns columns that describe records + * @param records records to be inserted + * @param maxParameters max parameters this SQLite can accept - determines batching factor + * @returns array of statements ready to be executed by the adapter + */ + prepareInsertBatchedStatements( + baseSql: string, + columns: string[], + records: Record[], + maxParameters: number + ): Statement[] { + const stmts: Statement[] = [] + const columnCount = columns.length + const recordCount = records.length + let processed = 0 + let positionalParam = 1 + const pos = (i: number) => `${this.makePositionalParam(i)}` + const makeInsertPattern = () => { + return ` (${Array.from( + { length: columnCount }, + () => `${pos(positionalParam++)}` + ).join(', ')})` + } + + // Largest number below maxSqlParamers that evenly divides by column count, + // divided by columnCount, giving the amount of rows we can insert at once + const batchMaxSize = + (maxParameters - (maxParameters % columnCount)) / columnCount + while (processed < recordCount) { + const currentInsertCount = Math.min(recordCount - processed, batchMaxSize) + const sql = + baseSql + + Array.from({ length: currentInsertCount }, makeInsertPattern).join(',') + const args = records + .slice(processed, processed + currentInsertCount) + .flatMap((record) => columns.map((col) => record[col] as SqlValue)) + + processed += currentInsertCount + stmts.push({ sql, args }) + } + return stmts + } } diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index b095f1d14d..b8ecd2ad05 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -1,13 +1,17 @@ import { dedent } from 'ts-dedent' -import { QualifiedTablename } from '../../util' +import { QualifiedTablename, SqlValue, Statement } from '../../util' import { QueryBuilder } from './builder' import { ForeignKey } from '../triggers' const quote = (col: string) => `"${col}"` class PgBuilder extends QueryBuilder { + readonly dialect = 'Postgres' readonly AUTOINCREMENT_PK = 'SERIAL PRIMARY KEY' readonly BLOB = 'TEXT' + readonly deferForeignKeys = 'SET CONSTRAINTS ALL DEFERRED;' + readonly getVersion = 'SELECT version();' + readonly paramSign = '$' pgOnly(query: string) { return query @@ -25,6 +29,20 @@ class PgBuilder extends QueryBuilder { return [] } + countTablesIn(countName: string, tables: string[]): Statement { + const sql = dedent` + SELECT COUNT(table_name)::integer AS "${countName}" + FROM information_schema.tables + WHERE + table_type = 'BASE TABLE' AND + table_name IN (${tables.map((_, i) => `$${i + 1}`).join(', ')}); + ` + return { + sql, + args: tables, + } + } + createIndex( indexName: string, onTable: QualifiedTablename, @@ -37,17 +55,142 @@ class PgBuilder extends QueryBuilder { .join(', ')})` } + getLocalTableNames(notIn: string[] = []): Statement { + let tables = dedent` + SELECT table_name AS name + FROM information_schema.tables + WHERE + table_type = 'BASE TABLE' AND + table_schema <> 'pg_catalog' AND + table_schema <> 'information_schema' + ` + if (notIn.length > 0) { + tables += ` AND table_name NOT IN (${notIn + .map((_, i) => `$${i + 1}`) + .join(', ')})` + } + return { + sql: tables, + args: notIn, + } + } + + getTableInfo(tablename: string): Statement { + return { + sql: dedent` + SELECT + c.column_name AS name, + UPPER(c.data_type) AS type, + CASE + WHEN 
c.is_nullable = 'YES' THEN 0 + ELSE 1 + END AS notnull, + c.column_default AS dflt_value, + EXISTS ( + SELECT pg_class.relname, pg_attribute.attname + FROM pg_class, pg_attribute, pg_index + WHERE pg_class.oid = pg_attribute.attrelid AND + pg_class.oid = pg_index.indrelid AND + pg_attribute.attnum = ANY(pg_index.indkey) AND + pg_index.indisprimary = 't' AND + pg_class.relname = $1 AND + pg_attribute.attname = c.column_name + ) :: INTEGER AS pk + FROM information_schema.columns AS c + WHERE + c.table_name = $1; + `, + args: [tablename], + } + } + insertOrIgnore( schema: string, table: string, columns: string[], - values: string[] - ) { - return dedent` - INSERT INTO "${schema}"."${table}" (${columns.map(quote).join(', ')}) - VALUES (${values.join(', ')}) - ON CONFLICT DO NOTHING; - ` + values: SqlValue[] + ): Statement { + return { + sql: dedent` + INSERT INTO "${schema}"."${table}" (${columns.map(quote).join(', ')}) + VALUES (${columns.map((_, i) => `$${i + 1}`).join(', ')}) + ON CONFLICT DO NOTHING; + `, + args: values, + } + } + + insertOrReplace( + schema: string, + table: string, + columns: string[], + values: Array, + conflictCols: string[], + updateCols: string[] + ): Statement { + return { + sql: dedent` + INSERT INTO "${schema}"."${table}" (${columns.map(quote).join(', ')}) + VALUES (${columns.map((_, i) => `$${i + 1}`).join(', ')}) + ON CONFLICT (${conflictCols.map(quote).join(', ')}) DO UPDATE + SET ${updateCols + .map((col) => `${quote(col)} = EXCLUDED.${quote(col)}`) + .join(', ')}; + `, + args: values, + } + } + + insertOrReplaceWith( + schema: string, + table: string, + columns: string[], + values: Array, + conflictCols: string[], + updateCols: string[], + updateVals: SqlValue[] + ): Statement { + return { + sql: dedent` + INSERT INTO "${schema}"."${table}" (${columns.map(quote).join(', ')}) + VALUES (${columns.map((_, i) => `$${i + 1}`).join(', ')}) + ON CONFLICT (${conflictCols.map(quote).join(', ')}) DO UPDATE + SET ${updateCols + .map((col, i) => `${quote(col)} = $${columns.length + i + 1}`) + .join(', ')}; + `, + args: values.concat(updateVals), + } + } + + batchedInsertOrReplace( + schema: string, + table: string, + columns: string[], + records: Array>, + conflictCols: string[], + updateCols: string[], + maxSqlParameters: number + ): Statement[] { + const baseSql = `INSERT INTO "${schema}"."${table}" (${columns + .map(quote) + .join(', ')}) VALUES ` + const statements = this.prepareInsertBatchedStatements( + baseSql, + columns, + records, + maxSqlParameters + ) + return statements.map(({ sql, args }) => ({ + sql: dedent` + ${sql} + ON CONFLICT (${conflictCols.map(quote).join(', ')}) DO UPDATE + SET ${updateCols + .map((col) => `${quote(col)} = EXCLUDED.${quote(col)}`) + .join(', ')}; + `, + args, + })) } dropTriggerIfExists( @@ -89,10 +232,46 @@ class PgBuilder extends QueryBuilder { ] } - createJsonObject(rows: string) { + // This creates a JSON object that is equivalent + // to the JSON objects created by SQLite + // in that it does not re-order the keys + // and removes whitespaces between keys and values. + createPKJsonObject(rows: string) { + // `json_build_object` introduces whitespaces + // e.g. `{"a" : 5, "b" : 6}` + // But the json produced by SQLite is `{"a":5,"b":6}`. + // So this may lead to problems because we use this JSON string + // of the primary key to compare local and remote entries. + // But the changes for the same PK would be considered to be different PKs + // if e.g. the local change is PG and the remote change is SQLite. 
+ // We use `json_strip_nulls` on the PK as it removes the whitespaces. + // It also removes `null` values from the PK. Therefore, it is important + // that the SQLite oplog triggers also remove `null` values from the PK. + return `json_strip_nulls(json_build_object(${rows}))` + } + + createJsonbObject(rows: string) { return `jsonb_build_object(${rows})` } + // removes null values from the json object + // but most importantly also removes whitespaces introduced by `jsonb_build_object` + removeSpaceAndNullValuesFromJson(json: string): string { + return `json_strip_nulls(${json})` + } + + setTriggerSetting( + namespace: string, + tableName: string, + value: 0 | 1 + ): string { + return dedent` + INSERT INTO "main"."_electric_trigger_settings" ("namespace", "tablename", "flag") + VALUES ('${namespace}', '${tableName}', ${value}) + ON CONFLICT DO NOTHING; + ` + } + createOplogTrigger( opType: 'INSERT' | 'UPDATE' | 'DELETE', namespace: string, @@ -102,14 +281,14 @@ class PgBuilder extends QueryBuilder { oldRows: string ): string[] { const opTypeLower = opType.toLowerCase() - const pk = this.createJsonObject(newPKs) + const pk = this.createPKJsonObject(newPKs) // Update has both the old and the new row // Delete only has the old row const newRecord = - opType === 'DELETE' ? 'NULL' : this.createJsonObject(newRows) + opType === 'DELETE' ? 'NULL' : this.createJsonbObject(newRows) // Insert only has the new row const oldRecord = - opType === 'INSERT' ? 'NULL' : this.createJsonObject(oldRows) + opType === 'INSERT' ? 'NULL' : this.createJsonbObject(oldRows) return [ dedent` @@ -181,7 +360,9 @@ class PgBuilder extends QueryBuilder { '${fkTableNamespace}', '${fkTableName}', 'UPDATE', - jsonb_build_object(${joinedFkPKs}), + ${this.removeSpaceAndNullValuesFromJson( + this.createPKJsonObject(joinedFkPKs) + )}, jsonb_build_object(${joinedFkPKs}), NULL, NULL @@ -202,6 +383,72 @@ class PgBuilder extends QueryBuilder { `, ] } + + setClearTagsForTimestamp( + oplogTable: QualifiedTablename, + shadowTable: QualifiedTablename + ): string { + const oplog = `"${oplogTable.namespace}"."${oplogTable.tablename}"` + const shadow = `"${shadowTable.namespace}"."${shadowTable.tablename}"` + return dedent` + UPDATE ${oplog} + SET "clearTags" = ${shadow}.tags + FROM ${shadow} + WHERE ${oplog}.namespace = ${shadow}.namespace + AND ${oplog}.tablename = ${shadow}.tablename + AND ${shadow}."primaryKey"::jsonb @> ${oplog}."primaryKey"::jsonb AND ${shadow}."primaryKey"::jsonb <@ ${oplog}."primaryKey"::jsonb + AND ${oplog}.timestamp = $1 + ` + } + + setTagsForShadowRows( + oplogTable: QualifiedTablename, + shadowTable: QualifiedTablename + ): string { + const oplog = `"${oplogTable.namespace}"."${oplogTable.tablename}"` + const shadow = `"${shadowTable.namespace}"."${shadowTable.tablename}"` + return dedent` + INSERT INTO ${shadow} (namespace, tablename, "primaryKey", tags) + SELECT DISTINCT namespace, tablename, "primaryKey", $1 + FROM ${oplog} AS op + WHERE + timestamp = $2 + AND optype != 'DELETE' + ON CONFLICT (namespace, tablename, "primaryKey") + DO UPDATE SET tags = EXCLUDED.tags; + ` + } + + removeDeletedShadowRows( + oplogTable: QualifiedTablename, + shadowTable: QualifiedTablename + ): string { + const oplog = `"${oplogTable.namespace}"."${oplogTable.tablename}"` + const shadow = `"${shadowTable.namespace}"."${shadowTable.tablename}"` + // We do an inner join in a CTE instead of a `WHERE EXISTS (...)` + // since this is not reliant on re-executing a query + // for every row in the shadow table, but uses a PK join 
instead. + return dedent` + WITH _to_be_deleted (rowid) AS ( + SELECT ${shadow}.rowid + FROM ${oplog} + INNER JOIN ${shadow} + ON ${shadow}.namespace = ${oplog}.namespace + AND ${shadow}.tablename = ${oplog}.tablename + AND + ${shadow}."primaryKey"::jsonb @> ${oplog}."primaryKey"::jsonb AND ${shadow}."primaryKey"::jsonb <@ ${oplog}."primaryKey"::jsonb + WHERE ${oplog}.timestamp = $1 + AND ${oplog}.optype = 'DELETE' + GROUP BY ${shadow}.rowid + ) + DELETE FROM ${shadow} + WHERE rowid IN (SELECT rowid FROM _to_be_deleted); + ` + } + + makePositionalParam(i: number): string { + return this.paramSign + i + } } export default new PgBuilder() diff --git a/clients/typescript/src/migrators/triggers.ts b/clients/typescript/src/migrators/triggers.ts index 76fd65736d..69c3a11b79 100644 --- a/clients/typescript/src/migrators/triggers.ts +++ b/clients/typescript/src/migrators/triggers.ts @@ -62,14 +62,7 @@ export function generateOplogTriggers( return [ // Toggles for turning the triggers on and off - dedent` - ${builder.insertOrIgnore( - 'main', - '_electric_trigger_settings', - ['namespace', 'tablename', 'flag'], - [`'${namespace}'`, `'${tableName}'`, '1'] - )} - `, + builder.setTriggerSetting(namespace, tableName, 1), // Triggers for table ${tableName} // ensures primary key is immutable dropFkTrigger, diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index ae6b099d1e..c7a7de7196 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -63,7 +63,6 @@ import { import { Mutex } from 'async-mutex' import Log from 'loglevel' import { generateTableTriggers } from '../migrators/triggers' -import { prepareInsertBatchedStatements } from '../util/statements' import { mergeEntries } from './merge' import { SubscriptionsManager, getAllTablesForShape } from './shapes' import { InMemorySubscriptionsManager } from './shapes/manager' @@ -78,12 +77,11 @@ import { import { backOff } from 'exponential-backoff' import { chunkBy, genUUID } from '../util' import { isFatal, isOutOfSyncError, isThrowable, wrapFatalError } from './error' -import { inferRelationsFromSQLite } from '../util/relations' +import { inferRelationsFromDb } from '../util/relations' import { decodeUserIdFromToken } from '../auth/secure' import { InvalidArgumentError } from '../client/validation/errors/invalidArgumentError' import Long from 'long' import { QueryBuilder } from '../migrators/query-builder' -import { sqliteBuilder } from '../migrators/query-builder' type ChangeAccumulator = { [key: string]: Change @@ -125,6 +123,7 @@ export class SatelliteProcess implements Satellite { migrator: Migrator notifier: Notifier client: Client + builder: QueryBuilder opts: SatelliteOpts @@ -154,7 +153,7 @@ export class SatelliteProcess implements Satellite { * arguments. Precisely, its either 999 for versions prior to 3.32.0 and 32766 for * versions after. 
*/ - private maxSqlParameters: 999 | 32766 = 999 + private maxSqlParameters: 999 | 32766 | 65535 = 999 private snapshotMutex: Mutex = new Mutex() private performingSnapshot = false @@ -167,15 +166,14 @@ export class SatelliteProcess implements Satellite { migrator: Migrator, notifier: Notifier, client: Client, - opts: SatelliteOpts, - // TODO: turn `builder` into an abstract readonly field when introducing subclasses of the process - private builder: QueryBuilder = sqliteBuilder + opts: SatelliteOpts ) { this.dbName = dbName this.adapter = adapter this.migrator = migrator this.notifier = notifier this.client = client + this.builder = this.migrator.electricQueryBuilder this.opts = opts this.relations = {} @@ -216,7 +214,7 @@ export class SatelliteProcess implements Satellite { async start(authConfig?: AuthConfig): Promise { if (this.opts.debug) { - await this.logSQLiteVersion() + await this.logDatabaseVersion() } await this.migrator.up() @@ -284,11 +282,13 @@ export class SatelliteProcess implements Satellite { } } - private async logSQLiteVersion(): Promise { - const sqliteVersionRow = await this.adapter.query({ - sql: 'SELECT sqlite_version() AS version', + private async logDatabaseVersion(): Promise { + const versionRow = await this.adapter.query({ + sql: this.builder.getVersion, }) - Log.info(`Using SQLite version: ${sqliteVersionRow[0]['version']}`) + Log.info( + `Using ${this.builder.dialect} version: ${versionRow[0]['version']}` + ) } _setAuthState(authState: AuthState): void { @@ -309,7 +309,7 @@ export class SatelliteProcess implements Satellite { })) const stmtsWithTriggers = [ - { sql: 'PRAGMA defer_foreign_keys = ON' }, + { sql: this.builder.deferForeignKeys }, ...this._disableTriggers(tables), ...deleteStmts, ...this._enableTriggers(tables), @@ -478,7 +478,7 @@ export class SatelliteProcess implements Satellite { additionalStmts: Statement[] = [] ) { const stmts: Statement[] = [] - stmts.push({ sql: 'PRAGMA defer_foreign_keys = ON' }) + stmts.push({ sql: this.builder.deferForeignKeys }) // It's much faster[1] to do less statements to insert the data instead of doing an insert statement for each row // so we're going to do just that, but with a caveat: SQLite has a max number of parameters in prepared statements, @@ -550,7 +550,7 @@ export class SatelliteProcess implements Satellite { )}) VALUES ` stmts.push( - ...prepareInsertBatchedStatements( + ...this.builder.prepareInsertBatchedStatements( sqlBase, columnNames, records as Record[], @@ -563,16 +563,16 @@ export class SatelliteProcess implements Satellite { stmts.push(...this._enableTriggers(qualifiedTableNames)) // Then do a batched insert for the shadow table - const qualifiedShadowTable = `"${this.opts.shadowTable.namespace}"."${this.opts.shadowTable.tablename}"` - const upsertShadowStmt = `INSERT or REPLACE INTO ${qualifiedShadowTable} (namespace, tablename, primaryKey, tags) VALUES ` - stmts.push( - ...prepareInsertBatchedStatements( - upsertShadowStmt, - ['namespace', 'tablename', 'primaryKey', 'tags'], - allArgsForShadowInsert, - this.maxSqlParameters - ) + const batchedShadowInserts = this.builder.batchedInsertOrReplace( + this.opts.shadowTable.namespace, + this.opts.shadowTable.tablename, + ['namespace', 'tablename', 'primaryKey', 'tags'], + allArgsForShadowInsert, + ['namespace', 'tablename', 'primaryKey'], + ['namespace', 'tablename', 'tags'], + this.maxSqlParameters ) + stmts.push(...batchedShadowInserts) // Then update subscription state and LSN stmts.push( @@ -939,16 +939,9 @@ export class SatelliteProcess 
implements Satellite { const oplog = this.opts.oplogTable.tablename const shadow = this.opts.shadowTable.tablename - const tablesExist = ` - SELECT count(name) as numTables FROM sqlite_master - WHERE type='table' - AND name IN (?, ?, ?) - ` - - const [{ numTables }] = await this.adapter.query({ - sql: tablesExist, - args: [meta, oplog, shadow], - }) + const [{ numTables }] = await this.adapter.query( + this.builder.countTablesIn('numTables', [meta, oplog, shadow]) + ) return numTables === 3 } @@ -997,7 +990,7 @@ export class SatelliteProcess implements Satellite { // Update the timestamps on all "new" entries - they have been added but timestamp is still `NULL` const q1: Statement = { sql: ` - UPDATE ${oplog} SET timestamp = ? + UPDATE ${oplog} SET timestamp = ${this.builder.makePositionalParam(1)} WHERE rowid in ( SELECT rowid FROM ${oplog} WHERE timestamp is NULL @@ -1027,34 +1020,19 @@ export class SatelliteProcess implements Satellite { // For each affected shadow row, set new tag array, unless the last oplog operation was a DELETE const q3: Statement = { - sql: ` - INSERT OR REPLACE INTO ${shadow} (namespace, tablename, primaryKey, tags) - SELECT namespace, tablename, primaryKey, ? - FROM ${oplog} AS op - WHERE timestamp = ? - GROUP BY namespace, tablename, primaryKey - HAVING rowid = max(rowid) AND optype != 'DELETE' - `, + sql: this.builder.setTagsForShadowRows( + this.opts.oplogTable, + this.opts.shadowTable + ), args: [encodeTags([newTag]), timestamp.toISOString()], } // And finally delete any shadow rows where the last oplog operation was a `DELETE` - // We do an inner join in a CTE instead of a `WHERE EXISTS (...)` since this is not reliant on re-executing a query per every row in shadow table, but uses a PK join instead. const q4: Statement = { - sql: ` - WITH _to_be_deleted (rowid) AS ( - SELECT shadow.rowid - FROM ${oplog} AS op - INNER JOIN ${shadow} AS shadow - ON shadow.namespace = op.namespace AND shadow.tablename = op.tablename AND shadow.primaryKey = op.primaryKey - WHERE op.timestamp = ? - GROUP BY op.namespace, op.tablename, op.primaryKey - HAVING op.rowid = max(op.rowid) AND op.optype = 'DELETE' - ) - - DELETE FROM ${shadow} - WHERE rowid IN _to_be_deleted - `, + sql: this.builder.removeDeletedShadowRows( + this.opts.oplogTable, + this.opts.shadowTable + ), args: [timestamp.toISOString()], } @@ -1190,13 +1168,15 @@ export class SatelliteProcess implements Satellite { switch (entryChanges.optype) { case OPTYPES.gone: case OPTYPES.delete: - stmts.push(_applyDeleteOperation(entryChanges, qualifiedTableName)) + stmts.push( + this._applyDeleteOperation(entryChanges, qualifiedTableName) + ) stmts.push(this._deleteShadowTagsStatement(shadowEntry)) break default: stmts.push( - _applyNonDeleteOperation(entryChanges, qualifiedTableName) + this._applyNonDeleteOperation(entryChanges, qualifiedTableName) ) stmts.push(this._updateShadowTagsStatement(shadowEntry)) } @@ -1218,7 +1198,7 @@ export class SatelliteProcess implements Satellite { const selectEntries = ` SELECT * FROM ${oplog} WHERE timestamp IS NOT NULL - AND rowid > ? 
+ AND rowid > ${this.builder.makePositionalParam(1)} ORDER BY rowid ASC ` const rows = await this.adapter.query({ sql: selectEntries, args: [since] }) @@ -1227,11 +1207,12 @@ export class SatelliteProcess implements Satellite { _deleteShadowTagsStatement(shadow: ShadowEntry): Statement { const shadowTable = `"${this.opts.shadowTable.namespace}"."${this.opts.shadowTable.tablename}"` + const pos = (i: number) => this.builder.makePositionalParam(i) const deleteRow = ` DELETE FROM ${shadowTable} - WHERE namespace = ? AND - tablename = ? AND - primaryKey = ?; + WHERE namespace = ${pos(1)} AND + tablename = ${pos(2)} AND + "primaryKey" = ${pos(3)}; ` return { sql: deleteRow, @@ -1240,20 +1221,14 @@ export class SatelliteProcess implements Satellite { } _updateShadowTagsStatement(shadow: ShadowEntry): Statement { - const shadowTable = `"${this.opts.shadowTable.namespace}"."${this.opts.shadowTable.tablename}"` - const updateTags = ` - INSERT or REPLACE INTO ${shadowTable} (namespace, tablename, primaryKey, tags) VALUES - (?, ?, ?, ?); - ` - return { - sql: updateTags, - args: [ - shadow.namespace, - shadow.tablename, - shadow.primaryKey, - shadow.tags, - ], - } + return this.builder.insertOrReplace( + this.opts.shadowTable.namespace, + this.opts.shadowTable.tablename, + ['namespace', 'tablename', 'primaryKey', 'tags'], + [shadow.namespace, shadow.tablename, shadow.primaryKey, shadow.tags], + ['namespace', 'tablename', 'primaryKey'], + ['tags'] + ) } _updateRelations(rel: Omit) { @@ -1315,7 +1290,7 @@ export class SatelliteProcess implements Satellite { let firstDMLChunk = true // switches off on transaction commit/abort - stmts.push({ sql: 'PRAGMA defer_foreign_keys = ON' }) + //stmts.push({ sql: this.builder.deferForeignKeys }) // update lsn. stmts.push(this.updateLsnStmt(lsn)) stmts.push(this._resetSeenAdditionalDataStmt()) @@ -1475,16 +1450,18 @@ export class SatelliteProcess implements Satellite { const namespacesAndTableNames = tables .map((tbl) => [tbl.namespace, tbl.tablename]) .flat() - if (tables.length > 0) + if (tables.length > 0) { + const pos = (i: number) => this.builder.makePositionalParam(i) + let i = 1 return [ { - sql: `UPDATE ${triggers} SET flag = ? WHERE ${tables - .map(() => '(namespace = ? AND tablename = ?)') + sql: `UPDATE ${triggers} SET flag = ${pos(i++)} WHERE ${tables + .map((_) => `(namespace = ${pos(i++)} AND tablename = ${pos(i++)})`) .join(' OR ')}`, args: [flag, ...namespacesAndTableNames], }, ] - else return [] + } else return [] } _addSeenAdditionalDataStmt(ref: string): Statement { @@ -1510,8 +1487,8 @@ export class SatelliteProcess implements Satellite { _setMetaStatement(key: Uuid, value: string | null): Statement _setMetaStatement(key: string, value: SqlValue) { const meta = `"${this.opts.metaTable.namespace}"."${this.opts.metaTable.tablename}"` - - const sql = `UPDATE ${meta} SET value = ? 
WHERE key = ?` + const pos = (i: number) => this.builder.makePositionalParam(i) + const sql = `UPDATE ${meta} SET value = ${pos(1)} WHERE key = ${pos(2)}` const args = [value, key] return { sql, args } } @@ -1533,8 +1510,8 @@ export class SatelliteProcess implements Satellite { async _getMeta(key: K): Promise async _getMeta(key: string) { const meta = `"${this.opts.metaTable.namespace}"."${this.opts.metaTable.tablename}"` - - const sql = `SELECT value from ${meta} WHERE key = ?` + const pos = (i: number) => this.builder.makePositionalParam(i) + const sql = `SELECT value from ${meta} WHERE key = ${pos(1)}` const args = [key] const rows = await this.adapter.query({ sql, args }) @@ -1558,7 +1535,7 @@ export class SatelliteProcess implements Satellite { } private async _getLocalRelations(): Promise<{ [k: string]: Relation }> { - return inferRelationsFromSQLite(this.adapter, this.opts) + return inferRelationsFromDb(this.adapter, this.opts, this.builder) } private _generateTag(timestamp: Date): string { @@ -1569,9 +1546,9 @@ export class SatelliteProcess implements Satellite { async _garbageCollectOplog(commitTimestamp: Date): Promise { const isoString = commitTimestamp.toISOString() const oplog = `"${this.opts.oplogTable.namespace}"."${this.opts.oplogTable.tablename}"` - + const pos = (i: number) => this.builder.makePositionalParam(i) await this.adapter.run({ - sql: `DELETE FROM ${oplog} WHERE timestamp = ?`, + sql: `DELETE FROM ${oplog} WHERE timestamp = ${pos(1)}`, args: [isoString], }) } @@ -1587,17 +1564,6 @@ export class SatelliteProcess implements Satellite { return this._setMetaStatement('lsn', base64.fromBytes(lsn)) } - private async checkMaxSqlParameters() { - const [{ version }] = (await this.adapter.query({ - sql: 'SELECT sqlite_version() AS version', - })) as [{ version: string }] - - const [major, minor, _patch] = version.split('.').map((x) => parseInt(x)) - - if (major === 3 && minor >= 32) this.maxSqlParameters = 32766 - else this.maxSqlParameters = 999 - } - public setReplicationTransform( tableName: QualifiedTablename, transform: ReplicatedRowTransformer @@ -1608,69 +1574,79 @@ export class SatelliteProcess implements Satellite { public clearReplicationTransform(tableName: QualifiedTablename): void { this.client.clearReplicationTransform(tableName) } -} -function _applyDeleteOperation( - entryChanges: ShadowEntryChanges, - qualifiedTableName: QualifiedTablename -): Statement { - const pkEntries = Object.entries(entryChanges.primaryKeyCols) - if (pkEntries.length === 0) - throw new Error( - "Can't apply delete operation. None of the columns in changes are marked as PK." 
- ) - const params = pkEntries.reduce( - (acc, [column, value]) => { - acc.where.push(`${column} = ?`) - acc.values.push(value) - return acc - }, - { where: [] as string[], values: [] as SqlValue[] } - ) - - return { - sql: `DELETE FROM "${qualifiedTableName.namespace}"."${ - qualifiedTableName.tablename - }" WHERE ${params.where.join(' AND ')}`, - args: params.values, - } -} - -function _applyNonDeleteOperation( - { fullRow, primaryKeyCols }: ShadowEntryChanges, - qualifiedTableName: QualifiedTablename -): Statement { - const columnNames = Object.keys(fullRow) - const columnValues = Object.values(fullRow) - let insertStmt = `INTO "${qualifiedTableName.namespace}"."${ - qualifiedTableName.tablename - }" (${columnNames.join(', ')}) VALUES (${columnValues - .map((_) => '?') - .join(',')})` - - const updateColumnStmts = columnNames - .filter((c) => !(c in primaryKeyCols)) - .reduce( - (acc, c) => { - acc.where.push(`${c} = ?`) - acc.values.push(fullRow[c]) + _applyDeleteOperation( + entryChanges: ShadowEntryChanges, + qualifiedTableName: QualifiedTablename + ): Statement { + const pkEntries = Object.entries(entryChanges.primaryKeyCols) + if (pkEntries.length === 0) + throw new Error( + "Can't apply delete operation. None of the columns in changes are marked as PK." + ) + let i = 1 + const pos = (i: number) => this.builder.makePositionalParam(i) + const params = pkEntries.reduce( + (acc, [column, value]) => { + acc.where.push(`${column} = ${pos(i++)}`) + acc.values.push(value) return acc }, { where: [] as string[], values: [] as SqlValue[] } ) - if (updateColumnStmts.values.length > 0) { - insertStmt = ` - INSERT ${insertStmt} - ON CONFLICT DO UPDATE SET ${updateColumnStmts.where.join(', ')} - ` - columnValues.push(...updateColumnStmts.values) - } else { + return { + sql: `DELETE FROM "${qualifiedTableName.namespace}"."${ + qualifiedTableName.tablename + }" WHERE ${params.where.join(' AND ')}`, + args: params.values, + } + } + + _applyNonDeleteOperation( + { fullRow, primaryKeyCols }: ShadowEntryChanges, + qualifiedTableName: QualifiedTablename + ): Statement { + const columnNames = Object.keys(fullRow) + const columnValues = Object.values(fullRow) + const updateColumnStmts = columnNames.filter((c) => !(c in primaryKeyCols)) + + if (updateColumnStmts.length > 0) { + return this.builder.insertOrReplaceWith( + qualifiedTableName.namespace, + qualifiedTableName.tablename, + columnNames, + columnValues, + ['id'], + updateColumnStmts, + updateColumnStmts.map((col) => fullRow[col]) + ) + } + // no changes, can ignore statement if exists - insertStmt = `INSERT OR IGNORE ${insertStmt}` + return this.builder.insertOrIgnore( + qualifiedTableName.namespace, + qualifiedTableName.tablename, + columnNames, + columnValues + ) } - return { sql: insertStmt, args: columnValues } + private async checkMaxSqlParameters() { + if (this.builder.dialect === 'SQLite') { + const [{ version }] = (await this.adapter.query({ + sql: 'SELECT sqlite_version() AS version', + })) as [{ version: string }] + + const [major, minor, _patch] = version.split('.').map((x) => parseInt(x)) + + if (major === 3 && minor >= 32) this.maxSqlParameters = 32766 + else this.maxSqlParameters = 999 + } else { + // Postgres allows a maximum of 65535 query parameters + this.maxSqlParameters = 65535 + } + } } export function generateTriggersForTable( diff --git a/clients/typescript/src/util/relations.ts b/clients/typescript/src/util/relations.ts index b5306db7d4..594762bf32 100644 --- a/clients/typescript/src/util/relations.ts +++ 
b/clients/typescript/src/util/relations.ts @@ -1,23 +1,23 @@ import { SatRelation_RelationType } from '../_generated/protocol/satellite' import { DatabaseAdapter } from '../electric/adapter' +import { QueryBuilder } from '../migrators/query-builder' import { SatelliteOpts } from '../satellite/config' import { Relation, RelationsCache } from './types' // TODO: Improve this code once with Migrator and consider simplifying oplog. -export async function inferRelationsFromSQLite( +export async function inferRelationsFromDb( adapter: DatabaseAdapter, - opts: SatelliteOpts + opts: SatelliteOpts, + builder: QueryBuilder ): Promise<{ [k: string]: Relation }> { - const tableNames = await _getLocalTableNames(adapter, opts) + const tableNames = await _getLocalTableNames(adapter, opts, builder) const relations: RelationsCache = {} let id = 0 const schema = 'public' // TODO for (const table of tableNames) { const tableName = table.name - const sql = 'SELECT * FROM pragma_table_info(?)' - const args = [tableName] - const columnsForTable = (await adapter.query({ sql, args })) as { + const columnsForTable = (await adapter.query(builder.getTableInfo(tableName))) as { name: string type: string notnull: number @@ -41,7 +41,7 @@ export async function inferRelationsFromSQLite( primaryKey: c.pk > 0 ? c.pk : undefined, }) } - relations[`${tableName}`] = relation + relations[tableName] = relation } return relations @@ -49,7 +49,8 @@ export async function inferRelationsFromSQLite( async function _getLocalTableNames( adapter: DatabaseAdapter, - opts: SatelliteOpts + opts: SatelliteOpts, + builder: QueryBuilder ): Promise<{ name: string }[]> { const notIn = [ opts.metaTable.tablename.toString(), @@ -57,16 +58,8 @@ async function _getLocalTableNames( opts.oplogTable.tablename.toString(), opts.triggersTable.tablename.toString(), opts.shadowTable.tablename.toString(), - 'sqlite_schema', - 'sqlite_sequence', - 'sqlite_temp_schema', ] - const tables = ` - SELECT name FROM sqlite_master - WHERE type = 'table' - AND name NOT IN (${notIn.map(() => '?').join(',')}) - ` - const rows = await adapter.query({ sql: tables, args: notIn }) + const rows = await adapter.query(builder.getLocalTableNames(notIn)) return rows as Array<{ name: string }> } diff --git a/clients/typescript/src/util/statements.ts b/clients/typescript/src/util/statements.ts index 35e1c3da14..bfac3d7263 100644 --- a/clients/typescript/src/util/statements.ts +++ b/clients/typescript/src/util/statements.ts @@ -1,48 +1,3 @@ -import { SqlValue, Statement } from './types' - export function isInsertUpdateOrDeleteStatement(stmt: string) { return /^\s*(insert|update|delete)/i.test(stmt) } - -/** - * Prepare multiple batched insert statements for an array of records. - * - * Since SQLite only supports a limited amount of positional `?` parameters, - * we generate multiple insert statements with each one being filled as much - * as possible from the given data. All statements are derived from same `baseSql` - - * the positional parameters will be appended to this string. 
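Note: the SQLite-only `prepareInsertBatchedStatements` helper is deleted from this module because batched-insert construction now sits behind the `QueryBuilder` abstraction used earlier in this patch; the part that genuinely differs per dialect is the positional-parameter shape. A minimal sketch of that difference, assuming a plain `?` placeholder for SQLite (only the Postgres builder's `makePositionalParam` is shown in this patch):

    // Sketch only: dialect-specific positional parameters.
    const pgParam = (i: number) => '$' + i      // Postgres: $1, $2, ...
    const sqliteParam = (_i: number) => '?'     // assumed SQLite shape
    const placeholders = (n: number, param: (i: number) => string) =>
      Array.from({ length: n }, (_, i) => param(i + 1)).join(', ')

    placeholders(3, pgParam)     // "$1, $2, $3"
    placeholders(3, sqliteParam) // "?, ?, ?"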
- * - * @param baseSql base SQL string to which inserts should be appended - * @param columns columns that describe records - * @param records records to be inserted - * @param maxParameters max parameters this SQLite can accept - determines batching factor - * @returns array of statements ready to be executed by the adapter - */ -export function prepareInsertBatchedStatements( - baseSql: string, - columns: string[], - records: Record[], - maxParameters: number -): Statement[] { - const stmts: Statement[] = [] - const columnCount = columns.length - const recordCount = records.length - let processed = 0 - const insertPattern = ' (' + '?, '.repeat(columnCount).slice(0, -2) + '),' - - // Largest number below maxSqlParamers that evenly divides by column count, - // divided by columnCount, giving the amount of rows we can insert at once - const batchMaxSize = - (maxParameters - (maxParameters % columnCount)) / columnCount - while (processed < recordCount) { - const currentInsertCount = Math.min(recordCount - processed, batchMaxSize) - const sql = baseSql + insertPattern.repeat(currentInsertCount).slice(0, -1) - const args = records - .slice(processed, processed + currentInsertCount) - .flatMap((record) => columns.map((col) => record[col] as SqlValue)) - - processed += currentInsertCount - stmts.push({ sql, args }) - } - return stmts -} diff --git a/clients/typescript/test/migrators/postgres/triggers.test.ts b/clients/typescript/test/migrators/postgres/triggers.test.ts index 3c66b58a1f..e55209cb4e 100644 --- a/clients/typescript/test/migrators/postgres/triggers.test.ts +++ b/clients/typescript/test/migrators/postgres/triggers.test.ts @@ -73,7 +73,7 @@ test('generateTableTriggers should create correct triggers for a table', (t) => 'main', 'personTable', 'INSERT', - jsonb_build_object('id', cast(new."id" as TEXT)), + json_strip_nulls(json_build_object('id', cast(new."id" as TEXT))), jsonb_build_object('age', new."age", 'bmi', cast(new."bmi" as TEXT), 'id', cast(new."id" as TEXT), 'int8', cast(new."int8" as TEXT), 'name', new."name"), NULL, NULL @@ -118,7 +118,7 @@ test('generateTableTriggers should create correct triggers for a table', (t) => 'main', 'personTable', 'UPDATE', - jsonb_build_object('id', cast(new."id" as TEXT)), + json_strip_nulls(json_build_object('id', cast(new."id" as TEXT))), jsonb_build_object('age', new."age", 'bmi', cast(new."bmi" as TEXT), 'id', cast(new."id" as TEXT), 'int8', cast(new."int8" as TEXT), 'name', new."name"), jsonb_build_object('age', old."age", 'bmi', cast(old."bmi" as TEXT), 'id', cast(old."id" as TEXT), 'int8', cast(old."int8" as TEXT), 'name', old."name"), NULL @@ -163,7 +163,7 @@ test('generateTableTriggers should create correct triggers for a table', (t) => 'main', 'personTable', 'DELETE', - jsonb_build_object('id', cast(old."id" as TEXT)), + json_strip_nulls(json_build_object('id', cast(old."id" as TEXT))), NULL, jsonb_build_object('age', old."age", 'bmi', cast(old."bmi" as TEXT), 'id', cast(old."id" as TEXT), 'int8', cast(old."int8" as TEXT), 'name', old."name"), NULL @@ -205,7 +205,7 @@ test('oplog insertion trigger should insert row into oplog table', async (t) => // cf. 
`joinColsForJSON` function in `src/migrators/triggers.ts` // These strings are then parsed back into real numbers // by the `deserialiseRow` function in `src/satellite/oplog.ts` - primaryKey: '{"id": "1"}', + primaryKey: '{"id":"1"}', newRow: '{"id": "1", "age": 30, "bmi": "25.5", "int8": "7", "name": "John Doe"}', // BigInts are serialized as strings in the oplog oldRow: null, @@ -242,7 +242,7 @@ test('oplog trigger should handle Infinity values correctly', async (t) => { // cf. `joinColsForJSON` function in `src/migrators/triggers.ts` // These strings are then parsed back into real numbers // by the `deserialiseRow` function in `src/satellite/oplog.ts` - primaryKey: '{"id": "-Infinity"}', + primaryKey: '{"id":"-Infinity"}', newRow: '{"id": "-Infinity", "age": 30, "bmi": "Infinity", "int8": "7", "name": "John Doe"}', // BigInts are serialized as strings in the oplog oldRow: null, diff --git a/clients/typescript/test/migrators/sqlite/triggers.test.ts b/clients/typescript/test/migrators/sqlite/triggers.test.ts index 9760c68f21..0a80c37384 100644 --- a/clients/typescript/test/migrators/sqlite/triggers.test.ts +++ b/clients/typescript/test/migrators/sqlite/triggers.test.ts @@ -35,7 +35,7 @@ test('generateTableTriggers should create correct triggers for a table', (t) => WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'personTable') BEGIN INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) - VALUES ('main', 'personTable', 'INSERT', json_object('id', cast(new."id" as TEXT)), json_object('age', new."age", 'blob', CASE WHEN new."blob" IS NOT NULL THEN hex(new."blob") ELSE NULL END, 'bmi', cast(new."bmi" as TEXT), 'id', cast(new."id" as TEXT), 'int8', cast(new."int8" as TEXT), 'name', new."name"), NULL, NULL); + VALUES ('main', 'personTable', 'INSERT', json_patch('{}', json_object('id', cast(new."id" as TEXT))), json_object('age', new."age", 'blob', CASE WHEN new."blob" IS NOT NULL THEN hex(new."blob") ELSE NULL END, 'bmi', cast(new."bmi" as TEXT), 'id', cast(new."id" as TEXT), 'int8', cast(new."int8" as TEXT), 'name', new."name"), NULL, NULL); END; ` ) @@ -49,7 +49,7 @@ test('generateTableTriggers should create correct triggers for a table', (t) => WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'personTable') BEGIN INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) - VALUES ('main', 'personTable', 'UPDATE', json_object('id', cast(new."id" as TEXT)), json_object('age', new."age", 'blob', CASE WHEN new."blob" IS NOT NULL THEN hex(new."blob") ELSE NULL END, 'bmi', cast(new."bmi" as TEXT), 'id', cast(new."id" as TEXT), 'int8', cast(new."int8" as TEXT), 'name', new."name"), json_object('age', old."age", 'blob', CASE WHEN old."blob" IS NOT NULL THEN hex(old."blob") ELSE NULL END, 'bmi', cast(old."bmi" as TEXT), 'id', cast(old."id" as TEXT), 'int8', cast(old."int8" as TEXT), 'name', old."name"), NULL); + VALUES ('main', 'personTable', 'UPDATE', json_patch('{}', json_object('id', cast(new."id" as TEXT))), json_object('age', new."age", 'blob', CASE WHEN new."blob" IS NOT NULL THEN hex(new."blob") ELSE NULL END, 'bmi', cast(new."bmi" as TEXT), 'id', cast(new."id" as TEXT), 'int8', cast(new."int8" as TEXT), 'name', new."name"), json_object('age', old."age", 'blob', CASE WHEN old."blob" IS NOT NULL THEN hex(old."blob") ELSE NULL END, 'bmi', cast(old."bmi" as TEXT), 'id', cast(old."id" as TEXT), 'int8', cast(old."int8" as 
TEXT), 'name', old."name"), NULL); END; ` ) @@ -63,7 +63,7 @@ test('generateTableTriggers should create correct triggers for a table', (t) => WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'personTable') BEGIN INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) - VALUES ('main', 'personTable', 'DELETE', json_object('id', cast(old."id" as TEXT)), NULL, json_object('age', old."age", 'blob', CASE WHEN old."blob" IS NOT NULL THEN hex(old."blob") ELSE NULL END, 'bmi', cast(old."bmi" as TEXT), 'id', cast(old."id" as TEXT), 'int8', cast(old."int8" as TEXT), 'name', old."name"), NULL); + VALUES ('main', 'personTable', 'DELETE', json_patch('{}', json_object('id', cast(old."id" as TEXT))), NULL, json_object('age', old."age", 'blob', CASE WHEN old."blob" IS NOT NULL THEN hex(old."blob") ELSE NULL END, 'bmi', cast(old."bmi" as TEXT), 'id', cast(old."id" as TEXT), 'int8', cast(old."int8" as TEXT), 'name', old."name"), NULL); END; ` ) diff --git a/clients/typescript/test/satellite/common.ts b/clients/typescript/test/satellite/common.ts index 4f1844d227..3871f13f6c 100644 --- a/clients/typescript/test/satellite/common.ts +++ b/clients/typescript/test/satellite/common.ts @@ -2,8 +2,9 @@ import { mkdir, rm as removeFile } from 'node:fs/promises' import { RelationsCache, randomValue } from '../../src/util' import Database from 'better-sqlite3' import type { Database as SqliteDB } from 'better-sqlite3' -import { DatabaseAdapter } from '../../src/drivers/better-sqlite3' -import { SqliteBundleMigrator as BundleMigrator } from '../../src/migrators' +import SqliteDatabase from 'better-sqlite3' +import { DatabaseAdapter as SqliteDatabaseAdapter } from '../../src/drivers/better-sqlite3' +import { SqliteBundleMigrator, PgBundleMigrator } from '../../src/migrators' import { EventNotifier, MockNotifier } from '../../src/notifiers' import { MockSatelliteClient } from '../../src/satellite/mock' import { GlobalRegistry, Registry, SatelliteProcess } from '../../src/satellite' @@ -12,8 +13,23 @@ import { satelliteDefaults, SatelliteOpts } from '../../src/satellite/config' import { Table, generateTableTriggers } from '../../src/migrators/triggers' import { buildInitialMigration as makeInitialMigration } from '../../src/migrators/schema' +import sqliteMigrations from '../support/migrations/migrations.js' +import pgMigrations from '../support/migrations/pg-migrations.js' +import { ExecutionContext } from 'ava' +import { AuthState } from '../../src/auth' +import { DbSchema, TableSchema } from '../../src/client/model/schema' +import { PgBasicType } from '../../src/client/conversions/types' +import { HKT } from '../../src/client/util/hkt' +import { ElectricClient } from '../../src/client/model' +import EventEmitter from 'events' +import { QueryBuilder } from '../../src/migrators/query-builder' +import { BundleMigratorBase } from '../../src/migrators/bundle' +import { makePgDatabase } from '../support/node-postgres' +import { DatabaseAdapter as PgDatabaseAdapter } from '../../src/drivers/node-postgres/adapter' +import { DatabaseAdapter } from '../../src/electric/adapter' + export type Database = { - exec(statement: { sql: string }): Promise + exec(statement: { sql: string }): Promise } export function wrapDB(db: SqliteDB): Database { @@ -221,16 +237,6 @@ export const relations = { }, } satisfies RelationsCache -import migrations from '../support/migrations/migrations.js' -import { ExecutionContext } from 'ava' -import { AuthState, 
insecureAuthToken } from '../../src/auth' -import { DbSchema, TableSchema } from '../../src/client/model/schema' -import { PgBasicType } from '../../src/client/conversions/types' -import { HKT } from '../../src/client/util/hkt' -import { ElectricClient } from '../../src/client/model' -import EventEmitter from 'events' -import { QueryBuilder } from '../../src/migrators/query-builder' - // Speed up the intervals for testing. export const opts = Object.assign({}, satelliteDefaults, { minSnapshotWindow: 40, @@ -257,17 +263,16 @@ export type ContextType = { timestamp: number authState: AuthState token: string + stop?: () => Promise } & Extra -export const makeContext = async ( +const makeContextInternal = async ( t: ExecutionContext, + dbName: string, + adapter: DatabaseAdapter, + migrator: BundleMigratorBase, options: Opts = opts ) => { - await mkdir('.tmp', { recursive: true }) - const dbName = `.tmp/test-${randomValue()}.db` - const db = new Database(dbName) - const adapter = new DatabaseAdapter(db) - const migrator = new BundleMigrator(adapter, migrations) const notifier = new MockNotifier(dbName, new EventEmitter()) const client = new MockSatelliteClient() const satellite = new SatelliteProcess( @@ -303,14 +308,64 @@ export const makeContext = async ( } } +export const makeContext = async ( + t: ExecutionContext, + options: Opts = opts +) => { + await mkdir('.tmp', { recursive: true }) + const dbName = `.tmp/test-${randomValue()}.db` + const db = new SqliteDatabase(dbName) + const adapter = new SqliteDatabaseAdapter(db) + const migrator = new SqliteBundleMigrator(adapter, sqliteMigrations) + makeContextInternal(t, dbName, adapter, migrator, options) +} + +export const makePgContext = async ( + t: ExecutionContext, + port: number, + options: Opts = opts +) => { + const dbName = `test-${randomValue()}` + const { db, stop } = await makePgDatabase(dbName, port) + const adapter = new PgDatabaseAdapter(db) + const migrator = new PgBundleMigrator(adapter, pgMigrations) + makeContextInternal(t, dbName, adapter, migrator, options) + t.context.stop = stop +} + +export const makeContext = async ( + t: ExecutionContext, + options: Opts = opts +) => { + await mkdir('.tmp', { recursive: true }) + const dbName = `.tmp/test-${randomValue()}.db` + const db = new SqliteDatabase(dbName) + const adapter = new SqliteDatabaseAdapter(db) + const migrator = new SqliteBundleMigrator(adapter, sqliteMigrations) + makeContextInternal(t, dbName, adapter, migrator, options) +} + +export const makePgContext = async ( + t: ExecutionContext, + port: number, + options: Opts = opts +) => { + const dbName = `test-${randomValue()}` + const { db, stop } = await makePgDatabase(dbName, port) + const adapter = new PgDatabaseAdapter(db) + const migrator = new PgBundleMigrator(adapter, pgMigrations) + makeContextInternal(t, dbName, adapter, migrator, options) + t.context.stop = stop +} + export const mockElectricClient = async ( db: SqliteDB, registry: Registry | GlobalRegistry, options: Opts = opts ): Promise> => { const dbName = db.name - const adapter = new DatabaseAdapter(db) - const migrator = new BundleMigrator(adapter, migrations) + const adapter = new SqliteDatabaseAdapter(db) + const migrator = new SqliteBundleMigrator(adapter, sqliteMigrations) const notifier = new MockNotifier(dbName, new EventEmitter()) const client = new MockSatelliteClient() const satellite = new SatelliteProcess( @@ -346,11 +401,16 @@ export const clean = async (t: ExecutionContext<{ dbName: string }>) => { } export const cleanAndStopSatellite = 
async ( - t: ExecutionContext<{ dbName: string; satellite: SatelliteProcess }> + t: ExecutionContext<{ + dbName: string + satellite: SatelliteProcess + stop?: () => Promise + }> ) => { const { satellite } = t.context await satellite.stop() await clean(t) + await t.context.stop?.() } export async function migrateDb( diff --git a/clients/typescript/test/satellite/postgres/process.migration.test.ts b/clients/typescript/test/satellite/postgres/process.migration.test.ts new file mode 100644 index 0000000000..002c75866a --- /dev/null +++ b/clients/typescript/test/satellite/postgres/process.migration.test.ts @@ -0,0 +1,795 @@ +import testAny, { ExecutionContext, TestFn } from 'ava' +import isequal from 'lodash.isequal' +import Long from 'long' +import { + SatOpMigrate_Type, + SatRelation_RelationType, +} from '../../../src/_generated/protocol/satellite' +import { generateTag } from '../../../src/satellite/oplog' +import { + DataChange, + DataChangeType, + Row, + SchemaChange, + Statement, + Transaction, +} from '../../../src/util' +import { + ContextType, + cleanAndStopSatellite, + makePgContext, + relations, +} from '../common' +import { getPgMatchingShadowEntries as getMatchingShadowEntries } from '../../support/satellite-helpers' +import { DatabaseAdapter } from '../../../src/electric/adapter' +import { pgBuilder } from '../../../src/migrators/query-builder' +import isEqual from 'lodash.isequal' + +type CurrentContext = ContextType<{ clientId: string; txDate: Date }> +const test = testAny as TestFn +const builder = pgBuilder + +let port = 5000 +test.beforeEach(async (t) => { + await makePgContext(t, port++) + const { satellite, authState } = t.context + await satellite.start(authState) + t.context['clientId'] = satellite._authState!.clientId // store clientId in the context + await populateDB(t) + const txDate = await satellite._performSnapshot() + t.context['txDate'] = txDate + // Mimic Electric sending our own operations back + // which serves as an acknowledgement (even though there is a separate ack also) + // and leads to GC of the oplog + const ackTx = { + origin: satellite._authState!.clientId, + commit_timestamp: Long.fromNumber(txDate.getTime()), + changes: [], // doesn't matter, only the origin and timestamp matter for GC of the oplog + lsn: new Uint8Array(), + } + await satellite._applyTransaction(ackTx) +}) +test.afterEach.always(cleanAndStopSatellite) + +const populateDB = async (t: ExecutionContext) => { + const adapter = t.context.adapter as DatabaseAdapter + + const stmts: Statement[] = [] + + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, + args: [1, 'local', null], + }) + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, + args: [2, 'local', null], + }) + await adapter.runInTransaction(...stmts) +} + +async function assertDbHasTables( + t: ExecutionContext, + ...tables: string[] +) { + const adapter = t.context.adapter as DatabaseAdapter + const schemaRows = await adapter.query(builder.getLocalTableNames()) + + const tableNames = new Set(schemaRows.map((r) => r.name)) + tables.forEach((tbl) => { + t.true(tableNames.has(tbl)) + }) +} + +async function getTableInfo( + table: string, + t: ExecutionContext +): Promise { + const adapter = t.context.adapter as DatabaseAdapter + return (await adapter.query(builder.getTableInfo(table))) as ColumnInfo[] +} + +type ColumnInfo = { + cid: number + name: string + type: string + notnull: number + dflt_value: null | string + pk: number +} + +test.serial('setup 
populates DB', async (t) => { + const adapter = t.context.adapter + + const sql = 'SELECT * FROM main.parent' + const rows = await adapter.query({ sql }) + t.deepEqual(rows, [ + { + id: 1, + value: 'local', + other: null, + }, + { + id: 2, + value: 'local', + other: null, + }, + ]) +}) + +const createTable: SchemaChange = { + table: { + name: 'NewTable', + columns: [ + { + name: 'id', + sqliteType: 'TEXT', + pgType: { name: 'TEXT', array: [], size: [] }, + }, + { + name: 'foo', + sqliteType: 'INTEGER', + pgType: { name: 'INTEGER', array: [], size: [] }, + }, + { + name: 'bar', + sqliteType: 'TEXT', + pgType: { name: 'TEXT', array: [], size: [] }, + }, + ], + fks: [], + pks: ['id'], + }, + migrationType: SatOpMigrate_Type.CREATE_TABLE, + sql: 'CREATE TABLE main."NewTable"(\ + id TEXT NOT NULL,\ + foo INTEGER,\ + bar TEXT,\ + PRIMARY KEY(id)\ + );', +} + +const addColumn: SchemaChange = { + table: { + name: 'parent', + columns: [ + { + name: 'id', + sqliteType: 'INTEGER', + pgType: { name: 'INTEGER', array: [], size: [] }, + }, + { + name: 'value', + sqliteType: 'TEXT', + pgType: { name: 'TEXT', array: [], size: [] }, + }, + { + name: 'other', + sqliteType: 'INTEGER', + pgType: { name: 'INTEGER', array: [], size: [] }, + }, + { + name: 'baz', + sqliteType: 'TEXT', + pgType: { name: 'TEXT', array: [], size: [] }, + }, + ], + fks: [], + pks: ['id'], + }, + migrationType: SatOpMigrate_Type.ALTER_ADD_COLUMN, + sql: 'ALTER TABLE main.parent ADD baz TEXT', +} + +const addColumnRelation = { + id: 2000, // doesn't matter + schema: 'public', + table: 'parent', + tableType: SatRelation_RelationType.TABLE, + columns: [ + { + name: 'id', + type: 'INTEGER', + isNullable: false, + primaryKey: true, + }, + { + name: 'value', + type: 'TEXT', + isNullable: true, + primaryKey: false, + }, + { + name: 'other', + type: 'INTEGER', + isNullable: true, + primaryKey: false, + }, + { + name: 'baz', + type: 'TEXT', + isNullable: true, + primaryKey: false, + }, + ], +} +const newTableRelation = { + id: 2001, // doesn't matter + schema: 'public', + table: 'NewTable', + tableType: SatRelation_RelationType.TABLE, + columns: [ + { + name: 'id', + type: 'TEXT', + isNullable: false, + primaryKey: true, + }, + { + name: 'foo', + type: 'INTEGER', + isNullable: true, + primaryKey: false, + }, + { + name: 'bar', + type: 'TEXT', + isNullable: true, + primaryKey: false, + }, + ], +} + +async function checkMigrationIsApplied(t: ExecutionContext) { + await assertDbHasTables(t, 'parent', 'child', 'NewTable') + + const newTableInfo = await getTableInfo('NewTable', t) + + const expectedTables = [ + // id, foo, bar + { + name: 'foo', + type: 'INTEGER', + notnull: 0, + dflt_value: null, + pk: 0, + }, + { name: 'id', type: 'TEXT', notnull: 1, dflt_value: null, pk: 1 }, + { name: 'bar', type: 'TEXT', notnull: 0, dflt_value: null, pk: 0 }, + ] + + expectedTables.forEach((tbl) => { + t.true(newTableInfo.some((t) => isEqual(t, tbl))) + }) + + const parentTableInfo = await getTableInfo('parent', t) + const parentTableHasColumn = parentTableInfo.some((col: ColumnInfo) => { + return ( + col.name === 'baz' && + col.type === 'TEXT' && + col.notnull === 0 && + col.dflt_value === null && + col.pk === 0 + ) + }) + + t.true(parentTableHasColumn) +} + +const fetchParentRows = async (adapter: DatabaseAdapter): Promise => { + return adapter.query({ + sql: 'SELECT * FROM main.parent', + }) +} + +const testSetEquality = (t: ExecutionContext, xs: T[], ys: T[]): void => { + t.is(xs.length, ys.length, 'Expected array lengths to be equal') + + const missing: 
T[] = [] + + for (const x of xs) { + if (ys.some((y) => isequal(x, y))) continue + else missing.push(x) + } + + t.deepEqual( + missing, + [], + 'Expected all elements from the first array to be present in the second, but some are missing' + ) +} + +test.serial('apply migration containing only DDL', async (t) => { + const { satellite, adapter, txDate } = t.context + const timestamp = txDate.getTime() + + const rowsBeforeMigration = await fetchParentRows(adapter) + + const migrationTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(timestamp), + changes: [createTable, addColumn], + lsn: new Uint8Array(), + // starts at 3, because the app already defines 2 migrations + // (see test/support/migrations/migrations.js) + // which are loaded when Satellite is started + migrationVersion: '3', + } + + // Apply the migration transaction + await satellite._applyTransaction(migrationTx) + + // Check that the migration was successfully applied + await checkMigrationIsApplied(t) + + // Check that the existing rows are still there and are unchanged + const rowsAfterMigration = await fetchParentRows(adapter) + const expectedRowsAfterMigration = rowsBeforeMigration.map((row: Row) => { + return { + ...row, + baz: null, + } + }) + + t.deepEqual(rowsAfterMigration, expectedRowsAfterMigration) +}) + +test.serial( + 'apply migration containing DDL and non-conflicting DML', + async (t) => { + /* + Test migrations containing non-conflicting DML statements and some DDL statements + - Process the following migration tx: + - DML 1 is: + insert non-conflicting row in existing table + non-conflict update to existing row + delete row + - DDL 1 is: + Add column to table that is affected by the statements in DML 1 + Create new table + - DML 2 is: + insert row in extended table with value for new column + insert row in extended table without a value for the new column + Insert some rows in newly created table + - Check that the migration was successfully applied on the local DB + - Check the modifications (insert, update, delete) to the rows + */ + + const { satellite, adapter, txDate } = t.context + const timestamp = txDate.getTime() + + const txTags = [generateTag('remote', txDate)] + const mkInsertChange = (record: any) => { + return { + type: DataChangeType.INSERT, + relation: relations['parent'], + record: record, + oldRecord: {}, + tags: txTags, + } + } + + const insertRow = { + id: 3, + value: 'remote', + other: 1, + } + + const insertChange = mkInsertChange(insertRow) + + const oldUpdateRow = { + id: 1, + value: 'local', + other: null, + } + + const updateRow = { + id: 1, + value: 'remote', + other: 5, + } + + const updateChange = { + //type: DataChangeType.INSERT, // insert since `opLogEntryToChange` also transforms update optype into insert + type: DataChangeType.UPDATE, + relation: relations['parent'], + record: updateRow, + oldRecord: oldUpdateRow, + tags: txTags, + } + + // Delete overwrites the insert for row with id 2 + // Thus, it overwrites the shadow tag for that row + const localEntries = await satellite._getEntries() + const shadowEntryForRow2 = await getMatchingShadowEntries( + adapter, + localEntries[1] + ) // shadow entry for insert of row with id 2 + const shadowTagsRow2 = JSON.parse(shadowEntryForRow2[0].tags) + + const deleteRow = { + id: 2, + value: 'local', + other: null, + } + + const deleteChange = { + type: DataChangeType.DELETE, + relation: relations['parent'], + oldRecord: deleteRow, + tags: shadowTagsRow2, + } + + const insertExtendedRow = { + id: 4, + value: 'remote', + other: 
6, + baz: 'foo', + } + const insertExtendedChange = { + type: DataChangeType.INSERT, + relation: addColumnRelation, + record: insertExtendedRow, + oldRecord: {}, + tags: txTags, + } + + const insertExtendedWithoutValueRow = { + id: 5, + value: 'remote', + other: 7, + } + const insertExtendedWithoutValueChange = { + type: DataChangeType.INSERT, + relation: addColumnRelation, + record: insertExtendedWithoutValueRow, + oldRecord: {}, + tags: txTags, + } + + const insertInNewTableRow = { + id: '1', + foo: 1, + bar: '2', + } + const insertInNewTableChange = { + type: DataChangeType.INSERT, + relation: newTableRelation, + record: insertInNewTableRow, + oldRecord: {}, + tags: txTags, + } + + const dml1 = [insertChange, updateChange, deleteChange] + const ddl1 = [addColumn, createTable] + const dml2 = [ + insertExtendedChange, + insertExtendedWithoutValueChange, + insertInNewTableChange, + ] + + const migrationTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(timestamp), + changes: [...dml1, ...ddl1, ...dml2], + lsn: new Uint8Array(), + migrationVersion: '4', + } + + const rowsBeforeMigration = await fetchParentRows(adapter) + + // For each schema change, Electric sends a `SatRelation` message + // before sending a DML operation that depends on a new or modified schema. + // The `SatRelation` message is handled by `_updateRelations` in order + // to update Satellite's relations + await satellite._updateRelations(addColumnRelation) + await satellite._updateRelations(newTableRelation) + + // Apply the migration transaction + await satellite._applyTransaction(migrationTx) + + // Check that the migration was successfully applied + await checkMigrationIsApplied(t) + + // Check that the existing rows are still there and are unchanged + const rowsAfterMigration = await fetchParentRows(adapter) + const expectedRowsAfterMigration = rowsBeforeMigration + .filter((r: Row) => r.id !== deleteRow.id && r.id !== oldUpdateRow.id) + .concat([insertRow, updateRow, insertExtendedWithoutValueRow]) + .map((row: Row) => { + return { + ...row, + baz: null, + } as Row + }) + .concat([insertExtendedRow]) + testSetEquality(t, rowsAfterMigration, expectedRowsAfterMigration) + + // Check the row that was inserted in the new table + const newTableRows = await adapter.query({ + sql: 'SELECT * FROM main."NewTable"', + }) + + t.is(newTableRows.length, 1) + t.deepEqual(newTableRows[0], insertInNewTableRow) + } +) + +test.serial('apply migration containing DDL and conflicting DML', async (t) => { + // Same as previous test but DML contains some conflicting operations + const { satellite, adapter, txDate } = t.context + + // Fetch the shadow tag for row 1 such that delete will overwrite it + const localEntries = await satellite._getEntries() + const shadowEntryForRow1 = await getMatchingShadowEntries( + adapter, + localEntries[0] + ) // shadow entry for insert of row with id 1 + const shadowTagsRow1 = JSON.parse(shadowEntryForRow1[0].tags) + + // Locally update row with id 1 + await adapter.runInTransaction({ + sql: `UPDATE main.parent SET value = $1, other = $2 WHERE id = $3;`, + args: ['still local', 5, 1], + }) + + await satellite._performSnapshot() + + // Now receive a concurrent delete of that row + // such that it deletes the row with id 1 that was initially inserted + const timestamp = txDate.getTime() + //const txTags = [ generateTag('remote', txDate) ] + + const deleteRow = { + id: 1, + value: 'local', + other: null, + } + + const deleteChange = { + type: DataChangeType.DELETE, + relation: relations['parent'], 
+ oldRecord: deleteRow, + tags: shadowTagsRow1, + } + + // Process the incoming delete + const ddl = [addColumn, createTable] + const dml = [deleteChange] + + const migrationTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(timestamp), + changes: [...ddl, ...dml], + lsn: new Uint8Array(), + migrationVersion: '5', + } + + const rowsBeforeMigration = await fetchParentRows(adapter) + const rowsBeforeMigrationExceptConflictingRow = rowsBeforeMigration.filter( + (r) => r.id !== deleteRow.id + ) + + // For each schema change, Electric sends a `SatRelation` message + // before sending a DML operation that depends on a new or modified schema. + // The `SatRelation` message is handled by `_updateRelations` in order + // to update Satellite's relations. + // In this case, the DML operation deletes a row in `parent` table + // so we receive a `SatRelation` message for that table + await satellite._updateRelations(addColumnRelation) + + // Apply the migration transaction + await satellite._applyTransaction(migrationTx) + + // Check that the migration was successfully applied + await checkMigrationIsApplied(t) + + // The local update and remote delete happened concurrently + // Check that the update wins + const rowsAfterMigration = await fetchParentRows(adapter) + const newRowsExceptConflictingRow = rowsAfterMigration.filter( + (r) => r.id !== deleteRow.id + ) + const conflictingRow = rowsAfterMigration.find((r) => r.id === deleteRow.id) + + testSetEquality( + t, + rowsBeforeMigrationExceptConflictingRow.map((r) => { + return { + baz: null, + ...r, + } + }), + newRowsExceptConflictingRow + ) + + t.deepEqual(conflictingRow, { + id: 1, + value: 'still local', + other: 5, + baz: null, + }) +}) + +test.serial('apply migration and concurrent transaction', async (t) => { + const { satellite, adapter, txDate } = t.context + + const timestamp = txDate.getTime() + const remoteA = 'remoteA' + const remoteB = 'remoteB' + const txTagsRemoteA = [generateTag(remoteA, txDate)] + const txTagsRemoteB = [generateTag(remoteB, txDate)] + + const mkInsertChange = ( + record: Record, + tags: string[] + ): DataChange => { + return { + type: DataChangeType.INSERT, + relation: relations['parent'], + record: record, + oldRecord: {}, + tags: tags, + } + } + + const insertRowA = { + id: 3, + value: 'remote A', + other: 8, + } + + const insertRowB = { + id: 3, + value: 'remote B', + other: 9, + } + + // Make 2 concurrent insert changes. + // They are concurrent because both remoteA and remoteB + // generated the changes at `timestamp` + const insertChangeA = mkInsertChange(insertRowA, txTagsRemoteA) + const insertChangeB = mkInsertChange(insertRowB, txTagsRemoteB) + + const txA: Transaction = { + origin: remoteA, + commit_timestamp: Long.fromNumber(timestamp), + changes: [insertChangeA], + lsn: new Uint8Array(), + } + + const ddl = [addColumn, createTable] + + const txB: Transaction = { + origin: remoteB, + commit_timestamp: Long.fromNumber(timestamp), + changes: [...ddl, insertChangeB], + lsn: new Uint8Array(), + migrationVersion: '6', + } + + const rowsBeforeMigration = await fetchParentRows(adapter) + + // For each schema change, Electric sends a `SatRelation` message + // before sending a DML operation that depends on a new or modified schema. + // The `SatRelation` message is handled by `_updateRelations` in order + // to update Satellite's relations. 
+ // In this case, the DML operation adds a row in `parent` table + // so we receive a `SatRelation` message for that table + await satellite._updateRelations(addColumnRelation) + + // Apply the concurrent transactions + await satellite._applyTransaction(txB) + await satellite._applyTransaction(txA) + + // Check that the migration was successfully applied + await checkMigrationIsApplied(t) + + // Check that one of the two insertions won + const rowsAfterMigration = await fetchParentRows(adapter) + const extendRow = (r: Row) => { + return { + ...r, + baz: null, + } + } + const extendedRows = rowsBeforeMigration.map(extendRow) + + // Check that all rows now have an additional column + t.deepEqual( + rowsAfterMigration.filter((r) => r.id !== insertRowA.id), + extendedRows + ) + + const conflictingRow = rowsAfterMigration.find((r) => r.id === insertRowA.id) + + // Now also check the row that was concurrently inserted + t.assert( + isequal(conflictingRow, extendRow(insertRowA)) || + isequal(conflictingRow, extendRow(insertRowB)) + ) +}) + +const migrationWithFKs: SchemaChange[] = [ + { + migrationType: SatOpMigrate_Type.CREATE_TABLE, + sql: ` + CREATE TABLE main."test_items" ( + "id" TEXT NOT NULL, + CONSTRAINT "test_items_pkey" PRIMARY KEY ("id") + ); + `, + table: { + name: 'test_items', + columns: [ + { + name: 'id', + sqliteType: 'TEXT', + pgType: { name: 'TEXT', array: [], size: [] }, + }, + ], + fks: [], + pks: ['id'], + }, + }, + { + migrationType: SatOpMigrate_Type.CREATE_TABLE, + sql: ` + CREATE TABLE main."test_other_items" ( + "id" TEXT NOT NULL, + "item_id" TEXT, + -- CONSTRAINT "test_other_items_item_id_fkey" FOREIGN KEY ("item_id") REFERENCES "test_items" ("id"), + CONSTRAINT "test_other_items_pkey" PRIMARY KEY ("id") + ); + `, + table: { + name: 'test_other_items', + columns: [ + { + name: 'id', + sqliteType: 'TEXT', + pgType: { name: 'TEXT', array: [], size: [] }, + }, + { + name: 'item_id', + sqliteType: 'TEXT', + pgType: { name: 'TEXT', array: [], size: [] }, + }, + ], + fks: [ + { + $type: 'Electric.Satellite.SatOpMigrate.ForeignKey', + fkCols: ['item_id'], + pkTable: 'test_items', + pkCols: ['id'], + }, + ], + pks: ['id'], + }, + }, +] + +test.serial('apply another migration', async (t) => { + const { satellite } = t.context + + const migrationTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(new Date().getTime()), + changes: migrationWithFKs, + lsn: new Uint8Array(), + // starts at 3, because the app already defines 2 migrations + // (see test/support/migrations/migrations.js) + // which are loaded when Satellite is started + migrationVersion: '3', + } + + // Apply the migration transaction + try { + await satellite._applyTransaction(migrationTx) + } catch (e) { + console.error(e) + throw e + } + + await assertDbHasTables(t, 'test_items', 'test_other_items') + t.pass() +}) diff --git a/clients/typescript/test/satellite/postgres/process.test.ts b/clients/typescript/test/satellite/postgres/process.test.ts new file mode 100644 index 0000000000..9cd0e8a6c2 --- /dev/null +++ b/clients/typescript/test/satellite/postgres/process.test.ts @@ -0,0 +1,1949 @@ +import anyTest, { TestFn } from 'ava' + +import { + MOCK_BEHIND_WINDOW_LSN, + MOCK_INTERNAL_ERROR, + MockSatelliteClient, +} from '../../../src/satellite/mock' +import { QualifiedTablename } from '../../../src/util/tablename' +import { sleepAsync } from '../../../src/util/timer' + +import { + OPTYPES, + localOperationsToTableChanges, + fromTransaction, + OplogEntry, + toTransactions, + generateTag, + encodeTags, + 
opLogEntryToChange, +} from '../../../src/satellite/oplog' +import { SatelliteProcess } from '../../../src/satellite/process' + +import { + loadSatelliteMetaTable, + generateLocalOplogEntry, + generateRemoteOplogEntry, + genEncodedTags, + getMatchingShadowEntries, +} from '../../support/satellite-helpers' +import Long from 'long' +import { + DataChangeType, + DataTransaction, + SatelliteError, + SatelliteErrorCode, +} from '../../../src/util/types' +import { + makePgContext, + opts, + relations, + cleanAndStopSatellite, + ContextType, +} from '../common' +import { + DEFAULT_LOG_POS, + numberToBytes, + base64, +} from '../../../src/util/common' + +import { + ClientShapeDefinition, + SubscriptionData, +} from '../../../src/satellite/shapes/types' +import { mergeEntries } from '../../../src/satellite/merge' +import { MockSubscriptionsManager } from '../../../src/satellite/shapes/manager' +import { pgBuilder } from '../../../src/migrators/query-builder' + +const parentRecord = { + id: 1, + value: 'incoming', + other: 1, +} + +const childRecord = { + id: 1, + parent: 1, +} + +let port = 5200 +// Run all tests in this file serially +// because there are a lot of tests +// and it would lead to PG running out of shared memory +const test = anyTest.serial as TestFn +test.beforeEach(async (t) => { + await makePgContext(t, port++) +}) +test.afterEach.always(cleanAndStopSatellite) +const qualifiedParentTableName = new QualifiedTablename( + 'main', + 'parent' +).toString() +const builder = pgBuilder + +test('setup starts a satellite process', async (t) => { + t.true(t.context.satellite instanceof SatelliteProcess) +}) + +test('start creates system tables', async (t) => { + const { adapter, satellite, authState } = t.context + + await satellite.start(authState) + + const rows = await adapter.query(builder.getLocalTableNames()) + const names = rows.map((row) => row.name) + + t.true(names.includes('_electric_oplog')) +}) + +test('load metadata', async (t) => { + const { adapter, runMigrations } = t.context + await runMigrations() + + const meta = await loadSatelliteMetaTable(adapter) + t.deepEqual(meta, { + compensations: '1', + lsn: '', + clientId: '', + subscriptions: '', + }) +}) + +test('set persistent client id', async (t) => { + const { satellite, authState } = t.context + + await satellite.start(authState) + const clientId1 = satellite._authState!.clientId + t.truthy(clientId1) + await satellite.stop() + + await satellite.start(authState) + + const clientId2 = satellite._authState!.clientId + t.truthy(clientId2) + t.assert(clientId1 === clientId2) +}) + +test('cannot UPDATE primary key', async (t) => { + const { adapter, runMigrations } = t.context + await runMigrations() + + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) + await t.throwsAsync( + adapter.run({ sql: `UPDATE main.parent SET id='3' WHERE id = '1'` }), + { + code: 'P0001', + } + ) +}) + +test('snapshot works', async (t) => { + const { satellite } = t.context + const { adapter, notifier, runMigrations, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) + + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) + + let snapshotTimestamp = await satellite._performSnapshot() + + const clientId = satellite._authState!.clientId + let shadowTags = encodeTags([generateTag(clientId, snapshotTimestamp)]) + + var shadowRows = await adapter.query({ + sql: `SELECT tags FROM main._electric_shadow`, + }) + t.is(shadowRows.length, 2) + for (const row of 
shadowRows) { + t.is(row.tags, shadowTags) + } + + t.is(notifier.notifications.length, 1) + + const { changes } = notifier.notifications[0] + const expectedChange = { + qualifiedTablename: new QualifiedTablename('main', 'parent'), + rowids: [1, 2], + } + + t.deepEqual(changes, [expectedChange]) +}) + +test('(regression) performSnapshot cant be called concurrently', async (t) => { + const { authState, satellite, runMigrations } = t.context + await runMigrations() + await satellite._setAuthState(authState) + + await t.throwsAsync( + async () => { + const run = satellite.adapter.run.bind(satellite.adapter) + satellite.adapter.run = (stmt) => + new Promise((res) => setTimeout(() => run(stmt).then(res), 100)) + + const p1 = satellite._performSnapshot() + const p2 = satellite._performSnapshot() + await Promise.all([p1, p2]) + }, + { + instanceOf: SatelliteError, + code: SatelliteErrorCode.INTERNAL, + message: 'already performing snapshot', + } + ) +}) + +test('(regression) throttle with mutex prevents race when snapshot is slow', async (t) => { + const { authState, satellite, runMigrations } = t.context + await runMigrations() + await satellite._setAuthState(authState) + + // delay termination of _performSnapshot + const run = satellite.adapter.run.bind(satellite.adapter) + satellite.adapter.run = (stmt) => + new Promise((res) => setTimeout(() => run(stmt).then(res), 100)) + + const p1 = satellite._throttledSnapshot() + const p2 = new Promise((res) => { + // call snapshot after throttle time has expired + setTimeout(() => satellite._throttledSnapshot()?.then(res), 50) + }) + + await t.notThrowsAsync(async () => { + await p1 + await p2 + }) +}) + +test('starting and stopping the process works', async (t) => { + const { adapter, notifier, runMigrations, satellite, authState } = t.context + await runMigrations() + + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) + + const conn = await satellite.start(authState) + await conn.connectionPromise + + await sleepAsync(opts.pollingInterval) + + // connect, 1st txn + t.is(notifier.notifications.length, 2) + + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('3'),('4')` }) + await sleepAsync(opts.pollingInterval) + + // 2nd txm + t.is(notifier.notifications.length, 3) + + await satellite.stop() + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('5'),('6')` }) + await sleepAsync(opts.pollingInterval) + + // no txn notified + t.is(notifier.notifications.length, 4) + + const conn1 = await satellite.start(authState) + await conn1.connectionPromise + await sleepAsync(opts.pollingInterval) + + // connect, 4th txn + t.is(notifier.notifications.length, 6) +}) + +test('snapshots on potential data change', async (t) => { + const { adapter, notifier, runMigrations } = t.context + await runMigrations() + + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) + + t.is(notifier.notifications.length, 0) + + await notifier.potentiallyChanged() + + t.is(notifier.notifications.length, 1) +}) + +// INSERT after DELETE shall nullify all non explicitly set columns +// If last operation is a DELETE, concurrent INSERT shall resurrect deleted +// values as in 'INSERT wins over DELETE and restored deleted values' +test('snapshot of INSERT after DELETE', async (t) => { + const { adapter, runMigrations, satellite, authState } = t.context + + await runMigrations() + + await adapter.run({ + sql: `INSERT INTO main.parent(id, value) VALUES (1,'val1')`, + }) + await adapter.run({ sql: `DELETE FROM main.parent 
WHERE id=1` }) + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES (1)` }) + + await satellite._setAuthState(authState) + await satellite._performSnapshot() + const entries = await satellite._getEntries() + const clientId = satellite._authState!.clientId + + const merged = localOperationsToTableChanges( + entries, + (timestamp: Date) => { + return generateTag(clientId, timestamp) + }, + relations + ) + const [_, keyChanges] = merged[qualifiedParentTableName]['{"id":1}'] + const resultingValue = keyChanges.changes.value.value + t.is(resultingValue, null) +}) + +test('snapshot of INSERT with bigint', async (t) => { + const { adapter, runMigrations, satellite, authState } = t.context + + await runMigrations() + + await adapter.run({ + sql: `INSERT INTO main."bigIntTable"(value) VALUES (1)`, + }) + + await satellite._setAuthState(authState) + await satellite._performSnapshot() + const entries = await satellite._getEntries() + const clientId = satellite._authState!.clientId + + const merged = localOperationsToTableChanges( + entries, + (timestamp: Date) => { + return generateTag(clientId, timestamp) + }, + relations + ) + const qualifiedTableName = new QualifiedTablename( + 'main', + 'bigIntTable' + ).toString() + const [_, keyChanges] = merged[qualifiedTableName]['{"value":"1"}'] + const resultingValue = keyChanges.changes.value.value + t.is(resultingValue, 1n) +}) + +test('take snapshot and merge local wins', async (t) => { + const { adapter, runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + + const incomingTs = new Date().getTime() - 1 + const incomingEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + incomingTs, + encodeTags([generateTag('remote', new Date(incomingTs))]), + { + id: 1, + value: 'incoming', + } + ) + await adapter.run({ + sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, + }) + + await satellite._setAuthState(authState) + const localTime = await satellite._performSnapshot() + const clientId = satellite._authState!.clientId + + const local = await satellite._getEntries() + const localTimestamp = new Date(local[0].timestamp).getTime() + const merged = mergeEntries( + clientId, + local, + 'remote', + [incomingEntry], + relations + ) + const item = merged[qualifiedParentTableName]['{"id":1}'] + + t.deepEqual(item, { + namespace: 'main', + tablename: 'parent', + primaryKeyCols: { id: 1 }, + optype: OPTYPES.upsert, + changes: { + id: { value: 1, timestamp: localTimestamp }, + value: { value: 'local', timestamp: localTimestamp }, + other: { value: 1, timestamp: localTimestamp }, + }, + fullRow: { + id: 1, + value: 'local', + other: 1, + }, + tags: [ + generateTag(clientId, localTime), + generateTag('remote', new Date(incomingTs)), + ], + }) +}) + +test('take snapshot and merge incoming wins', async (t) => { + const { adapter, runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + + await adapter.run({ + sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, + }) + + await satellite._setAuthState(authState) + const clientId = satellite._authState!.clientId + await satellite._performSnapshot() + + const local = await satellite._getEntries() + const localTimestamp = new Date(local[0].timestamp).getTime() + + const incomingTs = localTimestamp + 1 + const incomingEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + incomingTs, + genEncodedTags('remote', [incomingTs]), + { + id: 1, + value: 
'incoming', + } + ) + + const merged = mergeEntries( + clientId, + local, + 'remote', + [incomingEntry], + relations + ) + const item = merged[qualifiedParentTableName]['{"id":1}'] + + t.deepEqual(item, { + namespace: 'main', + tablename: 'parent', + primaryKeyCols: { id: 1 }, + optype: OPTYPES.upsert, + changes: { + id: { value: 1, timestamp: incomingTs }, + value: { value: 'incoming', timestamp: incomingTs }, + other: { value: 1, timestamp: localTimestamp }, + }, + fullRow: { + id: 1, + value: 'incoming', + other: 1, + }, + tags: [ + generateTag(clientId, new Date(localTimestamp)), + generateTag('remote', new Date(incomingTs)), + ], + }) +}) + +test('merge incoming wins on persisted ops', async (t) => { + const { adapter, runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) + satellite.relations = relations + + // This operation is persisted + await adapter.run({ + sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, + }) + await satellite._performSnapshot() + const [originalInsert] = await satellite._getEntries() + const [tx] = toTransactions([originalInsert], satellite.relations) + tx.origin = authState.clientId + await satellite._applyTransaction(tx) + + // Verify that GC worked as intended and the oplog entry was deleted + t.deepEqual(await satellite._getEntries(), []) + + // This operation is done offline + await adapter.run({ + sql: `UPDATE main.parent SET value = 'new local' WHERE id = 1`, + }) + await satellite._performSnapshot() + const [offlineInsert] = await satellite._getEntries() + const offlineTimestamp = new Date(offlineInsert.timestamp).getTime() + + // This operation is done concurrently with offline but at a later point in time. It's sent immediately on connection + const incomingTs = offlineTimestamp + 1 + const firstIncomingEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.update, + incomingTs, + genEncodedTags('remote', [incomingTs]), + { id: 1, value: 'incoming' }, + { id: 1, value: 'local' } + ) + + const firstIncomingTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(incomingTs), + changes: [opLogEntryToChange(firstIncomingEntry, satellite.relations)], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(firstIncomingTx) + + const [{ value: value1 }] = await adapter.query({ + sql: 'SELECT value FROM main.parent WHERE id = 1', + }) + t.is( + value1, + 'incoming', + 'LWW conflict merge of the incoming transaction should lead to incoming operation winning' + ) + + // And after the offline transaction was sent, the resolved no-op transaction comes in + const secondIncomingEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.update, + offlineTimestamp, + encodeTags([ + generateTag('remote', incomingTs), + generateTag(authState.clientId, offlineTimestamp), + ]), + { id: 1, value: 'incoming' }, + { id: 1, value: 'incoming' } + ) + + const secondIncomingTx = { + origin: authState.clientId, + commit_timestamp: Long.fromNumber(offlineTimestamp), + changes: [opLogEntryToChange(secondIncomingEntry, satellite.relations)], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(secondIncomingTx) + + const [{ value: value2 }] = await adapter.query({ + sql: 'SELECT value FROM main.parent WHERE id = 1', + }) + t.is( + value2, + 'incoming', + 'Applying the resolved write from the round trip should be a no-op' + ) +}) + +test('apply does not add anything to oplog', async (t) => { + const { adapter, 
runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + await adapter.run({ + sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', null)`, + }) + + await satellite._setAuthState(authState) + const clientId = satellite._authState!.clientId + + const localTimestamp = await satellite._performSnapshot() + + const incomingTs = new Date().getTime() + const incomingEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + incomingTs, + genEncodedTags('remote', [incomingTs]), + { + id: 1, + value: 'incoming', + other: 1, + } + ) + + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s + + const incomingChange = opLogEntryToChange(incomingEntry, relations) + const incomingTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(incomingTs), + changes: [incomingChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(incomingTx) + + await satellite._performSnapshot() + + const sql = 'SELECT * from main.parent WHERE id=1' + const [row] = await adapter.query({ sql }) + t.is(row.value, 'incoming') + t.is(row.other, 1) + + const localEntries = await satellite._getEntries() + const shadowEntry = await getMatchingShadowEntries( + adapter, + localEntries[0], + builder + ) + + t.deepEqual( + encodeTags([ + generateTag(clientId, new Date(localTimestamp)), + generateTag('remote', new Date(incomingTs)), + ]), + shadowEntry[0].tags + ) + + //t.deepEqual(shadowEntries, shadowEntries2) + t.is(localEntries.length, 1) +}) + +test('apply incoming with no local', async (t) => { + const { adapter, runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + + const incomingTs = new Date() + const incomingEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.delete, + incomingTs.getTime(), + genEncodedTags('remote', []), + { + id: 1, + value: 'incoming', + otherValue: 1, + } + ) + + satellite.relations = relations // satellite must be aware of the relations in order to deserialise oplog entries + + await satellite._setAuthState(authState) + await satellite._apply([incomingEntry], 'remote') + + const sql = 'SELECT * from main.parent WHERE id=1' + const rows = await adapter.query({ sql }) + const shadowEntries = await getMatchingShadowEntries( + adapter, + undefined, + builder + ) + + t.is(shadowEntries.length, 0) + t.is(rows.length, 0) +}) + +test('apply empty incoming', async (t) => { + const { runMigrations, satellite, authState } = t.context + await runMigrations() + + await satellite._setAuthState(authState) + await satellite._apply([], 'external') + + t.true(true) +}) + +test('apply incoming with null on column with default', async (t) => { + const { runMigrations, satellite, adapter, tableInfo, authState } = t.context + await runMigrations() + + const incomingTs = new Date().getTime() + const incomingEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + incomingTs, + genEncodedTags('remote', [incomingTs]), + { + id: 1234, + value: 'incoming', + other: null, + } + ) + + await satellite._setAuthState(authState) + + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s + + const incomingChange = opLogEntryToChange(incomingEntry, relations) + const incomingTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(incomingTs), + changes: [incomingChange], + lsn: new Uint8Array(), + } + 
await satellite._applyTransaction(incomingTx) + + const sql = `SELECT * from main.parent WHERE value='incoming'` + const rows = await adapter.query({ sql }) + + t.is(rows[0].other, null) + t.pass() +}) + +test('apply incoming with undefined on column with default', async (t) => { + const { runMigrations, satellite, adapter, tableInfo, authState } = t.context + await runMigrations() + + const incomingTs = new Date().getTime() + const incomingEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + incomingTs, + genEncodedTags('remote', [incomingTs]), + { + id: 1234, + value: 'incoming', + } + ) + + await satellite._setAuthState(authState) + + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s + + const incomingChange = opLogEntryToChange(incomingEntry, relations) + const incomingTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(incomingTs), + changes: [incomingChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(incomingTx) + + const sql = `SELECT * from main.parent WHERE value='incoming'` + const rows = await adapter.query({ sql }) + + t.is(rows[0].other, 0) + t.pass() +}) + +test('INSERT wins over DELETE and restored deleted values', async (t) => { + const { runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) + const clientId = satellite._authState!.clientId + + const localTs = new Date().getTime() + const incomingTs = localTs + 1 + + const incoming = [ + generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + incomingTs, + genEncodedTags('remote', [incomingTs]), + { + id: 1, + other: 1, + } + ), + generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.delete, + incomingTs, + genEncodedTags('remote', []), + { + id: 1, + } + ), + ] + + const local = [ + generateLocalOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + localTs, + genEncodedTags(clientId, [localTs]), + { + id: 1, + value: 'local', + other: null, + } + ), + ] + + const merged = mergeEntries(clientId, local, 'remote', incoming, relations) + const item = merged[qualifiedParentTableName]['{"id":1}'] + + t.deepEqual(item, { + namespace: 'main', + tablename: 'parent', + primaryKeyCols: { id: 1 }, + optype: OPTYPES.upsert, + changes: { + id: { value: 1, timestamp: incomingTs }, + value: { value: 'local', timestamp: localTs }, + other: { value: 1, timestamp: incomingTs }, + }, + fullRow: { + id: 1, + value: 'local', + other: 1, + }, + tags: [ + generateTag(clientId, new Date(localTs)), + generateTag('remote', new Date(incomingTs)), + ], + }) +}) + +test('concurrent updates take all changed values', async (t) => { + const { runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) + const clientId = satellite._authState!.clientId + + const localTs = new Date().getTime() + const incomingTs = localTs + 1 + + const incoming = [ + generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.update, + incomingTs, + genEncodedTags('remote', [incomingTs]), + { + id: 1, + value: 'remote', // the only modified column + other: 0, + }, + { + id: 1, + value: 'local', + other: 0, + } + ), + ] + + const local = [ + generateLocalOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.update, + localTs, + genEncodedTags(clientId, [localTs]), + { + id: 1, + value: 'local', + other: 1, // the only 
modified column + }, + { + id: 1, + value: 'local', + other: 0, + } + ), + ] + + const merged = mergeEntries(clientId, local, 'remote', incoming, relations) + const item = merged[qualifiedParentTableName]['{"id":1}'] + + // The incoming entry modified the value of the `value` column to `'remote'` + // The local entry concurrently modified the value of the `other` column to 1. + // The merged entries should have `value = 'remote'` and `other = 1`. + t.deepEqual(item, { + namespace: 'main', + tablename: 'parent', + primaryKeyCols: { id: 1 }, + optype: OPTYPES.upsert, + changes: { + value: { value: 'remote', timestamp: incomingTs }, + other: { value: 1, timestamp: localTs }, + }, + fullRow: { + id: 1, + value: 'remote', + other: 1, + }, + tags: [ + generateTag(clientId, new Date(localTs)), + generateTag('remote', new Date(incomingTs)), + ], + }) +}) + +test('merge incoming with empty local', async (t) => { + const { runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) + const clientId = satellite._authState!.clientId + + const localTs = new Date().getTime() + const incomingTs = localTs + 1 + + const incoming = [ + generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + incomingTs, + genEncodedTags('remote', [incomingTs]), + { + id: 1, + }, + undefined + ), + ] + + const local: OplogEntry[] = [] + const merged = mergeEntries(clientId, local, 'remote', incoming, relations) + const item = merged[qualifiedParentTableName]['{"id":1}'] + + t.deepEqual(item, { + namespace: 'main', + tablename: 'parent', + primaryKeyCols: { id: 1 }, + optype: OPTYPES.upsert, + changes: { + id: { value: 1, timestamp: incomingTs }, + }, + fullRow: { + id: 1, + }, + tags: [generateTag('remote', new Date(incomingTs))], + }) +}) + +test('compensations: referential integrity is enforced', async (t) => { + const { adapter, runMigrations, satellite } = t.context + await runMigrations() + + await satellite._setMeta('compensations', 0) + await adapter.run({ + sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, + }) + + await t.throwsAsync( + adapter.run({ sql: `INSERT INTO main.child(id, parent) VALUES (1, 2)` }), + { + code: '23503', + } + ) +}) + +test('compensations: incoming operation breaks referential integrity', async (t) => { + const { runMigrations, satellite, tableInfo, timestamp, authState } = + t.context + await runMigrations() + + await satellite._setMeta('compensations', 0) + await satellite._setAuthState(authState) + + const incoming = generateLocalOplogEntry( + tableInfo, + 'main', + 'child', + OPTYPES.insert, + timestamp, + genEncodedTags('remote', [timestamp]), + { + id: 1, + parent: 1, + } + ) + + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s + + const incomingChange = opLogEntryToChange(incoming, relations) + const incomingTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(timestamp), + changes: [incomingChange], + lsn: new Uint8Array(), + } + + await t.throwsAsync(satellite._applyTransaction(incomingTx), { + code: '23503', + }) +}) + +test('compensations: incoming operations accepted if restore referential integrity', async (t) => { + const { adapter, runMigrations, satellite, tableInfo, timestamp, authState } = + t.context + await runMigrations() + + await satellite._setMeta('compensations', 0) + await satellite._setAuthState(authState) + const clientId = satellite._authState!.clientId + + const 
childInsertEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'child', + OPTYPES.insert, + timestamp, + genEncodedTags(clientId, [timestamp]), + { + id: 1, + parent: 1, + } + ) + + const parentInsertEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + timestamp, + genEncodedTags(clientId, [timestamp]), + { + id: 1, + } + ) + + await adapter.run({ + sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, + }) + await adapter.run({ sql: `DELETE FROM main.parent WHERE id=1` }) + + await satellite._performSnapshot() + + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s + + const childInsertChange = opLogEntryToChange(childInsertEntry, relations) + const parentInsertChange = opLogEntryToChange(parentInsertEntry, relations) + const insertChildAndParentTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(new Date().getTime()), // timestamp is not important for this test, it is only used to GC the oplog + changes: [parentInsertChange, childInsertChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(insertChildAndParentTx) + + const rows = await adapter.query({ + sql: `SELECT * from main.parent WHERE id=1`, + }) + + // Not only does the parent exist. + t.is(rows.length, 1) + + // But it's also recreated with deleted values. + t.is(rows[0].value, '1') +}) + +test('compensations: using triggers with flag 0', async (t) => { + const { adapter, runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + + await satellite._setMeta('compensations', 0) + + await adapter.run({ + sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, + }) + await satellite._setAuthState(authState) + const ts = await satellite._performSnapshot() + await satellite._garbageCollectOplog(ts) + + await adapter.run({ sql: `INSERT INTO main.child(id, parent) VALUES (1, 1)` }) + await satellite._performSnapshot() + + const timestamp = new Date().getTime() + const incoming = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.delete, + timestamp, + genEncodedTags('remote', []), + { + id: 1, + } + ) + + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s + + const incomingChange = opLogEntryToChange(incoming, relations) + const incomingTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(timestamp), + changes: [incomingChange], + lsn: new Uint8Array(), + } + + await t.throwsAsync(satellite._applyTransaction(incomingTx), { + code: '23503', + }) +}) + +test('compensations: using triggers with flag 1', async (t) => { + const { adapter, runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + + await satellite._setMeta('compensations', 1) + + await adapter.run({ + sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, + }) + await satellite._setAuthState(authState) + const ts = await satellite._performSnapshot() + await satellite._garbageCollectOplog(ts) + + await adapter.run({ sql: `INSERT INTO main.child(id, parent) VALUES (1, 1)` }) + await satellite._performSnapshot() + + const timestamp = new Date().getTime() + const incoming = [ + generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.delete, + timestamp, + genEncodedTags('remote', []), + { + id: 1, + } + ), + ] + + satellite.relations = relations // satellite must be aware of the relations in order to deserialise oplog entries + + await 
satellite._apply(incoming, 'remote') + t.pass() +}) + +test('get oplogEntries from transaction', async (t) => { + const { runMigrations, satellite } = t.context + await runMigrations() + + const relations = await satellite['_getLocalRelations']() + + const transaction: DataTransaction = { + lsn: DEFAULT_LOG_POS, + commit_timestamp: Long.UZERO, + changes: [ + { + relation: relations.parent, + type: DataChangeType.INSERT, + record: { id: 0 }, + tags: [], // proper values are not relevant here + }, + ], + } + + const expected: OplogEntry = { + namespace: 'main', + tablename: 'parent', + optype: 'INSERT', + newRow: '{"id":0}', + oldRow: undefined, + primaryKey: '{"id":0}', + rowid: -1, + timestamp: '1970-01-01T00:00:00.000Z', + clearTags: encodeTags([]), + } + + const opLog = fromTransaction(transaction, relations) + t.deepEqual(opLog[0], expected) +}) + +test('get transactions from opLogEntries', async (t) => { + const { runMigrations } = t.context + await runMigrations() + + const opLogEntries: OplogEntry[] = [ + { + namespace: 'public', + tablename: 'parent', + optype: 'INSERT', + newRow: '{"id":0}', + oldRow: undefined, + primaryKey: '{"id":0}', + rowid: 1, + timestamp: '1970-01-01T00:00:00.000Z', + clearTags: encodeTags([]), + }, + { + namespace: 'public', + tablename: 'parent', + optype: 'UPDATE', + newRow: '{"id":1}', + oldRow: '{"id":1}', + primaryKey: '{"id":1}', + rowid: 2, + timestamp: '1970-01-01T00:00:00.000Z', + clearTags: encodeTags([]), + }, + { + namespace: 'public', + tablename: 'parent', + optype: 'INSERT', + newRow: '{"id":2}', + oldRow: undefined, + primaryKey: '{"id":0}', + rowid: 3, + timestamp: '1970-01-01T00:00:01.000Z', + clearTags: encodeTags([]), + }, + ] + + const expected = [ + { + lsn: numberToBytes(2), + commit_timestamp: Long.UZERO, + changes: [ + { + relation: relations.parent, + type: DataChangeType.INSERT, + record: { id: 0 }, + oldRecord: undefined, + tags: [], + }, + { + relation: relations.parent, + type: DataChangeType.UPDATE, + record: { id: 1 }, + oldRecord: { id: 1 }, + tags: [], + }, + ], + }, + { + lsn: numberToBytes(3), + commit_timestamp: Long.UZERO.add(1000), + changes: [ + { + relation: relations.parent, + type: DataChangeType.INSERT, + record: { id: 2 }, + oldRecord: undefined, + tags: [], + }, + ], + }, + ] + + const opLog = toTransactions(opLogEntries, relations) + t.deepEqual(opLog, expected) +}) + +test('handling connectivity state change stops queueing operations', async (t) => { + const { runMigrations, satellite, adapter, authState } = t.context + await runMigrations() + await satellite.start(authState) + + adapter.run({ + sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, + }) + + await satellite._performSnapshot() + + // We should have sent (or at least enqueued to send) one row + const sentLsn = satellite.client.getLastSentLsn() + t.deepEqual(sentLsn, numberToBytes(1)) + + await satellite._handleConnectivityStateChange('disconnected') + + adapter.run({ + sql: `INSERT INTO main.parent(id, value, other) VALUES (2, 'local', 1)`, + }) + + await satellite._performSnapshot() + + // Since connectivity is down, that row isn't yet sent + const lsn1 = satellite.client.getLastSentLsn() + t.deepEqual(lsn1, sentLsn) + + // Once connectivity is restored, we will immediately run a snapshot to send pending rows + await satellite._handleConnectivityStateChange('available') + await sleepAsync(200) // Wait for snapshot to run + const lsn2 = satellite.client.getLastSentLsn() + t.deepEqual(lsn2, numberToBytes(2)) +}) + +test('garbage 
collection is triggered when transaction from the same origin is replicated', async (t) => { + const { satellite } = t.context + const { runMigrations, adapter, authState } = t.context + await runMigrations() + await satellite.start(authState) + + adapter.run({ + sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1);`, + }) + adapter.run({ + sql: `UPDATE main.parent SET value = 'local', other = 2 WHERE id = 1;`, + }) + + // Before snapshot, we didn't send anything + const lsn1 = satellite.client.getLastSentLsn() + t.deepEqual(lsn1, numberToBytes(0)) + + // Snapshot sends these oplog entries + await satellite._performSnapshot() + const lsn2 = satellite.client.getLastSentLsn() + t.deepEqual(lsn2, numberToBytes(2)) + + const old_oplog = await satellite._getEntries() + const transactions = toTransactions(old_oplog, relations) + transactions[0].origin = satellite._authState!.clientId + + // The transaction containing these oplog entries is applied, which means we delete them + await satellite._applyTransaction(transactions[0]) + const new_oplog = await satellite._getEntries() + t.deepEqual(new_oplog, []) +}) + +// stub client and make satellite throw the error with option off/succeed with option on +test('clear database on BEHIND_WINDOW', async (t) => { + const { satellite } = t.context + const { runMigrations, authState } = t.context + await runMigrations() + + const base64lsn = base64.fromBytes(numberToBytes(MOCK_BEHIND_WINDOW_LSN)) + await satellite._setMeta('lsn', base64lsn) + try { + const conn = await satellite.start(authState) + await conn.connectionPromise + const lsnAfter = await satellite._getMeta('lsn') + t.not(lsnAfter, base64lsn) + } catch (e) { + t.fail('start should not throw') + } + + // TODO: test clear subscriptions +}) + +test('throw other replication errors', async (t) => { + t.plan(2) + const { satellite } = t.context + const { runMigrations, authState } = t.context + await runMigrations() + + const base64lsn = base64.fromBytes(numberToBytes(MOCK_INTERNAL_ERROR)) + await satellite._setMeta('lsn', base64lsn) + + const conn = await satellite.start(authState) + return Promise.all( + [satellite['initializing']?.waitOn(), conn.connectionPromise].map((p) => + p?.catch((e: SatelliteError) => { + t.is(e.code, SatelliteErrorCode.INTERNAL) + }) + ) + ) +}) + +test('apply shape data and persist subscription', async (t) => { + const { client, satellite, adapter, notifier } = t.context + const { runMigrations, authState } = t.context + await runMigrations() + + const namespace = 'main' + const tablename = 'parent' + const qualified = new QualifiedTablename(namespace, tablename) + + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) + + const conn = await satellite.start(authState) + await conn.connectionPromise + + const shapeDef: ClientShapeDefinition = { + selects: [{ tablename }], + } + + satellite!.relations = relations + const { synced } = await satellite.subscribe([shapeDef]) + await synced + + // first notification is 'connected' + t.is(notifier.notifications.length, 2) + t.is(notifier.notifications[1].changes.length, 1) + t.deepEqual(notifier.notifications[1].changes[0], { + qualifiedTablename: qualified, + rowids: [], + }) + + // wait for process to apply shape data + const qualifiedTableName = `"${namespace}"."${tablename}"` + try { + const row = await adapter.query({ + sql: `SELECT id FROM ${qualifiedTableName}`, + }) + t.is(row.length, 1) + + const shadowRows = await adapter.query({ + 
sql: `SELECT tags FROM main._electric_shadow`, + }) + t.is(shadowRows.length, 1) + + const subsMeta = await satellite._getMeta('subscriptions') + const subsObj = JSON.parse(subsMeta) + t.is(Object.keys(subsObj).length, 1) + + // Check that we save the LSN sent by the mock + t.deepEqual(satellite._lsn, base64.toBytes('MTIz')) + } catch (e) { + t.fail(JSON.stringify(e)) + } +}) + +test('(regression) shape subscription succeeds even if subscription data is delivered before the SatSubsReq RPC call receives its SatSubsResp answer', async (t) => { + const { client, satellite } = t.context + const { runMigrations, authState } = t.context + await runMigrations() + + const tablename = 'parent' + + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) + + const conn = await satellite.start(authState) + await conn.connectionPromise + + const shapeDef: ClientShapeDefinition = { + selects: [{ tablename }], + } + + satellite!.relations = relations + + // Enable the deliver first flag in the mock client + // such that the subscription data is delivered before the + // subscription promise is resolved + const mockClient = satellite.client as MockSatelliteClient + mockClient.enableDeliverFirst() + + const { synced } = await satellite.subscribe([shapeDef]) + await synced + + t.pass() +}) + +test('multiple subscriptions for the same shape are deduplicated', async (t) => { + const { client, satellite } = t.context + const { runMigrations, authState } = t.context + await runMigrations() + + const tablename = 'parent' + + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) + + const conn = await satellite.start(authState) + await conn.connectionPromise + + const shapeDef: ClientShapeDefinition = { + selects: [{ tablename }], + } + + satellite!.relations = relations + + // We want none of these cases to throw + await t.notThrowsAsync(async () => { + // We should dedupe subscriptions that are done at the same time + const [sub1, sub2] = await Promise.all([ + satellite.subscribe([shapeDef]), + satellite.subscribe([shapeDef]), + ]) + // That are done after first await but before the data + const sub3 = await satellite.subscribe([shapeDef]) + // And that are done after previous data is resolved + await Promise.all([sub1.synced, sub2.synced, sub3.synced]) + const sub4 = await satellite.subscribe([shapeDef]) + + await sub4.synced + }) + + // And be "merged" into one subscription + t.is(satellite.subscriptions.getFulfilledSubscriptions().length, 1) +}) + +test('applied shape data will be acted upon correctly', async (t) => { + const { client, satellite, adapter } = t.context + const { runMigrations, authState } = t.context + await runMigrations() + + const namespace = 'main' + const tablename = 'parent' + const qualified = `"${namespace}"."${tablename}"` + + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) + + const conn = await satellite.start(authState) + await conn.connectionPromise + + const shapeDef: ClientShapeDefinition = { + selects: [{ tablename }], + } + + satellite!.relations = relations + const { synced } = await satellite.subscribe([shapeDef]) + await synced + + // wait for process to apply shape data + try { + const row = await adapter.query({ + sql: `SELECT id FROM ${qualified}`, + }) + t.is(row.length, 1) + + const shadowRows = await adapter.query({ + 
sql: `SELECT * FROM main._electric_shadow`, + }) + t.is(shadowRows.length, 1) + t.like(shadowRows[0], { + namespace: 'main', + tablename: 'parent', + }) + + await adapter.run({ sql: `DELETE FROM ${qualified} WHERE id = 1` }) + await satellite._performSnapshot() + + const oplogs = await adapter.query({ + sql: `SELECT * FROM main._electric_oplog`, + }) + t.not(oplogs[0].clearTags, '[]') + } catch (e) { + t.fail(JSON.stringify(e)) + } +}) + +test('a subscription that failed to apply because of FK constraint triggers GC', async (t) => { + const { client, satellite, adapter } = t.context + const { runMigrations, authState } = t.context + await runMigrations() + + const tablename = 'child' + const namespace = 'main' + + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, childRecord) + + const conn = await satellite.start(authState) + await conn.connectionPromise + + const shapeDef1: ClientShapeDefinition = { + selects: [{ tablename }], + } + + satellite!.relations = relations + const { synced } = await satellite.subscribe([shapeDef1]) + await synced // wait for subscription to be fulfilled + + try { + const row = await adapter.query({ + sql: `SELECT id FROM "${namespace}"."${tablename}"`, + }) + t.is(row.length, 0) + } catch (e) { + t.fail(JSON.stringify(e)) + } +}) + +test('a second successful subscription', async (t) => { + const { client, satellite, adapter } = t.context + const { runMigrations, authState } = t.context + await runMigrations() + + const namespace = 'main' + const tablename = 'child' + + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData('parent', parentRecord) + client.setRelationData(tablename, childRecord) + + const conn = await satellite.start(authState) + await conn.connectionPromise + + const shapeDef1: ClientShapeDefinition = { + selects: [{ tablename: 'parent' }], + } + const shapeDef2: ClientShapeDefinition = { + selects: [{ tablename: tablename }], + } + + satellite!.relations = relations + await satellite.subscribe([shapeDef1]) + const { synced } = await satellite.subscribe([shapeDef2]) + await synced + + try { + const row = await adapter.query({ + sql: `SELECT id FROM "${namespace}"."${tablename}"`, + }) + t.is(row.length, 1) + + const shadowRows = await adapter.query({ + sql: `SELECT tags FROM main._electric_shadow`, + }) + t.is(shadowRows.length, 2) + + const subsMeta = await satellite._getMeta('subscriptions') + const subsObj = JSON.parse(subsMeta) + t.is(Object.keys(subsObj).length, 2) + } catch (e) { + t.fail(JSON.stringify(e)) + } +}) + +test('a single subscribe with multiple tables with FKs', async (t) => { + const { client, satellite, adapter } = t.context + const { runMigrations, authState } = t.context + await runMigrations() + + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData('parent', parentRecord) + client.setRelationData('child', childRecord) + + const conn = await satellite.start(authState) + await conn.connectionPromise + + const shapeDef1: ClientShapeDefinition = { + selects: [{ tablename: 'child' }], + } + const shapeDef2: ClientShapeDefinition = { + selects: [{ tablename: 'parent' }], + } + + satellite!.relations = relations + + const prom = new Promise((res, rej) => { + client.subscribeToSubscriptionEvents( + (data: SubscriptionData) => { + // child is applied first + t.is(data.data[0].relation.table, 'child') + t.is(data.data[1].relation.table, 
'parent') + + setTimeout(async () => { + try { + const row = await adapter.query({ + sql: `SELECT id FROM "main"."child"`, + }) + t.is(row.length, 1) + + res() + } catch (e) { + rej(e) + } + }, 10) + }, + () => undefined + ) + }) + + await satellite.subscribe([shapeDef1, shapeDef2]) + + return prom +}) + +test('a shape delivery that triggers garbage collection', async (t) => { + const { client, satellite, adapter } = t.context + const { runMigrations, authState } = t.context + await runMigrations() + + const namespace = 'main' + const tablename = 'parent' + + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) + client.setRelationData('another', {}) + + const conn = await satellite.start(authState) + await conn.connectionPromise + + const shapeDef1: ClientShapeDefinition = { + selects: [{ tablename: 'parent' }], + } + const shapeDef2: ClientShapeDefinition = { + selects: [{ tablename: 'another' }], + } + + satellite!.relations = relations + const { synced: synced1 } = await satellite.subscribe([shapeDef1]) + await synced1 + const { synced } = await satellite.subscribe([shapeDef2]) + + try { + await synced + t.fail() + } catch (expected: any) { + try { + const row = await adapter.query({ + sql: `SELECT id FROM "${namespace}"."${tablename}"`, + }) + t.is(row.length, 0) + + const shadowRows = await adapter.query({ + sql: `SELECT tags FROM main._electric_shadow`, + }) + t.is(shadowRows.length, 1) + + const subsMeta = await satellite._getMeta('subscriptions') + const subsObj = JSON.parse(subsMeta) + t.deepEqual(subsObj, {}) + t.true(expected.message.search("table 'another'") >= 0) + } catch (e) { + t.fail(JSON.stringify(e)) + } + } +}) + +test('a subscription request failure does not clear the manager state', async (t) => { + const { client, satellite, adapter } = t.context + const { runMigrations, authState } = t.context + await runMigrations() + + // relations must be present at subscription delivery + const namespace = 'main' + const tablename = 'parent' + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) + + const conn = await satellite.start(authState) + await conn.connectionPromise + + const shapeDef1: ClientShapeDefinition = { + selects: [{ tablename: tablename }], + } + + const shapeDef2: ClientShapeDefinition = { + selects: [{ tablename: 'failure' }], + } + + satellite!.relations = relations + const { synced } = await satellite.subscribe([shapeDef1]) + await synced + + try { + const row = await adapter.query({ + sql: `SELECT id FROM "${namespace}"."${tablename}"`, + }) + t.is(row.length, 1) + } catch (e) { + t.fail(JSON.stringify(e)) + } + + try { + await satellite.subscribe([shapeDef2]) + } catch (error: any) { + t.is(error.code, SatelliteErrorCode.TABLE_NOT_FOUND) + } +}) + +test('unsubscribing all subscriptions does not trigger FK violations', async (t) => { + const { satellite, runMigrations } = t.context + + await runMigrations() // because the meta tables need to exist for shape GC + + const subsManager = new MockSubscriptionsManager( + satellite._garbageCollectShapeHandler.bind(satellite) + ) + + // Create the 'users' and 'posts' tables expected by sqlite + // populate it with foreign keys and check that the subscription + // manager does not violate the FKs when unsubscribing from all subscriptions + await satellite.adapter.runInTransaction( + { sql: `CREATE TABLE main.users (id TEXT PRIMARY KEY, name TEXT)` }, + { + sql: `CREATE TABLE main.posts (id TEXT PRIMARY 
KEY, title TEXT, author_id TEXT, FOREIGN KEY(author_id) REFERENCES main.users(id) DEFERRABLE INITIALLY IMMEDIATE)`, + }, + { sql: `INSERT INTO main.users (id, name) VALUES ('u1', 'user1')` }, + { + sql: `INSERT INTO main.posts (id, title, author_id) VALUES ('p1', 'My first post', 'u1')`, + } + ) + + await subsManager.unsubscribeAll() + // if we reach here, the FKs were not violated + + // Check that everything was deleted + const users = await satellite.adapter.query({ + sql: 'SELECT * FROM main.users', + }) + t.assert(users.length === 0) + + const posts = await satellite.adapter.query({ + sql: 'SELECT * FROM main.posts', + }) + t.assert(posts.length === 0) +}) + +test("Garbage collecting the subscription doesn't generate oplog entries", async (t) => { + const { adapter, runMigrations, satellite, authState } = t.context + await satellite.start(authState) + await runMigrations() + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) + const ts = await satellite._performSnapshot() + await satellite._garbageCollectOplog(ts) + t.is((await satellite._getEntries(0)).length, 0) + + satellite._garbageCollectShapeHandler([ + { uuid: '', definition: { selects: [{ tablename: 'parent' }] } }, + ]) + + await satellite._performSnapshot() + t.deepEqual(await satellite._getEntries(0), []) +}) + +test('snapshots: generated oplog entries have the correct tags', async (t) => { + const { client, satellite, adapter, tableInfo } = t.context + const { runMigrations, authState } = t.context + await runMigrations() + + const namespace = 'main' + const tablename = 'parent' + const qualified = `"${namespace}"."${tablename}"` + + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) + + const conn = await satellite.start(authState) + await conn.connectionPromise + + const shapeDef: ClientShapeDefinition = { + selects: [{ tablename }], + } + + satellite!.relations = relations + const { synced } = await satellite.subscribe([shapeDef]) + await synced + + const expectedTs = new Date().getTime() + const incoming = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + expectedTs, + genEncodedTags('remote', [expectedTs]), + { + id: 2, + } + ) + const incomingChange = opLogEntryToChange(incoming, relations) + + await satellite._applyTransaction({ + origin: 'remote', + commit_timestamp: Long.fromNumber(expectedTs), + changes: [incomingChange], + lsn: new Uint8Array(), + }) + + const row = await adapter.query({ + sql: `SELECT id FROM ${qualified}`, + }) + t.is(row.length, 2) + + const shadowRows = await adapter.query({ + sql: `SELECT * FROM main._electric_shadow`, + }) + t.is(shadowRows.length, 2) + t.like(shadowRows[0], { + namespace: 'main', + tablename: 'parent', + }) + + await adapter.run({ sql: `DELETE FROM ${qualified} WHERE id = 2` }) + await satellite._performSnapshot() + + const oplogs = await adapter.query({ + sql: `SELECT * FROM main._electric_oplog`, + }) + t.is(oplogs[0].clearTags, genEncodedTags('remote', [expectedTs])) +}) + +test('DELETE after DELETE sends clearTags', async (t) => { + const { adapter, runMigrations, satellite, authState } = t.context + await runMigrations() + + await satellite._setAuthState(authState) + + await adapter.run({ + sql: `INSERT INTO main.parent(id, value) VALUES (1,'val1')`, + }) + await adapter.run({ + sql: `INSERT INTO main.parent(id, value) VALUES (2,'val2')`, + }) + + await adapter.run({ sql: `DELETE FROM main.parent WHERE id=1` }) + + await 
satellite._performSnapshot() + + await adapter.run({ sql: `DELETE FROM main.parent WHERE id=2` }) + + await satellite._performSnapshot() + + const entries = await satellite._getEntries() + + t.is(entries.length, 4) + + const delete1 = entries[2] + const delete2 = entries[3] + + t.is(delete1.primaryKey, '{"id":1}') + t.is(delete1.optype, 'DELETE') + // No tags for first delete + t.is(delete1.clearTags, '[]') + + t.is(delete2.primaryKey, '{"id":2}') + t.is(delete2.optype, 'DELETE') + // The second should have clearTags + t.not(delete2.clearTags, '[]') +}) + +test('connection backoff success', async (t) => { + t.plan(3) + const { client, satellite } = t.context + + client.disconnect() + + const retry = (_e: any, a: number) => { + if (a > 0) { + t.pass() + return false + } + return true + } + + satellite['_connectRetryHandler'] = retry + + await Promise.all( + [satellite._connectWithBackoff(), satellite['initializing']?.waitOn()].map( + (p) => p?.catch(() => t.pass()) + ) + ) +}) + +// check that when performSnapshot throws, it still resets the 'performing snapshot' flag so that subsequent snapshots don't throw +test('(regression) performSnapshot handles exceptions gracefully', async (t) => { + const { adapter, runMigrations, satellite, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) + + const error = 'FAKE TRANSACTION' + + const txnFn = adapter.transaction + adapter.transaction = () => { + throw new Error(error) + } + + try { + await satellite._performSnapshot() + } catch (e: any) { + t.is(e.message, error) + adapter.transaction = txnFn + } + + await satellite._performSnapshot() + t.pass() +}) diff --git a/clients/typescript/test/satellite/postgres/process.timing.test.ts b/clients/typescript/test/satellite/postgres/process.timing.test.ts new file mode 100644 index 0000000000..cdcaa2a894 --- /dev/null +++ b/clients/typescript/test/satellite/postgres/process.timing.test.ts @@ -0,0 +1,39 @@ +import anyTest, { TestFn } from 'ava' +import { sleepAsync } from '../../../src/util/timer' + +import { satelliteDefaults } from '../../../src/satellite/config' +import { makePgContext, cleanAndStopSatellite, ContextType } from '../common' + +// Speed up the intervals for testing. 
+const opts = Object.assign({}, satelliteDefaults, { + minSnapshotWindow: 80, + pollingInterval: 500, +}) + +const test = anyTest as TestFn +let port = 4900 +test.beforeEach(async (t) => { + await makePgContext(t, port++, opts) +}) +test.afterEach.always(cleanAndStopSatellite) + +test('throttled snapshot respects window', async (t) => { + const { adapter, notifier, runMigrations, satellite, authState } = t.context + await runMigrations() + + await satellite._setAuthState(authState) + + await satellite._throttledSnapshot() + + const numNotifications = notifier.notifications.length + + const sql = `INSERT INTO main.parent(id) VALUES ('1'),('2')` + await adapter.run({ sql }) + await satellite._throttledSnapshot() + + t.is(notifier.notifications.length, numNotifications) + + await sleepAsync(opts.minSnapshotWindow + 10) + + t.is(notifier.notifications.length, numNotifications + 1) +}) diff --git a/clients/typescript/test/satellite/process.migration.test.ts b/clients/typescript/test/satellite/process.migration.test.ts index 09b9c2c19e..cba5952dfd 100644 --- a/clients/typescript/test/satellite/process.migration.test.ts +++ b/clients/typescript/test/satellite/process.migration.test.ts @@ -5,7 +5,7 @@ import { SatOpMigrate_Type, SatRelation_RelationType, } from '../../src/_generated/protocol/satellite' -import { DatabaseAdapter } from '../../src/drivers/better-sqlite3' +import { DatabaseAdapter } from '../../src/electric/adapter' import { generateTag } from '../../src/satellite/oplog' import { DataChange, diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index fb00a642e9..862950c2e9 100644 --- a/clients/typescript/test/satellite/process.test.ts +++ b/clients/typescript/test/satellite/process.test.ts @@ -54,6 +54,7 @@ import { mergeEntries } from '../../src/satellite/merge' import { MockSubscriptionsManager } from '../../src/satellite/shapes/manager' import { AuthState, insecureAuthToken } from '../../src/auth' import { ConnectivityStateChangeNotification } from '../../src/notifiers' +import { sqliteBuilder } from '../../src/migrators/query-builder' const parentRecord = { id: 1, @@ -84,6 +85,7 @@ const qualifiedParentTableName = new QualifiedTablename( 'main', 'parent' ).toString() +const builder = sqliteBuilder test('setup starts a satellite process', async (t) => { t.true(t.context.satellite instanceof SatelliteProcess) @@ -94,8 +96,7 @@ test('start creates system tables', async (t) => { await satellite.start(authState) - const sql = "select name from sqlite_master where type = 'table'" - const rows = await adapter.query({ sql }) + const rows = await adapter.query(builder.getLocalTableNames()) const names = rows.map((row) => row.name) t.true(names.includes('_electric_oplog')) @@ -1111,7 +1112,7 @@ test('compensations: incoming operations accepted if restore referential integri const insertChildAndParentTx = { origin: 'remote', commit_timestamp: Long.fromNumber(new Date().getTime()), // timestamp is not important for this test, it is only used to GC the oplog - changes: [childInsertChange, parentInsertChange], + changes: [parentInsertChange, childInsertChange], lsn: new Uint8Array(), } await satellite._applyTransaction(insertChildAndParentTx) diff --git a/clients/typescript/test/satellite/serialization.test.ts b/clients/typescript/test/satellite/serialization.test.ts index 7ec46191a9..7c9fbf764b 100644 --- a/clients/typescript/test/satellite/serialization.test.ts +++ b/clients/typescript/test/satellite/serialization.test.ts @@ -1,14 
+1,25 @@ import { SatRelation_RelationType } from '../../src/_generated/protocol/satellite' import { serializeRow, deserializeRow } from '../../src/satellite/client' -import test from 'ava' +import test, { ExecutionContext } from 'ava' import { Relation, Record } from '../../src/util/types' import { DbSchema, TableSchema } from '../../src/client/model/schema' import { PgBasicType } from '../../src/client/conversions/types' import { HKT } from '../../src/client/util/hkt' import Database from 'better-sqlite3' -import { DatabaseAdapter } from '../../src/drivers/better-sqlite3' -import { inferRelationsFromSQLite } from '../../src/util/relations' +import { DatabaseAdapter as SQLiteDatabaseAdapter } from '../../src/drivers/better-sqlite3' +import { DatabaseAdapter as PgDatabaseAdapter } from '../../src/drivers/node-postgres/adapter' +import { DatabaseAdapter as DatabaseAdapterInterface } from '../../src/electric/adapter' +import { inferRelationsFromDb } from '../../src/util/relations' import { satelliteDefaults } from '../../src/satellite/config' +import { + QueryBuilder, + pgBuilder, + sqliteBuilder, +} from '../../src/migrators/query-builder' +import { makePgDatabase } from '../support/node-postgres' +import { randomValue } from '../../src/util/random' + +const builder = sqliteBuilder test('serialize/deserialize row data', async (t) => { const rel: Relation = { @@ -266,14 +277,15 @@ test('Prioritize PG types in the schema before inferred SQLite types', async (t) const db = new Database(':memory:') t.teardown(() => db.close()) - const adapter = new DatabaseAdapter(db) + const adapter = new SQLiteDatabaseAdapter(db) await adapter.run({ sql: 'CREATE TABLE bools (id INTEGER PRIMARY KEY, b INTEGER)', }) - const sqliteInferredRelations = await inferRelationsFromSQLite( + const sqliteInferredRelations = await inferRelationsFromDb( adapter, - satelliteDefaults + satelliteDefaults, + builder ) const boolsInferredRelation = sqliteInferredRelations['bools'] @@ -328,50 +340,73 @@ test('Prioritize PG types in the schema before inferred SQLite types', async (t) t.deepEqual(deserializedRow, { id: 5, b: 1 }) }) -test('Use incoming Relation types if not found in the schema', async (t) => { +type MaybePromise = T | Promise +type SetupFn = ( + t: ExecutionContext +) => MaybePromise<[DatabaseAdapterInterface, QueryBuilder]> +const setupSqlite: SetupFn = (t: ExecutionContext) => { const db = new Database(':memory:') t.teardown(() => db.close()) - - const adapter = new DatabaseAdapter(db) - - const sqliteInferredRelations = await inferRelationsFromSQLite( - adapter, - satelliteDefaults - ) - // Empty database - t.is(Object.keys(sqliteInferredRelations).length, 0) - - // Empty Db schema - const testDbDescription = new DbSchema({}, []) - - const newTableRelation: Relation = { - id: 1, - schema: 'schema', - table: 'new_table', - tableType: SatRelation_RelationType.TABLE, - columns: [ - { name: 'value', type: 'INTEGER', isNullable: true }, - { name: 'color', type: 'COLOR', isNullable: true }, // at runtime, incoming SatRelation messages contain the name of the enum type - ], - } - - const row = { - value: 6, - color: 'red', - } - - const satOpRow = serializeRow(row, newTableRelation, testDbDescription) - - t.deepEqual( - satOpRow.values.map((bytes) => new TextDecoder().decode(bytes)), - ['6', 'red'] - ) - - const deserializedRow = deserializeRow( - satOpRow, - newTableRelation, - testDbDescription - ) - - t.deepEqual(deserializedRow, row) + return [new SQLiteDatabaseAdapter(db), builder] +} + +let port = 4800 +const 
setupPG: SetupFn = async (t: ExecutionContext) => { + const dbName = `serialization-test-${randomValue()}` + const { db, stop } = await makePgDatabase(dbName, port++) + t.teardown(async () => await stop()) + return [new PgDatabaseAdapter(db), pgBuilder] +} + +;( + [ + ['SQLite', setupSqlite], + ['Postgres', setupPG], + ] as const +).forEach(([dialect, setup]) => { + test(`(${dialect}) Use incoming Relation types if not found in the schema`, async (t) => { + const [adapter, builder] = await setup(t) + + const inferredRelations = await inferRelationsFromDb( + adapter, + satelliteDefaults, + builder + ) + // Empty database + t.is(Object.keys(inferredRelations).length, 0) + + // Empty Db schema + const testDbDescription = new DbSchema({}, []) + + const newTableRelation: Relation = { + id: 1, + schema: 'schema', + table: 'new_table', + tableType: SatRelation_RelationType.TABLE, + columns: [ + { name: 'value', type: 'INTEGER', isNullable: true }, + { name: 'color', type: 'COLOR', isNullable: true }, // at runtime, incoming SatRelation messages contain the name of the enum type + ], + } + + const row = { + value: 6, + color: 'red', + } + + const satOpRow = serializeRow(row, newTableRelation, testDbDescription) + + t.deepEqual( + satOpRow.values.map((bytes) => new TextDecoder().decode(bytes)), + ['6', 'red'] + ) + + const deserializedRow = deserializeRow( + satOpRow, + newTableRelation, + testDbDescription + ) + + t.deepEqual(deserializedRow, row) + }) }) diff --git a/clients/typescript/test/support/migrations/pg-migrations.js b/clients/typescript/test/support/migrations/pg-migrations.js index 167f8d4271..9eee3163fc 100644 --- a/clients/typescript/test/support/migrations/pg-migrations.js +++ b/clients/typescript/test/support/migrations/pg-migrations.js @@ -17,8 +17,9 @@ export default [ { statements: [ 'CREATE TABLE IF NOT EXISTS main.items (\n value TEXT PRIMARY KEY NOT NULL\n);', + 'CREATE TABLE IF NOT EXISTS main."bigIntTable" (\n value BIGINT PRIMARY KEY NOT NULL\n);', 'CREATE TABLE IF NOT EXISTS main.parent (\n id INTEGER PRIMARY KEY NOT NULL,\n value TEXT,\n other INTEGER DEFAULT 0\n);', - 'CREATE TABLE IF NOT EXISTS main.child (\n id INTEGER PRIMARY KEY NOT NULL,\n parent INTEGER NOT NULL,\n FOREIGN KEY(parent) REFERENCES main.parent(id)\n);', + 'CREATE TABLE IF NOT EXISTS main.child (\n id INTEGER PRIMARY KEY NOT NULL,\n parent INTEGER NOT NULL,\n FOREIGN KEY(parent) REFERENCES main.parent(id) DEFERRABLE INITIALLY IMMEDIATE\n);', 'DROP TABLE IF EXISTS main._electric_trigger_settings;', 'CREATE TABLE main._electric_trigger_settings(namespace TEXT, tablename TEXT, flag INTEGER, PRIMARY KEY (namespace, tablename));', "INSERT INTO main._electric_trigger_settings(namespace, tablename,flag) VALUES ('main', 'child', 1);", @@ -58,7 +59,7 @@ export default [ IF flag_value = 1 THEN -- Insert into _electric_oplog INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) - VALUES ('main', 'child', 'INSERT', jsonb_build_object('id', NEW.id), jsonb_build_object('id', NEW.id, 'parent', NEW.parent), NULL, NULL); + VALUES ('main', 'child', 'INSERT', json_strip_nulls(json_build_object('id', NEW.id)), jsonb_build_object('id', NEW.id, 'parent', NEW.parent), NULL, NULL); END IF; RETURN NEW; @@ -88,7 +89,7 @@ export default [ IF flag_value = 1 THEN -- Insert into _electric_oplog INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) - VALUES ('main', 'child', 'UPDATE', jsonb_build_object('id', NEW.id), 
jsonb_build_object('id', NEW.id, 'parent', NEW.parent), jsonb_build_object('id', OLD.id, 'parent', OLD.parent), NULL); + VALUES ('main', 'child', 'UPDATE', json_strip_nulls(json_build_object('id', NEW.id)), jsonb_build_object('id', NEW.id, 'parent', NEW.parent), jsonb_build_object('id', OLD.id, 'parent', OLD.parent), NULL); END IF; RETURN NEW; @@ -117,7 +118,7 @@ export default [ IF flag_value = 1 THEN -- Insert into _electric_oplog INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) - VALUES ('main', 'child', 'DELETE', jsonb_build_object('id', OLD.id), NULL, jsonb_build_object('id', OLD.id, 'parent', OLD.parent), NULL); + VALUES ('main', 'child', 'DELETE', json_strip_nulls(json_build_object('id', OLD.id)), NULL, jsonb_build_object('id', OLD.id, 'parent', OLD.parent), NULL); END IF; RETURN NEW; @@ -147,7 +148,7 @@ export default [ IF flag_value = 1 AND meta_value = '1' THEN INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) - SELECT 'main', 'parent', 'INSERT', jsonb_build_object('id', id), + SELECT 'main', 'parent', 'INSERT', json_strip_nulls(json_build_object('id', id)), jsonb_build_object('id', id, 'value', value, 'other', other), NULL, NULL FROM main.parent WHERE id = NEW."parent"; END IF; @@ -182,7 +183,7 @@ export default [ IF flag_value = 1 AND meta_value = '1' THEN -- Insert into _electric_oplog INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) - SELECT 'main', 'parent', 'UPDATE', jsonb_build_object('id', id), + SELECT 'main', 'parent', 'UPDATE', json_strip_nulls(json_build_object('id', id)), jsonb_build_object('id', id, 'value', value, 'other', other), NULL, NULL FROM main.parent WHERE id = NEW."parent"; END IF; @@ -194,7 +195,7 @@ export default [ `, ` CREATE TRIGGER compensation_update_main_child_parent_into_oplog - AFTER UPDATE ON main.parent + AFTER UPDATE ON main.child FOR EACH ROW EXECUTE FUNCTION compensation_update_main_child_parent_into_oplog_function(); `, @@ -231,7 +232,7 @@ export default [ IF flag_value = 1 THEN -- Insert into _electric_oplog INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) - VALUES ('main', 'items', 'INSERT', jsonb_build_object('value', NEW.value), jsonb_build_object('value', NEW.value), NULL, NULL); + VALUES ('main', 'items', 'INSERT', json_strip_nulls(json_build_object('value', NEW.value)), jsonb_build_object('value', NEW.value), NULL, NULL); END IF; RETURN NEW; @@ -262,7 +263,7 @@ export default [ IF flag_value = 1 THEN -- Insert into _electric_oplog INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) - VALUES ('main', 'items', 'UPDATE', jsonb_build_object('value', NEW.value), jsonb_build_object('value', NEW.value), jsonb_build_object('value', OLD.value), NULL); + VALUES ('main', 'items', 'UPDATE', json_strip_nulls(json_build_object('value', NEW.value)), jsonb_build_object('value', NEW.value), jsonb_build_object('value', OLD.value), NULL); END IF; RETURN NEW; @@ -292,7 +293,7 @@ export default [ IF flag_value = 1 THEN -- Insert into _electric_oplog INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) - VALUES ('main', 'items', 'DELETE', jsonb_build_object('value', OLD.value), NULL, jsonb_build_object('value', OLD.value), NULL); + VALUES ('main', 'items', 'DELETE', json_strip_nulls(json_build_object('value', 
OLD.value)), NULL, jsonb_build_object('value', OLD.value), NULL); END IF; RETURN OLD; @@ -346,7 +347,7 @@ export default [ 'main', 'parent', 'INSERT', - jsonb_build_object('id', NEW.id), + json_strip_nulls(json_build_object('id', NEW.id)), jsonb_build_object('id', NEW.id, 'value', NEW.value, 'other', NEW.other), NULL, NULL @@ -386,7 +387,7 @@ export default [ 'main', 'parent', 'UPDATE', - jsonb_build_object('id', NEW.id), + json_strip_nulls(json_build_object('id', NEW.id)), jsonb_build_object('id', NEW.id, 'value', NEW.value, 'other', NEW.other), jsonb_build_object('id', OLD.id, 'value', OLD.value, 'other', OLD.other), NULL @@ -426,7 +427,7 @@ export default [ 'main', 'parent', 'DELETE', - jsonb_build_object('id', OLD.id), + json_strip_nulls(json_build_object('id', OLD.id)), NULL, jsonb_build_object('id', OLD.id, 'value', OLD.value, 'other', OLD.other), NULL @@ -446,6 +447,146 @@ export default [ FOR EACH ROW EXECUTE FUNCTION delete_main_parent_into_oplog_function(); `, + ` + -- Toggles for turning the triggers on and off + INSERT INTO "main"."_electric_trigger_settings" ("namespace", "tablename", "flag") + VALUES ('main', 'bigIntTable', 1) + ON CONFLICT DO NOTHING; + `, + ` + /* Triggers for table bigIntTable */ + + -- ensures primary key is immutable + DROP TRIGGER IF EXISTS update_ensure_main_bigIntTable_primarykey ON "main"."bigIntTable"; + `, + ` + CREATE OR REPLACE FUNCTION update_ensure_main_bigIntTable_primarykey_function() + RETURNS TRIGGER AS $$ + BEGIN + IF OLD."value" IS DISTINCT FROM NEW."value" THEN + RAISE EXCEPTION 'Cannot change the value of column value as it belongs to the primary key'; + END IF; + RETURN NEW; + END; + $$ LANGUAGE plpgsql; + `, + ` + CREATE TRIGGER update_ensure_main_bigIntTable_primarykey + BEFORE UPDATE ON "main"."bigIntTable" + FOR EACH ROW + EXECUTE FUNCTION update_ensure_main_bigIntTable_primarykey_function(); + `, + ` + -- Triggers that add INSERT, UPDATE, DELETE operation to the oplog table + DROP TRIGGER IF EXISTS insert_main_bigIntTable_into_oplog ON "main"."bigIntTable"; + `, + ` + CREATE OR REPLACE FUNCTION insert_main_bigIntTable_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'bigIntTable'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ( + 'main', + 'bigIntTable', + 'INSERT', + json_strip_nulls(json_build_object('value', cast(new."value" as TEXT))), + jsonb_build_object('value', cast(new."value" as TEXT)), + NULL, + NULL + ); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + `, + ` + CREATE TRIGGER insert_main_bigIntTable_into_oplog + AFTER INSERT ON "main"."bigIntTable" + FOR EACH ROW + EXECUTE FUNCTION insert_main_bigIntTable_into_oplog_function(); + `, + 'DROP TRIGGER IF EXISTS update_main_bigIntTable_into_oplog ON "main"."bigIntTable";', + ` + CREATE OR REPLACE FUNCTION update_main_bigIntTable_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'bigIntTable'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, 
"primaryKey", "newRow", "oldRow", timestamp) + VALUES ( + 'main', + 'bigIntTable', + 'UPDATE', + json_strip_nulls(json_build_object('value', cast(new."value" as TEXT))), + jsonb_build_object('value', cast(new."value" as TEXT)), + jsonb_build_object('value', cast(old."value" as TEXT)), + NULL + ); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + `, + ` + CREATE TRIGGER update_main_bigIntTable_into_oplog + AFTER UPDATE ON "main"."bigIntTable" + FOR EACH ROW + EXECUTE FUNCTION update_main_bigIntTable_into_oplog_function(); + `, + 'DROP TRIGGER IF EXISTS delete_main_bigIntTable_into_oplog ON "main"."bigIntTable";', + ` + CREATE OR REPLACE FUNCTION delete_main_bigIntTable_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'bigIntTable'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ( + 'main', + 'bigIntTable', + 'DELETE', + json_strip_nulls(json_build_object('value', cast(old."value" as TEXT))), + NULL, + jsonb_build_object('value', cast(old."value" as TEXT)), + NULL + ); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + `, + ` + CREATE TRIGGER delete_main_bigIntTable_into_oplog + AFTER DELETE ON "main"."bigIntTable" + FOR EACH ROW + EXECUTE FUNCTION delete_main_bigIntTable_into_oplog_function(); + `, ], version: '2', }, diff --git a/clients/typescript/test/support/satellite-helpers.ts b/clients/typescript/test/support/satellite-helpers.ts index 628e5445c2..26dcf3b5db 100644 --- a/clients/typescript/test/support/satellite-helpers.ts +++ b/clients/typescript/test/support/satellite-helpers.ts @@ -1,4 +1,9 @@ import { DatabaseAdapter } from '../../src/electric/adapter' +import { + QueryBuilder, + pgBuilder, + sqliteBuilder, +} from '../../src/migrators/query-builder' import { OplogEntry, OpType, @@ -42,7 +47,7 @@ export const loadSatelliteMetaTable = async ( metaTableName = '_electric_meta' ): Promise => { const rows = await db.query({ - sql: `SELECT key, value FROM ${metaTableName}`, + sql: `SELECT key, value FROM main.${metaTableName}`, }) const entries = rows.map((x) => [x.key, x.value]) @@ -190,22 +195,23 @@ export const genEncodedTags = ( } /** - * List all shadow entires, or get just one if an `oplog` parameter is provided + * List all shadow entries, or get just one if an `oplog` parameter is provided */ export async function getMatchingShadowEntries( adapter: DatabaseAdapter, oplog?: OplogEntry, + builder: QueryBuilder = sqliteBuilder, shadowTable = 'main._electric_shadow' ): Promise { let query: Statement - let selectTags = `SELECT * FROM ${shadowTable}` + let selectTags = `SELECT namespace, tablename, "primaryKey", tags FROM ${shadowTable}` if (oplog != undefined) { selectTags = selectTags + ` WHERE - namespace = ? AND - tablename = ? AND - primaryKey = ? 
+ namespace = ${builder.makePositionalParam(1)} AND + tablename = ${builder.makePositionalParam(2)} AND + "primaryKey" = ${builder.makePositionalParam(3)} ` const args = [oplog.namespace, oplog.tablename, getShadowPrimaryKey(oplog)] query = { sql: selectTags, args: args } @@ -215,3 +221,11 @@ export async function getMatchingShadowEntries( return (await adapter.query(query)) as unknown as ShadowEntry[] } + +export async function getPgMatchingShadowEntries( + adapter: DatabaseAdapter, + oplog?: OplogEntry, + shadowTable = 'main._electric_shadow' +): Promise { + return getMatchingShadowEntries(adapter, oplog, pgBuilder, shadowTable) +} diff --git a/clients/typescript/test/util/statements.test.ts b/clients/typescript/test/util/statements.test.ts index 993594d364..6f20e76e86 100644 --- a/clients/typescript/test/util/statements.test.ts +++ b/clients/typescript/test/util/statements.test.ts @@ -1,6 +1,9 @@ import test from 'ava' +import { sqliteBuilder } from '../../src/migrators/query-builder' -import { prepareInsertBatchedStatements } from '../../src/util/statements' +//import { prepareInsertBatchedStatements } from '../../src/util/statements' + +const builder = sqliteBuilder test('prepareInsertBatchedStatements correctly splits up data in batches', (t) => { const data = [ @@ -8,7 +11,7 @@ test('prepareInsertBatchedStatements correctly splits up data in batches', (t) = { a: 3, b: 4 }, { a: 5, b: 6 }, ] - const stmts = prepareInsertBatchedStatements( + const stmts = builder.prepareInsertBatchedStatements( 'INSERT INTO test (a, b) VALUES', ['a', 'b'], data, @@ -30,7 +33,7 @@ test('prepareInsertBatchedStatements respects column order', (t) => { { a: 3, b: 4 }, { a: 5, b: 6 }, ] - const stmts = prepareInsertBatchedStatements( + const stmts = builder.prepareInsertBatchedStatements( 'INSERT INTO test (a, b) VALUES', ['b', 'a'], data, From e84583ce2a1d76f9b979c7fa99562bab4506df37 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 21 Feb 2024 16:30:53 +0100 Subject: [PATCH 013/156] Common test file for process timing tests and make PG and SQLite versions. --- .../satellite/postgres/process.timing.test.ts | 32 ++----------- .../test/satellite/process.timing.test.ts | 46 +++++++++++-------- .../satellite/sqlite/process.timing.test.ts | 9 ++++ 3 files changed, 38 insertions(+), 49 deletions(-) create mode 100644 clients/typescript/test/satellite/sqlite/process.timing.test.ts diff --git a/clients/typescript/test/satellite/postgres/process.timing.test.ts b/clients/typescript/test/satellite/postgres/process.timing.test.ts index cdcaa2a894..e6a8610f0c 100644 --- a/clients/typescript/test/satellite/postgres/process.timing.test.ts +++ b/clients/typescript/test/satellite/postgres/process.timing.test.ts @@ -1,39 +1,13 @@ import anyTest, { TestFn } from 'ava' -import { sleepAsync } from '../../../src/util/timer' - -import { satelliteDefaults } from '../../../src/satellite/config' +import { processTimingTests, opts } from '../process.timing.test' import { makePgContext, cleanAndStopSatellite, ContextType } from '../common' -// Speed up the intervals for testing. 
-const opts = Object.assign({}, satelliteDefaults, { - minSnapshotWindow: 80, - pollingInterval: 500, -}) +let port = 4900 const test = anyTest as TestFn -let port = 4900 test.beforeEach(async (t) => { await makePgContext(t, port++, opts) }) test.afterEach.always(cleanAndStopSatellite) -test('throttled snapshot respects window', async (t) => { - const { adapter, notifier, runMigrations, satellite, authState } = t.context - await runMigrations() - - await satellite._setAuthState(authState) - - await satellite._throttledSnapshot() - - const numNotifications = notifier.notifications.length - - const sql = `INSERT INTO main.parent(id) VALUES ('1'),('2')` - await adapter.run({ sql }) - await satellite._throttledSnapshot() - - t.is(notifier.notifications.length, numNotifications) - - await sleepAsync(opts.minSnapshotWindow + 10) - - t.is(notifier.notifications.length, numNotifications + 1) -}) +processTimingTests(test) diff --git a/clients/typescript/test/satellite/process.timing.test.ts b/clients/typescript/test/satellite/process.timing.test.ts index bbb44a864c..60da87d3ed 100644 --- a/clients/typescript/test/satellite/process.timing.test.ts +++ b/clients/typescript/test/satellite/process.timing.test.ts @@ -1,36 +1,42 @@ -import anyTest, { TestFn } from 'ava' +import { TestFn } from 'ava' import { sleepAsync } from '../../src/util/timer' +import { ContextType } from './common' import { satelliteDefaults } from '../../src/satellite/config' -import { makeContext, clean, ContextType } from './common' + +/* + * This file defines the tests for the process timing of Satellite. + * These tests are common to both SQLite and Postgres. + * Only their context differs. + * Therefore, the SQLite and Postgres test files + * setup their context and then call the tests from this file. + */ // Speed up the intervals for testing. 
-const opts = Object.assign({}, satelliteDefaults, { +export const opts = Object.assign({}, satelliteDefaults, { minSnapshotWindow: 80, pollingInterval: 500, }) -const test = anyTest as TestFn -test.beforeEach(async (t) => makeContext(t, opts)) -test.afterEach.always(clean) - -test('throttled snapshot respects window', async (t) => { - const { adapter, notifier, runMigrations, satellite, authState } = t.context - await runMigrations() +export const processTimingTests = (test: TestFn) => { + test(`throttled snapshot respects window`, async (t) => { + const { adapter, notifier, runMigrations, satellite, authState } = t.context + await runMigrations() - await satellite._setAuthState(authState) + await satellite._setAuthState(authState) - await satellite._throttledSnapshot() + await satellite._throttledSnapshot() - const numNotifications = notifier.notifications.length + const numNotifications = notifier.notifications.length - const sql = `INSERT INTO parent(id) VALUES ('1'),('2')` - await adapter.run({ sql }) - await satellite._throttledSnapshot() + const sql = `INSERT INTO main.parent(id) VALUES ('1'),('2')` + await adapter.run({ sql }) + await satellite._throttledSnapshot() - t.is(notifier.notifications.length, numNotifications) + t.is(notifier.notifications.length, numNotifications) - await sleepAsync(opts.minSnapshotWindow) + await sleepAsync(opts.minSnapshotWindow + 50) - t.is(notifier.notifications.length, numNotifications + 1) -}) + t.is(notifier.notifications.length, numNotifications + 1) + }) +} diff --git a/clients/typescript/test/satellite/sqlite/process.timing.test.ts b/clients/typescript/test/satellite/sqlite/process.timing.test.ts new file mode 100644 index 0000000000..43caa89d68 --- /dev/null +++ b/clients/typescript/test/satellite/sqlite/process.timing.test.ts @@ -0,0 +1,9 @@ +import anyTest, { TestFn } from 'ava' +import { processTimingTests, opts } from '../process.timing.test' +import { makeContext, clean, ContextType } from '../common' + +const test = anyTest as TestFn +test.beforeEach(async (t) => makeContext(t, opts)) +test.afterEach.always(clean) + +processTimingTests(test) From f32b06fa6f7bdef52dcd0aafb5c295b85e3ef876 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 21 Feb 2024 17:34:15 +0100 Subject: [PATCH 014/156] Refactored process.tags.test into a common file for both PG and SQLite --- .../satellite/postgres/process.tags.test.ts | 691 +------ .../test/satellite/process.tags.test.ts | 1677 +++++++++-------- .../satellite/sqlite/process.tags.test.ts | 15 + 3 files changed, 883 insertions(+), 1500 deletions(-) create mode 100644 clients/typescript/test/satellite/sqlite/process.tags.test.ts diff --git a/clients/typescript/test/satellite/postgres/process.tags.test.ts b/clients/typescript/test/satellite/postgres/process.tags.test.ts index b7515aa5bc..973e9166e6 100644 --- a/clients/typescript/test/satellite/postgres/process.tags.test.ts +++ b/clients/typescript/test/satellite/postgres/process.tags.test.ts @@ -1,696 +1,17 @@ import anyTest, { TestFn } from 'ava' -import Long from 'long' -import { - OPTYPES, - generateTag, - encodeTags, - opLogEntryToChange, -} from '../../../src/satellite/oplog' +import { makePgContext, cleanAndStopSatellite } from '../common' -import { - generateRemoteOplogEntry, - genEncodedTags, - getPgMatchingShadowEntries as getMatchingShadowEntries, -} from '../../support/satellite-helpers' -import { Statement } from '../../../src/util/types' +import { getPgMatchingShadowEntries } from '../../support/satellite-helpers' +import { 
processTagsTests, ContextType } from '../process.tags.test' -import { - makePgContext, - cleanAndStopSatellite, - relations, - ContextType, -} from '../common' +let port = 5100 const test = anyTest as TestFn -let port = 5100 test.beforeEach(async (t) => { await makePgContext(t, port++) + t.context.getMatchingShadowEntries = getPgMatchingShadowEntries }) test.afterEach.always(cleanAndStopSatellite) -test('basic rules for setting tags', async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context - await runMigrations() - - await satellite._setAuthState(authState) - const clientId = satellite._authState?.clientId ?? 'test_client' - - await adapter.run({ - sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', null)`, - }) - - const txDate1 = await satellite._performSnapshot() - let shadow = await getMatchingShadowEntries(adapter) - t.is(shadow.length, 1) - t.is(shadow[0].tags, genEncodedTags(clientId, [txDate1])) - - await adapter.run({ - sql: `UPDATE main.parent SET value = 'local1', other = 3 WHERE id = 1`, - }) - - const txDate2 = await satellite._performSnapshot() - shadow = await getMatchingShadowEntries(adapter) - t.is(shadow.length, 1) - t.is(shadow[0].tags, genEncodedTags(clientId, [txDate2])) - - await adapter.run({ - sql: `UPDATE main.parent SET value = 'local2', other = 4 WHERE id = 1`, - }) - - const txDate3 = await satellite._performSnapshot() - shadow = await getMatchingShadowEntries(adapter) - t.is(shadow.length, 1) - t.is(shadow[0].tags, genEncodedTags(clientId, [txDate3])) - - await adapter.run({ - sql: `DELETE FROM main.parent WHERE id = 1`, - }) - - const txDate4 = await satellite._performSnapshot() - shadow = await getMatchingShadowEntries(adapter) - t.is(shadow.length, 0) - - const entries = await satellite._getEntries() - t.is(entries[0].clearTags, encodeTags([])) - t.is(entries[1].clearTags, genEncodedTags(clientId, [txDate1])) - t.is(entries[2].clearTags, genEncodedTags(clientId, [txDate2])) - t.is(entries[3].clearTags, genEncodedTags(clientId, [txDate3])) - - t.not(txDate1, txDate2) - t.not(txDate2, txDate3) - t.not(txDate3, txDate4) -}) - -test('TX1=INSERT, TX2=DELETE, TX3=INSERT, ack TX1', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - - const clientId = satellite._authState?.clientId ?? 
'test_id' - - // Local INSERT - const stmts1 = { - sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3)`, - args: ['1', 'local', null], - } - await adapter.runInTransaction(stmts1) - const txDate1 = await satellite._performSnapshot() - - const localEntries1 = await satellite._getEntries() - const shadowEntry1 = await getMatchingShadowEntries(adapter, localEntries1[0]) - - // shadow tag is time of snapshot - const tag1 = genEncodedTags(clientId, [txDate1]) - t.is(tag1, shadowEntry1[0].tags) - // clearTag is empty - t.like(localEntries1[0], { - clearTags: JSON.stringify([]), - timestamp: txDate1.toISOString(), - }) - - // Local DELETE - const stmts2 = { - sql: `DELETE FROM main.parent WHERE id=$1`, - args: ['1'], - } - await adapter.runInTransaction(stmts2) - const txDate2 = await satellite._performSnapshot() - - const localEntries2 = await satellite._getEntries() - const shadowEntry2 = await getMatchingShadowEntries(adapter, localEntries2[1]) - - // shadowTag is empty - t.is(0, shadowEntry2.length) - // clearTags contains previous shadowTag - t.like(localEntries2[1], { - clearTags: tag1, - timestamp: txDate2.toISOString(), - }) - - // Local INSERT - const stmts3 = { - sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3)`, - args: ['1', 'local', null], - } - await adapter.runInTransaction(stmts3) - const txDate3 = await satellite._performSnapshot() - - const localEntries3 = await satellite._getEntries() - const shadowEntry3 = await getMatchingShadowEntries(adapter, localEntries3[1]) - - const tag3 = genEncodedTags(clientId, [txDate3]) - // shadow tag is tag3 - t.is(tag3, shadowEntry3[0].tags) - // clearTags is empty after a DELETE - t.like(localEntries3[2], { - clearTags: JSON.stringify([]), - timestamp: txDate3.toISOString(), - }) - - // apply incomig operation (local operation ack) - const ackEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - txDate1.getTime(), - tag1, - { - id: 1, - value: 'local', - other: null, - }, - undefined - ) - - const ackDataChange = opLogEntryToChange(ackEntry, relations) - satellite.relations = relations // satellite must be aware of the relations in order to turn the `ackDataChange` DataChange into an OpLogEntry - const tx = { - origin: clientId, - commit_timestamp: Long.fromNumber((txDate1 as Date).getTime()), - changes: [ackDataChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(tx) - - // validate that garbage collection has been triggered - t.is(2, (await satellite._getEntries()).length) - - const shadow = await getMatchingShadowEntries(adapter) - t.like( - shadow[0], - { - tags: genEncodedTags(clientId, [txDate3]), - }, - 'error: tag1 was reintroduced after merging acked operation' - ) -}) - -test('remote tx (INSERT) concurrently with local tx (INSERT -> DELETE)', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - - const stmts: Statement[] = [] - - // For this key we will choose remote Tx, such that: Local TM > Remote TX - stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, - args: ['1', 'local', null], - }) - stmts.push({ sql: `DELETE FROM main.parent WHERE id = 1` }) - // For this key we will choose remote Tx, such that: Local TM < Remote TX - stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, - args: ['2', 'local', null], - }) - stmts.push({ sql: `DELETE FROM main.parent WHERE id 
= 2` }) - await adapter.runInTransaction(...stmts) - - const txDate1 = await satellite._performSnapshot() - - const prevTs = txDate1.getTime() - 1 - const nextTs = txDate1.getTime() + 1 - - const prevEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - prevTs, - genEncodedTags('remote', [prevTs]), - { - id: 1, - value: 'remote', - other: 1, - }, - undefined - ) - const nextEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - nextTs, - genEncodedTags('remote', [nextTs]), - { - id: 2, - value: 'remote', - other: 2, - }, - undefined - ) - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s - - const prevChange = opLogEntryToChange(prevEntry, relations) - const prevTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(prevTs), - changes: [prevChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(prevTx) - - const nextChange = opLogEntryToChange(nextEntry, relations) - const nextTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(nextTs), - changes: [nextChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(nextTx) - - const shadow = await getMatchingShadowEntries(adapter) - const expectedShadow = [ - { - namespace: 'main', - tablename: 'parent', - primaryKey: '{"id":1}', - tags: genEncodedTags('remote', [prevTs]), - }, - { - namespace: 'main', - tablename: 'parent', - primaryKey: '{"id":2}', - tags: genEncodedTags('remote', [nextTs]), - }, - ] - t.deepEqual(shadow, expectedShadow) - - const userTable = await adapter.query({ sql: `SELECT * FROM main.parent;` }) - - // In both cases insert wins over delete, but - // for id = 1 CR picks local data before delete, while - // for id = 2 CR picks remote data - const expectedUserTable = [ - { id: 1, value: 'local', other: null }, - { id: 2, value: 'remote', other: 2 }, - ] - t.deepEqual(expectedUserTable, userTable) -}) - -test('remote tx (INSERT) concurrently with 2 local txses (INSERT -> DELETE)', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - - let stmts: Statement[] = [] - - // For this key we will choose remote Tx, such that: Local TM > Remote TX - stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, - args: ['1', 'local', null], - }) - stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, - args: ['2', 'local', null], - }) - await adapter.runInTransaction(...stmts) - const txDate1 = await satellite._performSnapshot() - - stmts = [] - // For this key we will choose remote Tx, such that: Local TM < Remote TX - stmts.push({ sql: `DELETE FROM main.parent WHERE id = 1` }) - stmts.push({ sql: `DELETE FROM main.parent WHERE id = 2` }) - await adapter.runInTransaction(...stmts) - await satellite._performSnapshot() - - const prevTs = txDate1.getTime() - 1 - const nextTs = txDate1.getTime() + 1 - - const prevEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - prevTs, - genEncodedTags('remote', [prevTs]), - { - id: 1, - value: 'remote', - other: 1, - }, - undefined - ) - const nextEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - nextTs, - genEncodedTags('remote', [nextTs]), - { - id: 2, - value: 'remote', - other: 2, - }, - undefined - ) - - satellite.relations = relations // satellite must be 
aware of the relations in order to turn `DataChange`s into `OpLogEntry`s in `_applyTransaction` - - const prevChange = opLogEntryToChange(prevEntry, relations) - const prevTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(prevTs), - changes: [prevChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(prevTx) - - const nextChange = opLogEntryToChange(nextEntry, relations) - const nextTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(nextTs), - changes: [nextChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(nextTx) - - const shadow = await getMatchingShadowEntries(adapter) - const expectedShadow = [ - { - namespace: 'main', - tablename: 'parent', - primaryKey: '{"id":1}', - tags: genEncodedTags('remote', [prevTs]), - }, - { - namespace: 'main', - tablename: 'parent', - primaryKey: '{"id":2}', - tags: genEncodedTags('remote', [nextTs]), - }, - ] - t.deepEqual(shadow, expectedShadow) - - let userTable = await adapter.query({ sql: `SELECT * FROM main.parent;` }) - - // In both cases insert wins over delete, but - // for id = 1 CR picks local data before delete, while - // for id = 2 CR picks remote data - const expectedUserTable = [ - { id: 1, value: 'local', other: null }, - { id: 2, value: 'remote', other: 2 }, - ] - t.deepEqual(expectedUserTable, userTable) -}) - -test('remote tx (INSERT) concurrently with local tx (INSERT -> UPDATE)', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - const clientId = satellite._authState?.clientId ?? 'test_id' - let stmts: Statement[] = [] - - // For this key we will choose remote Tx, such that: Local TM > Remote TX - stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, - args: ['1', 'local', null], - }) - stmts.push({ - sql: `UPDATE main.parent SET value = $1, other = $2 WHERE id = 1`, - args: ['local', 999], - }) - // For this key we will choose remote Tx, such that: Local TM < Remote TX - stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, - args: ['2', 'local', null], - }) - stmts.push({ - sql: `UPDATE main.parent SET value = $1, other = $2 WHERE id = 1`, - args: ['local', 999], - }) - await adapter.runInTransaction(...stmts) - - const txDate1 = await satellite._performSnapshot() - - const prevTs = txDate1.getTime() - 1 - const nextTs = txDate1.getTime() + 1 - - const prevEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - prevTs, - genEncodedTags('remote', [prevTs]), - { - id: 1, - value: 'remote', - other: 1, - }, - undefined - ) - - const nextEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - nextTs, - genEncodedTags('remote', [nextTs]), - { - id: 2, - value: 'remote', - other: 2, - }, - undefined - ) - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s in `_applyTransaction` - - const prevChange = opLogEntryToChange(prevEntry, relations) - const prevTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(prevTs), - changes: [prevChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(prevTx) - - const nextChange = opLogEntryToChange(nextEntry, relations) - const nextTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(nextTs), - changes: [nextChange], - lsn: new Uint8Array(), - } - await 
satellite._applyTransaction(nextTx) - - let shadow = await getMatchingShadowEntries(adapter) - const expectedShadow = [ - { - namespace: 'main', - tablename: 'parent', - primaryKey: '{"id":1}', - tags: encodeTags([ - generateTag(clientId, new Date(txDate1)), - generateTag('remote', new Date(prevTs)), - ]), - }, - { - namespace: 'main', - tablename: 'parent', - primaryKey: '{"id":2}', - tags: encodeTags([ - generateTag(clientId, new Date(txDate1)), - generateTag('remote', new Date(nextTs)), - ]), - }, - ] - t.deepEqual(shadow, expectedShadow) - - let entries = await satellite._getEntries() - - // Given that Insert and Update happen within the same transaction clear should not - // contain itself - t.is(entries[0].clearTags, encodeTags([])) - t.is(entries[1].clearTags, encodeTags([])) - t.is(entries[2].clearTags, encodeTags([])) - t.is(entries[3].clearTags, encodeTags([])) - - let userTable = await adapter.query({ sql: `SELECT * FROM main.parent;` }) - - // In both cases insert wins over delete, but - // for id = 1 CR picks local data before delete, while - // for id = 2 CR picks remote data - const expectedUserTable = [ - { id: 1, value: 'local', other: 999 }, - { id: 2, value: 'remote', other: 2 }, - ] - t.deepEqual(expectedUserTable, userTable) -}) - -test('origin tx (INSERT) concurrently with local txses (INSERT -> DELETE)', async (t) => { - // - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - const clientId = satellite._authState?.clientId ?? 'test_id' - - let stmts: Statement[] = [] - - // For this key we will choose remote Tx, such that: Local TM > Remote TX - stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, - args: ['1', 'local', null], - }) - stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, - args: ['2', 'local', null], - }) - await adapter.runInTransaction(...stmts) - const txDate1 = await satellite._performSnapshot() - - stmts = [] - // For this key we will choose remote Tx, such that: Local TM < Remote TX - stmts.push({ sql: `DELETE FROM main.parent WHERE id = 1` }) - stmts.push({ sql: `DELETE FROM main.parent WHERE id = 2` }) - await adapter.runInTransaction(...stmts) - await satellite._performSnapshot() - - let entries = await satellite._getEntries() - t.assert(entries[0].newRow) - t.assert(entries[1]) - t.assert(entries[1].newRow) - - // For this key we receive transaction which was older - const electricEntrySameTs = new Date(entries[0].timestamp).getTime() - let electricEntrySame = generateRemoteOplogEntry( - tableInfo, - entries[0].namespace, - entries[0].tablename, - OPTYPES.insert, - electricEntrySameTs, - genEncodedTags(clientId, [txDate1]), - JSON.parse(entries[0].newRow!), - undefined - ) - - // For this key we had concurrent insert transaction from another node `remote` - // with same timestamp - const electricEntryConflictTs = new Date(entries[1].timestamp).getTime() - let electricEntryConflict = generateRemoteOplogEntry( - tableInfo, - entries[1].namespace, - entries[1].tablename, - OPTYPES.insert, - electricEntryConflictTs, - encodeTags([ - generateTag(clientId, txDate1), - generateTag('remote', txDate1), - ]), - JSON.parse(entries[1].newRow!), - undefined - ) - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s in `_applyTransaction` - - const electricEntrySameChange = opLogEntryToChange( - electricEntrySame, - 
relations - ) - const electricEntryConflictChange = opLogEntryToChange( - electricEntryConflict, - relations - ) - const tx = { - origin: clientId, - commit_timestamp: Long.fromNumber(new Date().getTime()), // commit_timestamp doesn't matter for this test, it is only used to GC the oplog - changes: [electricEntrySameChange, electricEntryConflictChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(tx) - - let shadow = await getMatchingShadowEntries(adapter) - const expectedShadow = [ - { - namespace: 'main', - tablename: 'parent', - primaryKey: '{"id":2}', - tags: genEncodedTags('remote', [txDate1]), - }, - ] - t.deepEqual(shadow, expectedShadow) - - let userTable = await adapter.query({ sql: `SELECT * FROM main.parent;` }) - const expectedUserTable = [{ id: 2, value: 'local', other: null }] - t.deepEqual(expectedUserTable, userTable) -}) - -test('local (INSERT -> UPDATE -> DELETE) with remote equivalent', async (t) => { - const { runMigrations, satellite, tableInfo, authState, adapter } = t.context - await runMigrations() - await satellite._setAuthState(authState) - const clientId = satellite._authState?.clientId ?? 'test_id' - let txDate1 = new Date().getTime() - - const insertEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.update, - txDate1, - genEncodedTags('remote', [txDate1]), - { - id: 1, - value: 'local', - }, - undefined - ) - - const deleteDate = txDate1 + 1 - const deleteEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.delete, - deleteDate, - genEncodedTags('remote', []), - { - id: 1, - value: 'local', - }, - undefined - ) - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s in `_applyTransaction` - - const insertChange = opLogEntryToChange(insertEntry, relations) - const insertTx = { - origin: clientId, - commit_timestamp: Long.fromNumber(txDate1), - changes: [insertChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(insertTx) - - let shadow = await getMatchingShadowEntries(adapter) - const expectedShadow = [ - { - namespace: 'main', - tablename: 'parent', - primaryKey: '{"id":1}', - tags: genEncodedTags('remote', [txDate1]), - }, - ] - t.deepEqual(shadow, expectedShadow) - - const deleteChange = opLogEntryToChange(deleteEntry, relations) - const deleteTx = { - origin: clientId, - commit_timestamp: Long.fromNumber(deleteDate), - changes: [deleteChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(deleteTx) - - shadow = await getMatchingShadowEntries(adapter) - t.deepEqual([], shadow) - - let entries = await satellite._getEntries(0) - t.deepEqual([], entries) -}) +processTagsTests(test) diff --git a/clients/typescript/test/satellite/process.tags.test.ts b/clients/typescript/test/satellite/process.tags.test.ts index 0286f1a232..4edf1294b2 100644 --- a/clients/typescript/test/satellite/process.tags.test.ts +++ b/clients/typescript/test/satellite/process.tags.test.ts @@ -1,4 +1,4 @@ -import anyTest, { TestFn } from 'ava' +import { TestFn } from 'ava' import Long from 'long' import { @@ -11,833 +11,880 @@ import { import { generateRemoteOplogEntry, genEncodedTags, - getMatchingShadowEntries, + getMatchingShadowEntries as getSqliteMatchingShadowEntries, + getPgMatchingShadowEntries, } from '../support/satellite-helpers' import { Statement } from '../../src/util/types' -import { - makeContext, - cleanAndStopSatellite, - relations, - ContextType, -} from './common' - -const test = anyTest 
as TestFn -test.beforeEach(makeContext) -test.afterEach.always(cleanAndStopSatellite) - -test('basic rules for setting tags', async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context - await runMigrations() - - await satellite._setAuthState(authState) - const clientId = satellite._authState?.clientId ?? 'test_client' - - await adapter.run({ - sql: `INSERT INTO parent(id, value, other) VALUES (1, 'local', null)`, - }) - - const txDate1 = await satellite._performSnapshot() - let shadow = await getMatchingShadowEntries(adapter) - t.is(shadow.length, 1) - t.is(shadow[0].tags, genEncodedTags(clientId, [txDate1])) - - await adapter.run({ - sql: `UPDATE parent SET value = 'local1', other = 'other1' WHERE id = 1`, - }) - - const txDate2 = await satellite._performSnapshot() - shadow = await getMatchingShadowEntries(adapter) - t.is(shadow.length, 1) - t.is(shadow[0].tags, genEncodedTags(clientId, [txDate2])) - - await adapter.run({ - sql: `UPDATE parent SET value = 'local2', other = 'other2' WHERE id = 1`, - }) - - const txDate3 = await satellite._performSnapshot() - shadow = await getMatchingShadowEntries(adapter) - t.is(shadow.length, 1) - t.is(shadow[0].tags, genEncodedTags(clientId, [txDate3])) - - await adapter.run({ - sql: `DELETE FROM parent WHERE id = 1`, - }) - - const txDate4 = await satellite._performSnapshot() - shadow = await getMatchingShadowEntries(adapter) - t.is(shadow.length, 0) - - const entries = await satellite._getEntries() - t.is(entries[0].clearTags, encodeTags([])) - t.is(entries[1].clearTags, genEncodedTags(clientId, [txDate2, txDate1])) - t.is(entries[2].clearTags, genEncodedTags(clientId, [txDate3, txDate2])) - t.is(entries[3].clearTags, genEncodedTags(clientId, [txDate4, txDate3])) - - t.not(txDate1, txDate2) - t.not(txDate2, txDate3) - t.not(txDate3, txDate4) -}) - -test('Tags are correctly set on multiple operations within snapshot/transaction', async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context - await runMigrations() - const clientId = 'test_client' - satellite._setAuthState({ ...authState, clientId }) - - // Insert 4 items in separate snapshots - await adapter.run({ - sql: `INSERT INTO parent (id, value) VALUES (1, 'val1')`, - }) - const ts1 = await satellite._performSnapshot() - await adapter.run({ - sql: `INSERT INTO parent (id, value) VALUES (2, 'val2')`, - }) - const ts2 = await satellite._performSnapshot() - await adapter.run({ - sql: `INSERT INTO parent (id, value) VALUES (3, 'val3')`, - }) - const ts3 = await satellite._performSnapshot() - await adapter.run({ - sql: `INSERT INTO parent (id, value) VALUES (4, 'val4')`, - }) - const ts4 = await satellite._performSnapshot() - - // Now delete them all in a single snapshot - await adapter.run({ sql: `DELETE FROM parent` }) - const ts5 = await satellite._performSnapshot() - - // Now check that each delete clears the correct tag - const entries = await satellite._getEntries(4) - t.deepEqual( - entries.map((x) => x.clearTags), - [ - genEncodedTags(clientId, [ts5, ts1]), - genEncodedTags(clientId, [ts5, ts2]), - genEncodedTags(clientId, [ts5, ts3]), - genEncodedTags(clientId, [ts5, ts4]), +import { relations, ContextType as CommonContextType } from './common' + +export type ContextType = CommonContextType & { + getMatchingShadowEntries: + | typeof getSqliteMatchingShadowEntries + | typeof getPgMatchingShadowEntries +} + +export const processTagsTests = (test: TestFn) => { + test('basic rules for setting tags', async (t) => { + const { + adapter, + 
runMigrations, + satellite, + authState, + getMatchingShadowEntries, + } = t.context + await runMigrations() + + await satellite._setAuthState(authState) + const clientId = satellite._authState?.clientId ?? 'test_client' + + await adapter.run({ + sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', null)`, + }) + + const txDate1 = await satellite._performSnapshot() + let shadow = await getMatchingShadowEntries(adapter) + t.is(shadow.length, 1) + t.is(shadow[0].tags, genEncodedTags(clientId, [txDate1])) + + await adapter.run({ + sql: `UPDATE main.parent SET value = 'local1', other = 3 WHERE id = 1`, + }) + + const txDate2 = await satellite._performSnapshot() + shadow = await getMatchingShadowEntries(adapter) + t.is(shadow.length, 1) + t.is(shadow[0].tags, genEncodedTags(clientId, [txDate2])) + + await adapter.run({ + sql: `UPDATE main.parent SET value = 'local2', other = 4 WHERE id = 1`, + }) + + const txDate3 = await satellite._performSnapshot() + shadow = await getMatchingShadowEntries(adapter) + t.is(shadow.length, 1) + t.is(shadow[0].tags, genEncodedTags(clientId, [txDate3])) + + await adapter.run({ + sql: `DELETE FROM main.parent WHERE id = 1`, + }) + + const txDate4 = await satellite._performSnapshot() + shadow = await getMatchingShadowEntries(adapter) + t.is(shadow.length, 0) + + const entries = await satellite._getEntries() + t.is(entries[0].clearTags, encodeTags([])) + t.is(entries[1].clearTags, genEncodedTags(clientId, [txDate1])) + t.is(entries[2].clearTags, genEncodedTags(clientId, [txDate2])) + t.is(entries[3].clearTags, genEncodedTags(clientId, [txDate3])) + + t.not(txDate1, txDate2) + t.not(txDate2, txDate3) + t.not(txDate3, txDate4) + }) + + test('TX1=INSERT, TX2=DELETE, TX3=INSERT, ack TX1', async (t) => { + const { + adapter, + runMigrations, + satellite, + tableInfo, + authState, + getMatchingShadowEntries, + } = t.context + await runMigrations() + await satellite._setAuthState(authState) + + const clientId = satellite._authState?.clientId ?? 
'test_id' + + // Local INSERT + const stmts1 = { + sql: `INSERT INTO main.parent (id, value, other) VALUES ('1', 'local', null)`, + } + await adapter.runInTransaction(stmts1) + const txDate1 = await satellite._performSnapshot() + + const localEntries1 = await satellite._getEntries() + const shadowEntry1 = await getMatchingShadowEntries( + adapter, + localEntries1[0] + ) + + // shadow tag is time of snapshot + const tag1 = genEncodedTags(clientId, [txDate1]) + t.is(tag1, shadowEntry1[0].tags) + // clearTag is empty + t.like(localEntries1[0], { + clearTags: JSON.stringify([]), + timestamp: txDate1.toISOString(), + }) + + // Local DELETE + const stmts2 = { + sql: `DELETE FROM main.parent WHERE id='1'`, + } + await adapter.runInTransaction(stmts2) + const txDate2 = await satellite._performSnapshot() + + const localEntries2 = await satellite._getEntries() + const shadowEntry2 = await getMatchingShadowEntries( + adapter, + localEntries2[1] + ) + + // shadowTag is empty + t.is(0, shadowEntry2.length) + // clearTags contains previous shadowTag + t.like(localEntries2[1], { + clearTags: tag1, + timestamp: txDate2.toISOString(), + }) + + // Local INSERT + const stmts3 = { + sql: `INSERT INTO main.parent (id, value, other) VALUES ('1', 'local', null)`, + } + await adapter.runInTransaction(stmts3) + const txDate3 = await satellite._performSnapshot() + + const localEntries3 = await satellite._getEntries() + const shadowEntry3 = await getMatchingShadowEntries( + adapter, + localEntries3[1] + ) + + const tag3 = genEncodedTags(clientId, [txDate3]) + // shadow tag is tag3 + t.is(tag3, shadowEntry3[0].tags) + // clearTags is empty after a DELETE + t.like(localEntries3[2], { + clearTags: JSON.stringify([]), + timestamp: txDate3.toISOString(), + }) + + // apply incomig operation (local operation ack) + const ackEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + txDate1.getTime(), + tag1, + { + id: 1, + value: 'local', + other: null, + }, + undefined + ) + + const ackDataChange = opLogEntryToChange(ackEntry, relations) + satellite.relations = relations // satellite must be aware of the relations in order to turn the `ackDataChange` DataChange into an OpLogEntry + const tx = { + origin: clientId, + commit_timestamp: Long.fromNumber((txDate1 as Date).getTime()), + changes: [ackDataChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(tx) + + // validate that garbage collection has been triggered + t.is(2, (await satellite._getEntries()).length) + + const shadow = await getMatchingShadowEntries(adapter) + t.like( + shadow[0], + { + tags: genEncodedTags(clientId, [txDate3]), + }, + 'error: tag1 was reintroduced after merging acked operation' + ) + }) + + test('remote tx (INSERT) concurrently with local tx (INSERT -> DELETE)', async (t) => { + const { + adapter, + runMigrations, + satellite, + tableInfo, + authState, + getMatchingShadowEntries, + } = t.context + await runMigrations() + await satellite._setAuthState(authState) + + const stmts: Statement[] = [] + + // For this key we will choose remote Tx, such that: Local TM > Remote TX + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ('1', 'local', null);`, + }) + stmts.push({ sql: `DELETE FROM main.parent WHERE id = 1` }) + // For this key we will choose remote Tx, such that: Local TM < Remote TX + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ('2', 'local', null);`, + }) + stmts.push({ sql: `DELETE FROM main.parent WHERE id = 2` }) + await 
adapter.runInTransaction(...stmts) + + const txDate1 = await satellite._performSnapshot() + + const prevTs = txDate1.getTime() - 1 + const nextTs = txDate1.getTime() + 1 + + const prevEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + prevTs, + genEncodedTags('remote', [prevTs]), + { + id: 1, + value: 'remote', + other: 1, + }, + undefined + ) + const nextEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + nextTs, + genEncodedTags('remote', [nextTs]), + { + id: 2, + value: 'remote', + other: 2, + }, + undefined + ) + + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s + + const prevChange = opLogEntryToChange(prevEntry, relations) + const prevTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(prevTs), + changes: [prevChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(prevTx) + + const nextChange = opLogEntryToChange(nextEntry, relations) + const nextTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(nextTs), + changes: [nextChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(nextTx) + + const shadow = await getMatchingShadowEntries(adapter) + const expectedShadow = [ + { + namespace: 'main', + tablename: 'parent', + primaryKey: '{"id":1}', + tags: genEncodedTags('remote', [prevTs]), + }, + { + namespace: 'main', + tablename: 'parent', + primaryKey: '{"id":2}', + tags: genEncodedTags('remote', [nextTs]), + }, ] - ) -}) - -test('Tags are correctly set on subsequent operations in a TX', async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context - - await runMigrations() - - await adapter.run({ - sql: `INSERT INTO parent(id, value) VALUES (1,'val1')`, - }) - - // Since no snapshot was made yet - // the timestamp in the oplog is not yet set - const insertEntry = await adapter.query({ - sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 1`, - }) - t.is(insertEntry[0].timestamp, null) - t.deepEqual(JSON.parse(insertEntry[0].clearTags as string), []) - - await satellite._setAuthState(authState) - await satellite._performSnapshot() + t.deepEqual(shadow, expectedShadow) - const parseDate = (date: string) => new Date(date).getTime() + const userTable = await adapter.query({ sql: `SELECT * FROM main.parent;` }) - // Now the timestamp is set - const insertEntryAfterSnapshot = await adapter.query({ - sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 1`, - }) - t.assert(insertEntryAfterSnapshot[0].timestamp != null) - const insertTimestamp = parseDate( - insertEntryAfterSnapshot[0].timestamp as string - ) - t.deepEqual(JSON.parse(insertEntryAfterSnapshot[0].clearTags as string), []) - - // Now update the entry, then delete it, and then insert it again - await adapter.run({ - sql: `UPDATE parent SET value = 'val2' WHERE id=1`, - }) - - await adapter.run({ - sql: `DELETE FROM parent WHERE id=1`, - }) - - await adapter.run({ - sql: `INSERT INTO parent(id, value) VALUES (1,'val3')`, - }) - - // Since no snapshot has been taken for these operations - // their timestamp and clearTags should not be set - const updateEntry = await adapter.query({ - sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 2`, - }) - - t.is(updateEntry[0].timestamp, null) - t.deepEqual(JSON.parse(updateEntry[0].clearTags as string), []) - - const deleteEntry = await adapter.query({ - sql: `SELECT timestamp, clearTags FROM _electric_oplog 
WHERE rowid = 3`, - }) - - t.is(deleteEntry[0].timestamp, null) - t.deepEqual(JSON.parse(deleteEntry[0].clearTags as string), []) - - const reinsertEntry = await adapter.query({ - sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 4`, - }) - - t.is(reinsertEntry[0].timestamp, null) - t.deepEqual(JSON.parse(reinsertEntry[0].clearTags as string), []) - - // Now take a snapshot for these operations - await satellite._performSnapshot() - - // Now the timestamps should be set - // The first operation (update) should override - // the original insert (i.e. clearTags must contain the timestamp of the insert) - const updateEntryAfterSnapshot = await adapter.query({ - sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 2`, - }) + // In both cases insert wins over delete, but + // for id = 1 CR picks local data before delete, while + // for id = 2 CR picks remote data + const expectedUserTable = [ + { id: 1, value: 'local', other: null }, + { id: 2, value: 'remote', other: 2 }, + ] + t.deepEqual(expectedUserTable, userTable) + }) + + test('remote tx (INSERT) concurrently with 2 local txses (INSERT -> DELETE)', async (t) => { + const { + adapter, + runMigrations, + satellite, + tableInfo, + authState, + getMatchingShadowEntries, + } = t.context + await runMigrations() + await satellite._setAuthState(authState) + + let stmts: Statement[] = [] + + // For this key we will choose remote Tx, such that: Local TM > Remote TX + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ('1', 'local', null);`, + }) + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ('2', 'local', null);`, + }) + await adapter.runInTransaction(...stmts) + const txDate1 = await satellite._performSnapshot() + + stmts = [] + // For this key we will choose remote Tx, such that: Local TM < Remote TX + stmts.push({ sql: `DELETE FROM main.parent WHERE id = 1` }) + stmts.push({ sql: `DELETE FROM main.parent WHERE id = 2` }) + await adapter.runInTransaction(...stmts) + await satellite._performSnapshot() + + const prevTs = txDate1.getTime() - 1 + const nextTs = txDate1.getTime() + 1 + + const prevEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + prevTs, + genEncodedTags('remote', [prevTs]), + { + id: 1, + value: 'remote', + other: 1, + }, + undefined + ) + const nextEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + nextTs, + genEncodedTags('remote', [nextTs]), + { + id: 2, + value: 'remote', + other: 2, + }, + undefined + ) + + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s in `_applyTransaction` + + const prevChange = opLogEntryToChange(prevEntry, relations) + const prevTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(prevTs), + changes: [prevChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(prevTx) + + const nextChange = opLogEntryToChange(nextEntry, relations) + const nextTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(nextTs), + changes: [nextChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(nextTx) + + const shadow = await getMatchingShadowEntries(adapter) + const expectedShadow = [ + { + namespace: 'main', + tablename: 'parent', + primaryKey: '{"id":1}', + tags: genEncodedTags('remote', [prevTs]), + }, + { + namespace: 'main', + tablename: 'parent', + primaryKey: '{"id":2}', + tags: genEncodedTags('remote', [nextTs]), + }, + ] + 
t.deepEqual(shadow, expectedShadow) - const rawTimestampTx2 = updateEntryAfterSnapshot[0].timestamp - t.assert(rawTimestampTx2 != null) - const timestampTx2 = parseDate(rawTimestampTx2 as string) + let userTable = await adapter.query({ sql: `SELECT * FROM main.parent;` }) - t.is( - updateEntryAfterSnapshot[0].clearTags, - genEncodedTags(authState.clientId, [timestampTx2, insertTimestamp]) - ) + // In both cases insert wins over delete, but + // for id = 1 CR picks local data before delete, while + // for id = 2 CR picks remote data + const expectedUserTable = [ + { id: 1, value: 'local', other: null }, + { id: 2, value: 'remote', other: 2 }, + ] + t.deepEqual(expectedUserTable, userTable) + }) + + test('Tags are correctly set on multiple operations within snapshot/transaction', async (t) => { + const { adapter, runMigrations, satellite, authState } = t.context + await runMigrations() + const clientId = 'test_client' + satellite._setAuthState({ ...authState, clientId }) + + // Insert 4 items in separate snapshots + await adapter.run({ + sql: `INSERT INTO parent (id, value) VALUES (1, 'val1')`, + }) + const ts1 = await satellite._performSnapshot() + await adapter.run({ + sql: `INSERT INTO parent (id, value) VALUES (2, 'val2')`, + }) + const ts2 = await satellite._performSnapshot() + await adapter.run({ + sql: `INSERT INTO parent (id, value) VALUES (3, 'val3')`, + }) + const ts3 = await satellite._performSnapshot() + await adapter.run({ + sql: `INSERT INTO parent (id, value) VALUES (4, 'val4')`, + }) + const ts4 = await satellite._performSnapshot() + + // Now delete them all in a single snapshot + await adapter.run({ sql: `DELETE FROM parent` }) + const ts5 = await satellite._performSnapshot() + + // Now check that each delete clears the correct tag + const entries = await satellite._getEntries(4) + t.deepEqual( + entries.map((x) => x.clearTags), + [ + genEncodedTags(clientId, [ts5, ts1]), + genEncodedTags(clientId, [ts5, ts2]), + genEncodedTags(clientId, [ts5, ts3]), + genEncodedTags(clientId, [ts5, ts4]), + ] + ) + }) + + test('Tags are correctly set on subsequent operations in a TX', async (t) => { + const { adapter, runMigrations, satellite, authState } = t.context + + await runMigrations() + + await adapter.run({ + sql: `INSERT INTO parent(id, value) VALUES (1,'val1')`, + }) + + // Since no snapshot was made yet + // the timestamp in the oplog is not yet set + const insertEntry = await adapter.query({ + sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 1`, + }) + t.is(insertEntry[0].timestamp, null) + t.deepEqual(JSON.parse(insertEntry[0].clearTags as string), []) + + await satellite._setAuthState(authState) + await satellite._performSnapshot() + + const parseDate = (date: string) => new Date(date).getTime() + + // Now the timestamp is set + const insertEntryAfterSnapshot = await adapter.query({ + sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 1`, + }) + t.assert(insertEntryAfterSnapshot[0].timestamp != null) + const insertTimestamp = parseDate( + insertEntryAfterSnapshot[0].timestamp as string + ) + t.deepEqual(JSON.parse(insertEntryAfterSnapshot[0].clearTags as string), []) + + // Now update the entry, then delete it, and then insert it again + await adapter.run({ + sql: `UPDATE parent SET value = 'val2' WHERE id=1`, + }) + + await adapter.run({ + sql: `DELETE FROM parent WHERE id=1`, + }) + + await adapter.run({ + sql: `INSERT INTO parent(id, value) VALUES (1,'val3')`, + }) + + // Since no snapshot has been taken for these operations + // their 
timestamp and clearTags should not be set + const updateEntry = await adapter.query({ + sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 2`, + }) + + t.is(updateEntry[0].timestamp, null) + t.deepEqual(JSON.parse(updateEntry[0].clearTags as string), []) + + const deleteEntry = await adapter.query({ + sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 3`, + }) + + t.is(deleteEntry[0].timestamp, null) + t.deepEqual(JSON.parse(deleteEntry[0].clearTags as string), []) + + const reinsertEntry = await adapter.query({ + sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 4`, + }) + + t.is(reinsertEntry[0].timestamp, null) + t.deepEqual(JSON.parse(reinsertEntry[0].clearTags as string), []) + + // Now take a snapshot for these operations + await satellite._performSnapshot() + + // Now the timestamps should be set + // The first operation (update) should override + // the original insert (i.e. clearTags must contain the timestamp of the insert) + const updateEntryAfterSnapshot = await adapter.query({ + sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 2`, + }) + + const rawTimestampTx2 = updateEntryAfterSnapshot[0].timestamp + t.assert(rawTimestampTx2 != null) + const timestampTx2 = parseDate(rawTimestampTx2 as string) + + t.is( + updateEntryAfterSnapshot[0].clearTags, + genEncodedTags(authState.clientId, [insertTimestamp]) + ) + + // The second operation (delete) should have the same timestamp + // and should contain the tag of the TX in its clearTags + const deleteEntryAfterSnapshot = await adapter.query({ + sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 3`, + }) + + t.assert(deleteEntryAfterSnapshot[0].timestamp === rawTimestampTx2) + t.is( + deleteEntryAfterSnapshot[0].clearTags, + genEncodedTags(authState.clientId, [timestampTx2]) + ) + + // The third operation (reinsert) should have the same timestamp + // and should contain the tag of the TX in its clearTags + const reinsertEntryAfterSnapshot = await adapter.query({ + sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 4`, + }) + + t.assert(reinsertEntryAfterSnapshot[0].timestamp === rawTimestampTx2) + t.is( + reinsertEntryAfterSnapshot[0].clearTags, + genEncodedTags(authState.clientId, [timestampTx2]) + ) + }) + + test('remote tx (INSERT) concurrently with local tx (INSERT -> UPDATE)', async (t) => { + const { + adapter, + runMigrations, + satellite, + tableInfo, + authState, + getMatchingShadowEntries, + } = t.context + await runMigrations() + await satellite._setAuthState(authState) + const clientId = satellite._authState?.clientId ?? 
'test_id' + let stmts: Statement[] = [] + + // For this key we will choose remote Tx, such that: Local TM > Remote TX + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ('1', 'local', null);`, + }) + stmts.push({ + sql: `UPDATE main.parent SET value = 'local', other = 999 WHERE id = 1`, + }) + // For this key we will choose remote Tx, such that: Local TM < Remote TX + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ('2', 'local', null);`, + }) + stmts.push({ + sql: `UPDATE main.parent SET value = 'local', other = 999 WHERE id = 1`, + }) + await adapter.runInTransaction(...stmts) + + const txDate1 = await satellite._performSnapshot() + + const prevTs = txDate1.getTime() - 1 + const nextTs = txDate1.getTime() + 1 + + const prevEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + prevTs, + genEncodedTags('remote', [prevTs]), + { + id: 1, + value: 'remote', + other: 1, + }, + undefined + ) + + const nextEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + nextTs, + genEncodedTags('remote', [nextTs]), + { + id: 2, + value: 'remote', + other: 2, + }, + undefined + ) + + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s in `_applyTransaction` + + const prevChange = opLogEntryToChange(prevEntry, relations) + const prevTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(prevTs), + changes: [prevChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(prevTx) + + const nextChange = opLogEntryToChange(nextEntry, relations) + const nextTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(nextTs), + changes: [nextChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(nextTx) + + let shadow = await getMatchingShadowEntries(adapter) + const expectedShadow = [ + { + namespace: 'main', + tablename: 'parent', + primaryKey: '{"id":1}', + tags: encodeTags([ + generateTag(clientId, new Date(txDate1)), + generateTag('remote', new Date(prevTs)), + ]), + }, + { + namespace: 'main', + tablename: 'parent', + primaryKey: '{"id":2}', + tags: encodeTags([ + generateTag(clientId, new Date(txDate1)), + generateTag('remote', new Date(nextTs)), + ]), + }, + ] + t.deepEqual(shadow, expectedShadow) - // The second operation (delete) should have the same timestamp - // and should contain the tag of the TX in its clearTags - const deleteEntryAfterSnapshot = await adapter.query({ - sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 3`, - }) + let entries = await satellite._getEntries() - t.assert(deleteEntryAfterSnapshot[0].timestamp === rawTimestampTx2) - t.is( - deleteEntryAfterSnapshot[0].clearTags, - genEncodedTags(authState.clientId, [timestampTx2, insertTimestamp]) - ) + // Given that Insert and Update happen within the same transaction clear should not + // contain itself + t.is(entries[0].clearTags, encodeTags([])) + t.is(entries[1].clearTags, encodeTags([])) + t.is(entries[2].clearTags, encodeTags([])) + t.is(entries[3].clearTags, encodeTags([])) - // The third operation (reinsert) should have the same timestamp - // and should contain the tag of the TX in its clearTags - const reinsertEntryAfterSnapshot = await adapter.query({ - sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 4`, - }) + let userTable = await adapter.query({ sql: `SELECT * FROM main.parent;` }) - t.assert(reinsertEntryAfterSnapshot[0].timestamp === rawTimestampTx2) - 
t.is( - reinsertEntryAfterSnapshot[0].clearTags, - genEncodedTags(authState.clientId, [timestampTx2, insertTimestamp]) - ) -}) - -test('TX1=INSERT, TX2=DELETE, TX3=INSERT, ack TX1', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - - const clientId = satellite._authState?.clientId ?? 'test_id' - - // Local INSERT - const stmts1 = { - sql: `INSERT INTO parent (id, value, other) VALUES (?, ?, ?)`, - args: ['1', 'local', null], - } - await adapter.runInTransaction(stmts1) - const txDate1 = await satellite._performSnapshot() - - const localEntries1 = await satellite._getEntries() - const shadowEntry1 = await getMatchingShadowEntries(adapter, localEntries1[0]) - - // shadow tag is time of snapshot - const tag1 = genEncodedTags(clientId, [txDate1]) - t.is(tag1, shadowEntry1[0].tags) - // clearTag is empty - t.like(localEntries1[0], { - clearTags: JSON.stringify([]), - timestamp: txDate1.toISOString(), - }) + // In both cases insert wins over delete, but + // for id = 1 CR picks local data before delete, while + // for id = 2 CR picks remote data + const expectedUserTable = [ + { id: 1, value: 'local', other: 999 }, + { id: 2, value: 'remote', other: 2 }, + ] + t.deepEqual(expectedUserTable, userTable) + }) + + test('origin tx (INSERT) concurrently with local txses (INSERT -> DELETE)', async (t) => { + // + const { + adapter, + runMigrations, + satellite, + tableInfo, + authState, + getMatchingShadowEntries, + } = t.context + await runMigrations() + await satellite._setAuthState(authState) + const clientId = satellite._authState?.clientId ?? 'test_id' + + let stmts: Statement[] = [] + + // For this key we will choose remote Tx, such that: Local TM > Remote TX + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ('1', 'local', null);`, + }) + stmts.push({ + sql: `INSERT INTO main.parent (id, value, other) VALUES ('2', 'local', null);`, + }) + await adapter.runInTransaction(...stmts) + const txDate1 = await satellite._performSnapshot() + + stmts = [] + // For this key we will choose remote Tx, such that: Local TM < Remote TX + stmts.push({ sql: `DELETE FROM main.parent WHERE id = 1` }) + stmts.push({ sql: `DELETE FROM main.parent WHERE id = 2` }) + await adapter.runInTransaction(...stmts) + await satellite._performSnapshot() + + let entries = await satellite._getEntries() + t.assert(entries[0].newRow) + t.assert(entries[1]) + t.assert(entries[1].newRow) + + // For this key we receive transaction which was older + const electricEntrySameTs = new Date(entries[0].timestamp).getTime() + let electricEntrySame = generateRemoteOplogEntry( + tableInfo, + entries[0].namespace, + entries[0].tablename, + OPTYPES.insert, + electricEntrySameTs, + genEncodedTags(clientId, [txDate1]), + JSON.parse(entries[0].newRow!), + undefined + ) + + // For this key we had concurrent insert transaction from another node `remote` + // with same timestamp + const electricEntryConflictTs = new Date(entries[1].timestamp).getTime() + let electricEntryConflict = generateRemoteOplogEntry( + tableInfo, + entries[1].namespace, + entries[1].tablename, + OPTYPES.insert, + electricEntryConflictTs, + encodeTags([ + generateTag(clientId, txDate1), + generateTag('remote', txDate1), + ]), + JSON.parse(entries[1].newRow!), + undefined + ) + + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s in `_applyTransaction` + + const 
electricEntrySameChange = opLogEntryToChange( + electricEntrySame, + relations + ) + const electricEntryConflictChange = opLogEntryToChange( + electricEntryConflict, + relations + ) + const tx = { + origin: clientId, + commit_timestamp: Long.fromNumber(new Date().getTime()), // commit_timestamp doesn't matter for this test, it is only used to GC the oplog + changes: [electricEntrySameChange, electricEntryConflictChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(tx) + + let shadow = await getMatchingShadowEntries(adapter) + const expectedShadow = [ + { + namespace: 'main', + tablename: 'parent', + primaryKey: '{"id":2}', + tags: genEncodedTags('remote', [txDate1]), + }, + ] + t.deepEqual(shadow, expectedShadow) + + let userTable = await adapter.query({ sql: `SELECT * FROM main.parent;` }) + const expectedUserTable = [{ id: 2, value: 'local', other: null }] + t.deepEqual(expectedUserTable, userTable) + }) + + test('local (INSERT -> UPDATE -> DELETE) with remote equivalent', async (t) => { + const { + runMigrations, + satellite, + tableInfo, + authState, + adapter, + getMatchingShadowEntries, + } = t.context + await runMigrations() + await satellite._setAuthState(authState) + const clientId = satellite._authState?.clientId ?? 'test_id' + let txDate1 = new Date().getTime() + + const insertEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.update, + txDate1, + genEncodedTags('remote', [txDate1]), + { + id: 1, + value: 'local', + }, + undefined + ) + + const deleteDate = txDate1 + 1 + const deleteEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.delete, + deleteDate, + genEncodedTags('remote', []), + { + id: 1, + value: 'local', + }, + undefined + ) + + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s in `_applyTransaction` + + const insertChange = opLogEntryToChange(insertEntry, relations) + const insertTx = { + origin: clientId, + commit_timestamp: Long.fromNumber(txDate1), + changes: [insertChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(insertTx) + + let shadow = await getMatchingShadowEntries(adapter) + const expectedShadow = [ + { + namespace: 'main', + tablename: 'parent', + primaryKey: '{"id":1}', + tags: genEncodedTags('remote', [txDate1]), + }, + ] + t.deepEqual(shadow, expectedShadow) - // Local DELETE - const stmts2 = { - sql: `DELETE FROM parent WHERE id=?`, - args: ['1'], - } - await adapter.runInTransaction(stmts2) - const txDate2 = await satellite._performSnapshot() - - const localEntries2 = await satellite._getEntries() - const shadowEntry2 = await getMatchingShadowEntries(adapter, localEntries2[1]) - - // shadowTag is empty - t.is(0, shadowEntry2.length) - // clearTags contains previous shadowTag - t.like(localEntries2[1], { - clearTags: genEncodedTags(clientId, [txDate2, txDate1]), - timestamp: txDate2.toISOString(), - }) + const deleteChange = opLogEntryToChange(deleteEntry, relations) + const deleteTx = { + origin: clientId, + commit_timestamp: Long.fromNumber(deleteDate), + changes: [deleteChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(deleteTx) - // Local INSERT - const stmts3 = { - sql: `INSERT INTO parent (id, value, other) VALUES (?, ?, ?)`, - args: ['1', 'local', null], - } - await adapter.runInTransaction(stmts3) - const txDate3 = await satellite._performSnapshot() - - const localEntries3 = await satellite._getEntries() - const shadowEntry3 = await 
getMatchingShadowEntries(adapter, localEntries3[1]) - - const tag3 = genEncodedTags(clientId, [txDate3]) - // shadow tag is tag3 - t.is(tag3, shadowEntry3[0].tags) - // clearTags is empty after a DELETE - t.like(localEntries3[2], { - clearTags: JSON.stringify([]), - timestamp: txDate3.toISOString(), - }) + shadow = await getMatchingShadowEntries(adapter) + t.deepEqual([], shadow) - // apply incomig operation (local operation ack) - const ackEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - txDate1.getTime(), - tag1, - { - id: 1, - value: 'local', - other: null, - }, - undefined - ) - - const ackDataChange = opLogEntryToChange(ackEntry, relations) - satellite.relations = relations // satellite must be aware of the relations in order to turn the `ackDataChange` DataChange into an OpLogEntry - const tx = { - origin: clientId, - commit_timestamp: Long.fromNumber((txDate1 as Date).getTime()), - changes: [ackDataChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(tx) - - // validate that garbage collection has been triggered - t.is(2, (await satellite._getEntries()).length) - - const shadow = await getMatchingShadowEntries(adapter) - t.like( - shadow[0], - { - tags: genEncodedTags(clientId, [txDate3]), - }, - 'error: tag1 was reintroduced after merging acked operation' - ) -}) - -test('remote tx (INSERT) concurrently with local tx (INSERT -> DELETE)', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - - const stmts: Statement[] = [] - - // For this key we will choose remote Tx, such that: Local TM > Remote TX - stmts.push({ - sql: `INSERT INTO parent (id, value, other) VALUES (?, ?, ?);`, - args: ['1', 'local', null], - }) - stmts.push({ sql: `DELETE FROM parent WHERE id = 1` }) - // For this key we will choose remote Tx, such that: Local TM < Remote TX - stmts.push({ - sql: `INSERT INTO parent (id, value, other) VALUES (?, ?, ?);`, - args: ['2', 'local', null], - }) - stmts.push({ sql: `DELETE FROM parent WHERE id = 2` }) - await adapter.runInTransaction(...stmts) - - const txDate1 = await satellite._performSnapshot() - - const prevTs = txDate1.getTime() - 1 - const nextTs = txDate1.getTime() + 1 - - const prevEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - prevTs, - genEncodedTags('remote', [prevTs]), - { - id: 1, - value: 'remote', - other: 1, - }, - undefined - ) - const nextEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - nextTs, - genEncodedTags('remote', [nextTs]), - { - id: 2, - value: 'remote', - other: 2, - }, - undefined - ) - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s - - const prevChange = opLogEntryToChange(prevEntry, relations) - const prevTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(prevTs), - changes: [prevChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(prevTx) - - const nextChange = opLogEntryToChange(nextEntry, relations) - const nextTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(nextTs), - changes: [nextChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(nextTx) - - const shadow = await getMatchingShadowEntries(adapter) - const expectedShadow = [ - { - namespace: 'main', - tablename: 'parent', - primaryKey: '{"id":1}', - tags: genEncodedTags('remote', [prevTs]), 
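        // Note on the tag helpers used throughout these assertions (a sketch of the
        // assumed behaviour of the existing helpers): `generateTag(origin, ts)` yields
        // an origin-scoped tag, presumably of the form 'remote@<millis>', `encodeTags`
        // JSON-encodes the array of tags (hence the `JSON.parse(...clearTags)` and
        // `JSON.stringify([])` comparisons elsewhere in these tests), and
        // `genEncodedTags(origin, [ts, ...])` is shorthand for
        // `encodeTags(timestamps.map((ts) => generateTag(origin, ts)))`.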
- }, - { - namespace: 'main', - tablename: 'parent', - primaryKey: '{"id":2}', - tags: genEncodedTags('remote', [nextTs]), - }, - ] - t.deepEqual(shadow, expectedShadow) - - const userTable = await adapter.query({ sql: `SELECT * FROM parent;` }) - - // In both cases insert wins over delete, but - // for id = 1 CR picks local data before delete, while - // for id = 2 CR picks remote data - const expectedUserTable = [ - { id: 1, value: 'local', other: null }, - { id: 2, value: 'remote', other: 2 }, - ] - t.deepEqual(expectedUserTable, userTable) -}) - -test('remote tx (INSERT) concurrently with 2 local txses (INSERT -> DELETE)', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - - let stmts: Statement[] = [] - - // For this key we will choose remote Tx, such that: Local TM > Remote TX - stmts.push({ - sql: `INSERT INTO parent (id, value, other) VALUES (?, ?, ?);`, - args: ['1', 'local', null], - }) - stmts.push({ - sql: `INSERT INTO parent (id, value, other) VALUES (?, ?, ?);`, - args: ['2', 'local', null], - }) - await adapter.runInTransaction(...stmts) - const txDate1 = await satellite._performSnapshot() - - stmts = [] - // For this key we will choose remote Tx, such that: Local TM < Remote TX - stmts.push({ sql: `DELETE FROM parent WHERE id = 1` }) - stmts.push({ sql: `DELETE FROM parent WHERE id = 2` }) - await adapter.runInTransaction(...stmts) - await satellite._performSnapshot() - - const prevTs = txDate1.getTime() - 1 - const nextTs = txDate1.getTime() + 1 - - const prevEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - prevTs, - genEncodedTags('remote', [prevTs]), - { - id: 1, - value: 'remote', - other: 1, - }, - undefined - ) - const nextEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - nextTs, - genEncodedTags('remote', [nextTs]), - { - id: 2, - value: 'remote', - other: 2, - }, - undefined - ) - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s in `_applyTransaction` - - const prevChange = opLogEntryToChange(prevEntry, relations) - const prevTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(prevTs), - changes: [prevChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(prevTx) - - const nextChange = opLogEntryToChange(nextEntry, relations) - const nextTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(nextTs), - changes: [nextChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(nextTx) - - const shadow = await getMatchingShadowEntries(adapter) - const expectedShadow = [ - { - namespace: 'main', - tablename: 'parent', - primaryKey: '{"id":1}', - tags: genEncodedTags('remote', [prevTs]), - }, - { - namespace: 'main', - tablename: 'parent', - primaryKey: '{"id":2}', - tags: genEncodedTags('remote', [nextTs]), - }, - ] - t.deepEqual(shadow, expectedShadow) - - let userTable = await adapter.query({ sql: `SELECT * FROM parent;` }) - - // In both cases insert wins over delete, but - // for id = 1 CR picks local data before delete, while - // for id = 2 CR picks remote data - const expectedUserTable = [ - { id: 1, value: 'local', other: null }, - { id: 2, value: 'remote', other: 2 }, - ] - t.deepEqual(expectedUserTable, userTable) -}) - -test('remote tx (INSERT) concurrently with local tx (INSERT -> UPDATE)', async (t) => { - const { adapter, runMigrations, 
satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - const clientId = satellite._authState?.clientId ?? 'test_id' - let stmts: Statement[] = [] - - // For this key we will choose remote Tx, such that: Local TM > Remote TX - stmts.push({ - sql: `INSERT INTO parent (id, value, other) VALUES (?, ?, ?);`, - args: ['1', 'local', null], - }) - stmts.push({ - sql: `UPDATE parent SET value = ?, other = ? WHERE id = 1`, - args: ['local', 'not_null'], - }) - // For this key we will choose remote Tx, such that: Local TM < Remote TX - stmts.push({ - sql: `INSERT INTO parent (id, value, other) VALUES (?, ?, ?);`, - args: ['2', 'local', null], - }) - stmts.push({ - sql: `UPDATE parent SET value = ?, other = ? WHERE id = 1`, - args: ['local', 'not_null'], - }) - await adapter.runInTransaction(...stmts) - - const txDate1 = await satellite._performSnapshot() - - const prevTs = txDate1.getTime() - 1 - const nextTs = txDate1.getTime() + 1 - - const prevEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - prevTs, - genEncodedTags('remote', [prevTs]), - { - id: 1, - value: 'remote', - other: 1, - }, - undefined - ) - - const nextEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - nextTs, - genEncodedTags('remote', [nextTs]), - { - id: 2, - value: 'remote', - other: 2, - }, - undefined - ) - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s in `_applyTransaction` - - const prevChange = opLogEntryToChange(prevEntry, relations) - const prevTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(prevTs), - changes: [prevChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(prevTx) - - const nextChange = opLogEntryToChange(nextEntry, relations) - const nextTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(nextTs), - changes: [nextChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(nextTx) - - let shadow = await getMatchingShadowEntries(adapter) - const expectedShadow = [ - { - namespace: 'main', - tablename: 'parent', - primaryKey: '{"id":1}', - tags: encodeTags([ - generateTag(clientId, new Date(txDate1)), - generateTag('remote', new Date(prevTs)), - ]), - }, - { - namespace: 'main', - tablename: 'parent', - primaryKey: '{"id":2}', - tags: encodeTags([ - generateTag(clientId, new Date(txDate1)), - generateTag('remote', new Date(nextTs)), - ]), - }, - ] - t.deepEqual(shadow, expectedShadow) - - let entries = await satellite._getEntries() - - // Given that Insert and Update happen within the same transaction clear should not - // contain itself - t.is(entries[0].clearTags, encodeTags([])) - t.is(entries[1].clearTags, encodeTags([])) - t.is(entries[2].clearTags, encodeTags([])) - t.is(entries[3].clearTags, encodeTags([])) - - let userTable = await adapter.query({ sql: `SELECT * FROM parent;` }) - - // In both cases insert wins over delete, but - // for id = 1 CR picks local data before delete, while - // for id = 2 CR picks remote data - const expectedUserTable = [ - { id: 1, value: 'local', other: 'not_null' }, - { id: 2, value: 'remote', other: 2 }, - ] - t.deepEqual(expectedUserTable, userTable) -}) - -test('origin tx (INSERT) concurrently with local txses (INSERT -> DELETE)', async (t) => { - // - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - 
const clientId = satellite._authState?.clientId ?? 'test_id' - - let stmts: Statement[] = [] - - // For this key we will choose remote Tx, such that: Local TM > Remote TX - stmts.push({ - sql: `INSERT INTO parent (id, value, other) VALUES (?, ?, ?);`, - args: ['1', 'local', null], - }) - stmts.push({ - sql: `INSERT INTO parent (id, value, other) VALUES (?, ?, ?);`, - args: ['2', 'local', null], + let entries = await satellite._getEntries(0) + t.deepEqual([], entries) }) - await adapter.runInTransaction(...stmts) - const txDate1 = await satellite._performSnapshot() - - stmts = [] - // For this key we will choose remote Tx, such that: Local TM < Remote TX - stmts.push({ sql: `DELETE FROM parent WHERE id = 1` }) - stmts.push({ sql: `DELETE FROM parent WHERE id = 2` }) - await adapter.runInTransaction(...stmts) - await satellite._performSnapshot() - - let entries = await satellite._getEntries() - t.assert(entries[0].newRow) - t.assert(entries[1]) - t.assert(entries[1].newRow) - - // For this key we receive transaction which was older - const electricEntrySameTs = new Date(entries[0].timestamp).getTime() - let electricEntrySame = generateRemoteOplogEntry( - tableInfo, - entries[0].namespace, - entries[0].tablename, - OPTYPES.insert, - electricEntrySameTs, - '[]', - JSON.parse(entries[0].newRow!), - undefined - ) - - // For this key we had concurrent insert transaction from another node `remote` - // with same timestamp - const electricEntryConflictTs = new Date(entries[1].timestamp).getTime() - let electricEntryConflict = generateRemoteOplogEntry( - tableInfo, - entries[1].namespace, - entries[1].tablename, - OPTYPES.insert, - electricEntryConflictTs, - encodeTags([generateTag('remote', txDate1)]), - JSON.parse(entries[1].newRow!), - undefined - ) - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s in `_applyTransaction` - - const electricEntrySameChange = opLogEntryToChange( - electricEntrySame, - relations - ) - const electricEntryConflictChange = opLogEntryToChange( - electricEntryConflict, - relations - ) - const tx = { - origin: clientId, - commit_timestamp: Long.fromNumber(new Date().getTime()), // commit_timestamp doesn't matter for this test, it is only used to GC the oplog - changes: [electricEntrySameChange, electricEntryConflictChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(tx) - - let shadow = await getMatchingShadowEntries(adapter) - const expectedShadow = [ - { - namespace: 'main', - tablename: 'parent', - primaryKey: '{"id":2}', - tags: genEncodedTags('remote', [txDate1]), - }, - ] - t.deepEqual(shadow, expectedShadow) - - let userTable = await adapter.query({ sql: `SELECT * FROM parent;` }) - const expectedUserTable = [{ id: 2, value: 'local', other: null }] - t.deepEqual(expectedUserTable, userTable) -}) - -test('local (INSERT -> UPDATE -> DELETE) with remote equivalent', async (t) => { - const { runMigrations, satellite, tableInfo, authState, adapter } = t.context - await runMigrations() - await satellite._setAuthState(authState) - const clientId = satellite._authState?.clientId ?? 
'test_id' - let txDate1 = new Date().getTime() - - const insertEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.update, - txDate1, - genEncodedTags('remote', [txDate1]), - { - id: 1, - value: 'local', - }, - undefined - ) - - const deleteDate = txDate1 + 1 - const deleteEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.delete, - deleteDate, - genEncodedTags('remote', []), - { - id: 1, - value: 'local', - }, - undefined - ) - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s in `_applyTransaction` - - const insertChange = opLogEntryToChange(insertEntry, relations) - const insertTx = { - origin: clientId, - commit_timestamp: Long.fromNumber(txDate1), - changes: [insertChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(insertTx) - - let shadow = await getMatchingShadowEntries(adapter) - const expectedShadow = [ - { - namespace: 'main', - tablename: 'parent', - primaryKey: '{"id":1}', - tags: genEncodedTags('remote', [txDate1]), - }, - ] - t.deepEqual(shadow, expectedShadow) - - const deleteChange = opLogEntryToChange(deleteEntry, relations) - const deleteTx = { - origin: clientId, - commit_timestamp: Long.fromNumber(deleteDate), - changes: [deleteChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(deleteTx) - - shadow = await getMatchingShadowEntries(adapter) - t.deepEqual([], shadow) - - let entries = await satellite._getEntries(0) - t.deepEqual([], entries) -}) +} diff --git a/clients/typescript/test/satellite/sqlite/process.tags.test.ts b/clients/typescript/test/satellite/sqlite/process.tags.test.ts new file mode 100644 index 0000000000..31738feb7f --- /dev/null +++ b/clients/typescript/test/satellite/sqlite/process.tags.test.ts @@ -0,0 +1,15 @@ +import anyTest, { TestFn } from 'ava' + +import { makeContext, cleanAndStopSatellite } from '../common' + +import { processTagsTests, ContextType } from '../process.tags.test' +import { getMatchingShadowEntries } from '../../support/satellite-helpers' + +const test = anyTest as TestFn +test.beforeEach(async (t) => { + await makeContext(t) + t.context.getMatchingShadowEntries = getMatchingShadowEntries +}) +test.afterEach.always(cleanAndStopSatellite) + +processTagsTests(test) From adfa31c388d74b3d1ee78d8937a830bb19e33414 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 22 Feb 2024 09:58:51 +0100 Subject: [PATCH 015/156] Modified getTableInfo in sqlite builder to return only the columns we need --- .../migrators/query-builder/sqliteBuilder.ts | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts index c12f53f876..ba90f1df99 100644 --- a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -23,6 +23,25 @@ class SqliteBuilder extends QueryBuilder { return [query] } + countTablesIn(countName: string, tables: string[]): Statement { + const sql = dedent` + SELECT count(name) as ${countName} FROM sqlite_master + WHERE type='table' + AND name IN (${tables.map(() => '?').join(', ')}) + ` + return { + sql, + args: tables, + } + } + + getTableInfo(tablename: string): Statement { + return { + sql: `SELECT name, type, "notnull", dflt_value, pk FROM pragma_table_info(?)`, + args: [tablename], + } + } + createIndex( indexName: string, onTable: 
QualifiedTablename, From 31524ebf6651792c2bec329a2b3731bf22f062c3 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 22 Feb 2024 09:59:33 +0100 Subject: [PATCH 016/156] Refactored process.migration.test --- .../postgres/process.migration.test.ts | 797 +---------- .../test/satellite/process.migration.test.ts | 1267 +++++++++-------- .../sqlite/process.migration.test.ts | 21 + 3 files changed, 672 insertions(+), 1413 deletions(-) create mode 100644 clients/typescript/test/satellite/sqlite/process.migration.test.ts diff --git a/clients/typescript/test/satellite/postgres/process.migration.test.ts b/clients/typescript/test/satellite/postgres/process.migration.test.ts index 002c75866a..7f10839ebd 100644 --- a/clients/typescript/test/satellite/postgres/process.migration.test.ts +++ b/clients/typescript/test/satellite/postgres/process.migration.test.ts @@ -1,795 +1,22 @@ -import testAny, { ExecutionContext, TestFn } from 'ava' -import isequal from 'lodash.isequal' -import Long from 'long' -import { - SatOpMigrate_Type, - SatRelation_RelationType, -} from '../../../src/_generated/protocol/satellite' -import { generateTag } from '../../../src/satellite/oplog' -import { - DataChange, - DataChangeType, - Row, - SchemaChange, - Statement, - Transaction, -} from '../../../src/util' +import testAny, { TestFn } from 'ava' +import { cleanAndStopSatellite, makePgContext } from '../common' +import { getPgMatchingShadowEntries } from '../../support/satellite-helpers' +import { pgBuilder } from '../../../src/migrators/query-builder' import { + commonSetup, ContextType, - cleanAndStopSatellite, - makePgContext, - relations, -} from '../common' -import { getPgMatchingShadowEntries as getMatchingShadowEntries } from '../../support/satellite-helpers' -import { DatabaseAdapter } from '../../../src/electric/adapter' -import { pgBuilder } from '../../../src/migrators/query-builder' -import isEqual from 'lodash.isequal' + processMigrationTests, +} from '../process.migration.test' -type CurrentContext = ContextType<{ clientId: string; txDate: Date }> -const test = testAny as TestFn -const builder = pgBuilder +const test = testAny as TestFn let port = 5000 test.beforeEach(async (t) => { await makePgContext(t, port++) - const { satellite, authState } = t.context - await satellite.start(authState) - t.context['clientId'] = satellite._authState!.clientId // store clientId in the context - await populateDB(t) - const txDate = await satellite._performSnapshot() - t.context['txDate'] = txDate - // Mimic Electric sending our own operations back - // which serves as an acknowledgement (even though there is a separate ack also) - // and leads to GC of the oplog - const ackTx = { - origin: satellite._authState!.clientId, - commit_timestamp: Long.fromNumber(txDate.getTime()), - changes: [], // doesn't matter, only the origin and timestamp matter for GC of the oplog - lsn: new Uint8Array(), - } - await satellite._applyTransaction(ackTx) + t.context.getMatchingShadowEntries = getPgMatchingShadowEntries + t.context.builder = pgBuilder + await commonSetup(t) }) test.afterEach.always(cleanAndStopSatellite) -const populateDB = async (t: ExecutionContext) => { - const adapter = t.context.adapter as DatabaseAdapter - - const stmts: Statement[] = [] - - stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, - args: [1, 'local', null], - }) - stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ($1, $2, $3);`, - args: [2, 'local', null], - }) - await adapter.runInTransaction(...stmts) 
-} - -async function assertDbHasTables( - t: ExecutionContext, - ...tables: string[] -) { - const adapter = t.context.adapter as DatabaseAdapter - const schemaRows = await adapter.query(builder.getLocalTableNames()) - - const tableNames = new Set(schemaRows.map((r) => r.name)) - tables.forEach((tbl) => { - t.true(tableNames.has(tbl)) - }) -} - -async function getTableInfo( - table: string, - t: ExecutionContext -): Promise { - const adapter = t.context.adapter as DatabaseAdapter - return (await adapter.query(builder.getTableInfo(table))) as ColumnInfo[] -} - -type ColumnInfo = { - cid: number - name: string - type: string - notnull: number - dflt_value: null | string - pk: number -} - -test.serial('setup populates DB', async (t) => { - const adapter = t.context.adapter - - const sql = 'SELECT * FROM main.parent' - const rows = await adapter.query({ sql }) - t.deepEqual(rows, [ - { - id: 1, - value: 'local', - other: null, - }, - { - id: 2, - value: 'local', - other: null, - }, - ]) -}) - -const createTable: SchemaChange = { - table: { - name: 'NewTable', - columns: [ - { - name: 'id', - sqliteType: 'TEXT', - pgType: { name: 'TEXT', array: [], size: [] }, - }, - { - name: 'foo', - sqliteType: 'INTEGER', - pgType: { name: 'INTEGER', array: [], size: [] }, - }, - { - name: 'bar', - sqliteType: 'TEXT', - pgType: { name: 'TEXT', array: [], size: [] }, - }, - ], - fks: [], - pks: ['id'], - }, - migrationType: SatOpMigrate_Type.CREATE_TABLE, - sql: 'CREATE TABLE main."NewTable"(\ - id TEXT NOT NULL,\ - foo INTEGER,\ - bar TEXT,\ - PRIMARY KEY(id)\ - );', -} - -const addColumn: SchemaChange = { - table: { - name: 'parent', - columns: [ - { - name: 'id', - sqliteType: 'INTEGER', - pgType: { name: 'INTEGER', array: [], size: [] }, - }, - { - name: 'value', - sqliteType: 'TEXT', - pgType: { name: 'TEXT', array: [], size: [] }, - }, - { - name: 'other', - sqliteType: 'INTEGER', - pgType: { name: 'INTEGER', array: [], size: [] }, - }, - { - name: 'baz', - sqliteType: 'TEXT', - pgType: { name: 'TEXT', array: [], size: [] }, - }, - ], - fks: [], - pks: ['id'], - }, - migrationType: SatOpMigrate_Type.ALTER_ADD_COLUMN, - sql: 'ALTER TABLE main.parent ADD baz TEXT', -} - -const addColumnRelation = { - id: 2000, // doesn't matter - schema: 'public', - table: 'parent', - tableType: SatRelation_RelationType.TABLE, - columns: [ - { - name: 'id', - type: 'INTEGER', - isNullable: false, - primaryKey: true, - }, - { - name: 'value', - type: 'TEXT', - isNullable: true, - primaryKey: false, - }, - { - name: 'other', - type: 'INTEGER', - isNullable: true, - primaryKey: false, - }, - { - name: 'baz', - type: 'TEXT', - isNullable: true, - primaryKey: false, - }, - ], -} -const newTableRelation = { - id: 2001, // doesn't matter - schema: 'public', - table: 'NewTable', - tableType: SatRelation_RelationType.TABLE, - columns: [ - { - name: 'id', - type: 'TEXT', - isNullable: false, - primaryKey: true, - }, - { - name: 'foo', - type: 'INTEGER', - isNullable: true, - primaryKey: false, - }, - { - name: 'bar', - type: 'TEXT', - isNullable: true, - primaryKey: false, - }, - ], -} - -async function checkMigrationIsApplied(t: ExecutionContext) { - await assertDbHasTables(t, 'parent', 'child', 'NewTable') - - const newTableInfo = await getTableInfo('NewTable', t) - - const expectedTables = [ - // id, foo, bar - { - name: 'foo', - type: 'INTEGER', - notnull: 0, - dflt_value: null, - pk: 0, - }, - { name: 'id', type: 'TEXT', notnull: 1, dflt_value: null, pk: 1 }, - { name: 'bar', type: 'TEXT', notnull: 0, dflt_value: null, pk: 0 }, 
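        // These expected rows use the ColumnInfo shape — name, type, notnull,
        // dflt_value, pk — i.e. exactly the columns the new SQLite `getTableInfo()`
        // builder selects from `pragma_table_info` in the previous patch ("only the
        // columns we need"); the Postgres builder's `getTableInfo()` is presumably
        // expected to return rows of the same shape so these assertions can stay
        // dialect-agnostic.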
- ] - - expectedTables.forEach((tbl) => { - t.true(newTableInfo.some((t) => isEqual(t, tbl))) - }) - - const parentTableInfo = await getTableInfo('parent', t) - const parentTableHasColumn = parentTableInfo.some((col: ColumnInfo) => { - return ( - col.name === 'baz' && - col.type === 'TEXT' && - col.notnull === 0 && - col.dflt_value === null && - col.pk === 0 - ) - }) - - t.true(parentTableHasColumn) -} - -const fetchParentRows = async (adapter: DatabaseAdapter): Promise => { - return adapter.query({ - sql: 'SELECT * FROM main.parent', - }) -} - -const testSetEquality = (t: ExecutionContext, xs: T[], ys: T[]): void => { - t.is(xs.length, ys.length, 'Expected array lengths to be equal') - - const missing: T[] = [] - - for (const x of xs) { - if (ys.some((y) => isequal(x, y))) continue - else missing.push(x) - } - - t.deepEqual( - missing, - [], - 'Expected all elements from the first array to be present in the second, but some are missing' - ) -} - -test.serial('apply migration containing only DDL', async (t) => { - const { satellite, adapter, txDate } = t.context - const timestamp = txDate.getTime() - - const rowsBeforeMigration = await fetchParentRows(adapter) - - const migrationTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(timestamp), - changes: [createTable, addColumn], - lsn: new Uint8Array(), - // starts at 3, because the app already defines 2 migrations - // (see test/support/migrations/migrations.js) - // which are loaded when Satellite is started - migrationVersion: '3', - } - - // Apply the migration transaction - await satellite._applyTransaction(migrationTx) - - // Check that the migration was successfully applied - await checkMigrationIsApplied(t) - - // Check that the existing rows are still there and are unchanged - const rowsAfterMigration = await fetchParentRows(adapter) - const expectedRowsAfterMigration = rowsBeforeMigration.map((row: Row) => { - return { - ...row, - baz: null, - } - }) - - t.deepEqual(rowsAfterMigration, expectedRowsAfterMigration) -}) - -test.serial( - 'apply migration containing DDL and non-conflicting DML', - async (t) => { - /* - Test migrations containing non-conflicting DML statements and some DDL statements - - Process the following migration tx: - - DML 1 is: - insert non-conflicting row in existing table - non-conflict update to existing row - delete row - - DDL 1 is: - Add column to table that is affected by the statements in DML 1 - Create new table - - DML 2 is: - insert row in extended table with value for new column - insert row in extended table without a value for the new column - Insert some rows in newly created table - - Check that the migration was successfully applied on the local DB - - Check the modifications (insert, update, delete) to the rows - */ - - const { satellite, adapter, txDate } = t.context - const timestamp = txDate.getTime() - - const txTags = [generateTag('remote', txDate)] - const mkInsertChange = (record: any) => { - return { - type: DataChangeType.INSERT, - relation: relations['parent'], - record: record, - oldRecord: {}, - tags: txTags, - } - } - - const insertRow = { - id: 3, - value: 'remote', - other: 1, - } - - const insertChange = mkInsertChange(insertRow) - - const oldUpdateRow = { - id: 1, - value: 'local', - other: null, - } - - const updateRow = { - id: 1, - value: 'remote', - other: 5, - } - - const updateChange = { - //type: DataChangeType.INSERT, // insert since `opLogEntryToChange` also transforms update optype into insert - type: DataChangeType.UPDATE, - relation: 
relations['parent'], - record: updateRow, - oldRecord: oldUpdateRow, - tags: txTags, - } - - // Delete overwrites the insert for row with id 2 - // Thus, it overwrites the shadow tag for that row - const localEntries = await satellite._getEntries() - const shadowEntryForRow2 = await getMatchingShadowEntries( - adapter, - localEntries[1] - ) // shadow entry for insert of row with id 2 - const shadowTagsRow2 = JSON.parse(shadowEntryForRow2[0].tags) - - const deleteRow = { - id: 2, - value: 'local', - other: null, - } - - const deleteChange = { - type: DataChangeType.DELETE, - relation: relations['parent'], - oldRecord: deleteRow, - tags: shadowTagsRow2, - } - - const insertExtendedRow = { - id: 4, - value: 'remote', - other: 6, - baz: 'foo', - } - const insertExtendedChange = { - type: DataChangeType.INSERT, - relation: addColumnRelation, - record: insertExtendedRow, - oldRecord: {}, - tags: txTags, - } - - const insertExtendedWithoutValueRow = { - id: 5, - value: 'remote', - other: 7, - } - const insertExtendedWithoutValueChange = { - type: DataChangeType.INSERT, - relation: addColumnRelation, - record: insertExtendedWithoutValueRow, - oldRecord: {}, - tags: txTags, - } - - const insertInNewTableRow = { - id: '1', - foo: 1, - bar: '2', - } - const insertInNewTableChange = { - type: DataChangeType.INSERT, - relation: newTableRelation, - record: insertInNewTableRow, - oldRecord: {}, - tags: txTags, - } - - const dml1 = [insertChange, updateChange, deleteChange] - const ddl1 = [addColumn, createTable] - const dml2 = [ - insertExtendedChange, - insertExtendedWithoutValueChange, - insertInNewTableChange, - ] - - const migrationTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(timestamp), - changes: [...dml1, ...ddl1, ...dml2], - lsn: new Uint8Array(), - migrationVersion: '4', - } - - const rowsBeforeMigration = await fetchParentRows(adapter) - - // For each schema change, Electric sends a `SatRelation` message - // before sending a DML operation that depends on a new or modified schema. 
- // The `SatRelation` message is handled by `_updateRelations` in order - // to update Satellite's relations - await satellite._updateRelations(addColumnRelation) - await satellite._updateRelations(newTableRelation) - - // Apply the migration transaction - await satellite._applyTransaction(migrationTx) - - // Check that the migration was successfully applied - await checkMigrationIsApplied(t) - - // Check that the existing rows are still there and are unchanged - const rowsAfterMigration = await fetchParentRows(adapter) - const expectedRowsAfterMigration = rowsBeforeMigration - .filter((r: Row) => r.id !== deleteRow.id && r.id !== oldUpdateRow.id) - .concat([insertRow, updateRow, insertExtendedWithoutValueRow]) - .map((row: Row) => { - return { - ...row, - baz: null, - } as Row - }) - .concat([insertExtendedRow]) - testSetEquality(t, rowsAfterMigration, expectedRowsAfterMigration) - - // Check the row that was inserted in the new table - const newTableRows = await adapter.query({ - sql: 'SELECT * FROM main."NewTable"', - }) - - t.is(newTableRows.length, 1) - t.deepEqual(newTableRows[0], insertInNewTableRow) - } -) - -test.serial('apply migration containing DDL and conflicting DML', async (t) => { - // Same as previous test but DML contains some conflicting operations - const { satellite, adapter, txDate } = t.context - - // Fetch the shadow tag for row 1 such that delete will overwrite it - const localEntries = await satellite._getEntries() - const shadowEntryForRow1 = await getMatchingShadowEntries( - adapter, - localEntries[0] - ) // shadow entry for insert of row with id 1 - const shadowTagsRow1 = JSON.parse(shadowEntryForRow1[0].tags) - - // Locally update row with id 1 - await adapter.runInTransaction({ - sql: `UPDATE main.parent SET value = $1, other = $2 WHERE id = $3;`, - args: ['still local', 5, 1], - }) - - await satellite._performSnapshot() - - // Now receive a concurrent delete of that row - // such that it deletes the row with id 1 that was initially inserted - const timestamp = txDate.getTime() - //const txTags = [ generateTag('remote', txDate) ] - - const deleteRow = { - id: 1, - value: 'local', - other: null, - } - - const deleteChange = { - type: DataChangeType.DELETE, - relation: relations['parent'], - oldRecord: deleteRow, - tags: shadowTagsRow1, - } - - // Process the incoming delete - const ddl = [addColumn, createTable] - const dml = [deleteChange] - - const migrationTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(timestamp), - changes: [...ddl, ...dml], - lsn: new Uint8Array(), - migrationVersion: '5', - } - - const rowsBeforeMigration = await fetchParentRows(adapter) - const rowsBeforeMigrationExceptConflictingRow = rowsBeforeMigration.filter( - (r) => r.id !== deleteRow.id - ) - - // For each schema change, Electric sends a `SatRelation` message - // before sending a DML operation that depends on a new or modified schema. - // The `SatRelation` message is handled by `_updateRelations` in order - // to update Satellite's relations. 
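      // (As with the `satellite.relations = relations` comments in the tags tests
      // above: the relation has to be registered via `_updateRelations` before
      // `_applyTransaction` can turn the incoming `DataChange`s into oplog entries.)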
- // In this case, the DML operation deletes a row in `parent` table - // so we receive a `SatRelation` message for that table - await satellite._updateRelations(addColumnRelation) - - // Apply the migration transaction - await satellite._applyTransaction(migrationTx) - - // Check that the migration was successfully applied - await checkMigrationIsApplied(t) - - // The local update and remote delete happened concurrently - // Check that the update wins - const rowsAfterMigration = await fetchParentRows(adapter) - const newRowsExceptConflictingRow = rowsAfterMigration.filter( - (r) => r.id !== deleteRow.id - ) - const conflictingRow = rowsAfterMigration.find((r) => r.id === deleteRow.id) - - testSetEquality( - t, - rowsBeforeMigrationExceptConflictingRow.map((r) => { - return { - baz: null, - ...r, - } - }), - newRowsExceptConflictingRow - ) - - t.deepEqual(conflictingRow, { - id: 1, - value: 'still local', - other: 5, - baz: null, - }) -}) - -test.serial('apply migration and concurrent transaction', async (t) => { - const { satellite, adapter, txDate } = t.context - - const timestamp = txDate.getTime() - const remoteA = 'remoteA' - const remoteB = 'remoteB' - const txTagsRemoteA = [generateTag(remoteA, txDate)] - const txTagsRemoteB = [generateTag(remoteB, txDate)] - - const mkInsertChange = ( - record: Record, - tags: string[] - ): DataChange => { - return { - type: DataChangeType.INSERT, - relation: relations['parent'], - record: record, - oldRecord: {}, - tags: tags, - } - } - - const insertRowA = { - id: 3, - value: 'remote A', - other: 8, - } - - const insertRowB = { - id: 3, - value: 'remote B', - other: 9, - } - - // Make 2 concurrent insert changes. - // They are concurrent because both remoteA and remoteB - // generated the changes at `timestamp` - const insertChangeA = mkInsertChange(insertRowA, txTagsRemoteA) - const insertChangeB = mkInsertChange(insertRowB, txTagsRemoteB) - - const txA: Transaction = { - origin: remoteA, - commit_timestamp: Long.fromNumber(timestamp), - changes: [insertChangeA], - lsn: new Uint8Array(), - } - - const ddl = [addColumn, createTable] - - const txB: Transaction = { - origin: remoteB, - commit_timestamp: Long.fromNumber(timestamp), - changes: [...ddl, insertChangeB], - lsn: new Uint8Array(), - migrationVersion: '6', - } - - const rowsBeforeMigration = await fetchParentRows(adapter) - - // For each schema change, Electric sends a `SatRelation` message - // before sending a DML operation that depends on a new or modified schema. - // The `SatRelation` message is handled by `_updateRelations` in order - // to update Satellite's relations. 
- // In this case, the DML operation adds a row in `parent` table - // so we receive a `SatRelation` message for that table - await satellite._updateRelations(addColumnRelation) - - // Apply the concurrent transactions - await satellite._applyTransaction(txB) - await satellite._applyTransaction(txA) - - // Check that the migration was successfully applied - await checkMigrationIsApplied(t) - - // Check that one of the two insertions won - const rowsAfterMigration = await fetchParentRows(adapter) - const extendRow = (r: Row) => { - return { - ...r, - baz: null, - } - } - const extendedRows = rowsBeforeMigration.map(extendRow) - - // Check that all rows now have an additional column - t.deepEqual( - rowsAfterMigration.filter((r) => r.id !== insertRowA.id), - extendedRows - ) - - const conflictingRow = rowsAfterMigration.find((r) => r.id === insertRowA.id) - - // Now also check the row that was concurrently inserted - t.assert( - isequal(conflictingRow, extendRow(insertRowA)) || - isequal(conflictingRow, extendRow(insertRowB)) - ) -}) - -const migrationWithFKs: SchemaChange[] = [ - { - migrationType: SatOpMigrate_Type.CREATE_TABLE, - sql: ` - CREATE TABLE main."test_items" ( - "id" TEXT NOT NULL, - CONSTRAINT "test_items_pkey" PRIMARY KEY ("id") - ); - `, - table: { - name: 'test_items', - columns: [ - { - name: 'id', - sqliteType: 'TEXT', - pgType: { name: 'TEXT', array: [], size: [] }, - }, - ], - fks: [], - pks: ['id'], - }, - }, - { - migrationType: SatOpMigrate_Type.CREATE_TABLE, - sql: ` - CREATE TABLE main."test_other_items" ( - "id" TEXT NOT NULL, - "item_id" TEXT, - -- CONSTRAINT "test_other_items_item_id_fkey" FOREIGN KEY ("item_id") REFERENCES "test_items" ("id"), - CONSTRAINT "test_other_items_pkey" PRIMARY KEY ("id") - ); - `, - table: { - name: 'test_other_items', - columns: [ - { - name: 'id', - sqliteType: 'TEXT', - pgType: { name: 'TEXT', array: [], size: [] }, - }, - { - name: 'item_id', - sqliteType: 'TEXT', - pgType: { name: 'TEXT', array: [], size: [] }, - }, - ], - fks: [ - { - $type: 'Electric.Satellite.SatOpMigrate.ForeignKey', - fkCols: ['item_id'], - pkTable: 'test_items', - pkCols: ['id'], - }, - ], - pks: ['id'], - }, - }, -] - -test.serial('apply another migration', async (t) => { - const { satellite } = t.context - - const migrationTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(new Date().getTime()), - changes: migrationWithFKs, - lsn: new Uint8Array(), - // starts at 3, because the app already defines 2 migrations - // (see test/support/migrations/migrations.js) - // which are loaded when Satellite is started - migrationVersion: '3', - } - - // Apply the migration transaction - try { - await satellite._applyTransaction(migrationTx) - } catch (e) { - console.error(e) - throw e - } - - await assertDbHasTables(t, 'test_items', 'test_other_items') - t.pass() -}) +processMigrationTests(test) diff --git a/clients/typescript/test/satellite/process.migration.test.ts b/clients/typescript/test/satellite/process.migration.test.ts index cba5952dfd..8127906311 100644 --- a/clients/typescript/test/satellite/process.migration.test.ts +++ b/clients/typescript/test/satellite/process.migration.test.ts @@ -1,12 +1,13 @@ -import testAny, { ExecutionContext, TestFn } from 'ava' +import { ExecutionContext, TestFn } from 'ava' import isequal from 'lodash.isequal' import Long from 'long' import { SatOpMigrate_Type, SatRelation_RelationType, } from '../../src/_generated/protocol/satellite' -import { DatabaseAdapter } from '../../src/electric/adapter' + import { 
generateTag } from '../../src/satellite/oplog' + import { DataChange, DataChangeType, @@ -16,19 +17,36 @@ import { Statement, Transaction, } from '../../src/util' + +import { ContextType as CommonContextType, relations } from './common' + import { - ContextType, - cleanAndStopSatellite, - makeContext, - relations, -} from './common' -import { getMatchingShadowEntries } from '../support/satellite-helpers' - -type CurrentContext = ContextType<{ clientId: string; txDate: Date }> -const test = testAny as TestFn - -test.beforeEach(async (t) => { - await makeContext(t) + getMatchingShadowEntries as getSqliteMatchingShadowEntries, + getPgMatchingShadowEntries, +} from '../support/satellite-helpers' + +import { DatabaseAdapter } from '../../src/electric/adapter' +import isEqual from 'lodash.isequal' +import { QueryBuilder } from '../../src/migrators/query-builder' + +export type ContextType = CommonContextType & { + clientId: string + txDate: Date + builder: QueryBuilder + getMatchingShadowEntries: + | typeof getSqliteMatchingShadowEntries + | typeof getPgMatchingShadowEntries +} + +type ColumnInfo = { + name: string + type: string + notnull: number + dflt_value: null | string + pk: number +} + +export const commonSetup = async (t: ExecutionContext) => { const { satellite, authState, token } = t.context await satellite.start(authState) satellite.setToken(token) @@ -47,35 +65,30 @@ test.beforeEach(async (t) => { lsn: new Uint8Array(), } await satellite._applyTransaction(ackTx) -}) -test.afterEach.always(cleanAndStopSatellite) +} -const populateDB = async (t: ExecutionContext) => { +const populateDB = async (t: ExecutionContext) => { const adapter = t.context.adapter as DatabaseAdapter const stmts: Statement[] = [] stmts.push({ - sql: `INSERT INTO parent (id, value, other) VALUES (?, ?, ?);`, - args: [1, 'local', null], + sql: `INSERT INTO main.parent (id, value, other) VALUES (1, 'local', null);`, }) stmts.push({ - sql: `INSERT INTO parent (id, value, other) VALUES (?, ?, ?);`, - args: [2, 'local', null], + sql: `INSERT INTO main.parent (id, value, other) VALUES (2, 'local', null);`, }) await adapter.runInTransaction(...stmts) } async function assertDbHasTables( - t: ExecutionContext, + t: ExecutionContext, ...tables: string[] ) { - const adapter = t.context.adapter as DatabaseAdapter - const schemaRows = await adapter.query({ - sql: "SELECT tbl_name FROM sqlite_schema WHERE type = 'table'", - }) + const { adapter, builder } = t.context + const schemaRows = await adapter.query(builder.getLocalTableNames()) - const tableNames = new Set(schemaRows.map((r) => r.tbl_name)) + const tableNames = new Set(schemaRows.map((r) => r.name)) tables.forEach((tbl) => { t.true(tableNames.has(tbl)) }) @@ -83,396 +96,583 @@ async function assertDbHasTables( async function getTableInfo( table: string, - t: ExecutionContext + t: ExecutionContext ): Promise { - const adapter = t.context.adapter as DatabaseAdapter - return (await adapter.query({ - sql: `pragma table_info(${table});`, - })) as ColumnInfo[] + const { adapter, builder } = t.context + return (await adapter.query(builder.getTableInfo(table))) as ColumnInfo[] } -type ColumnInfo = { - cid: number - name: string - type: string - notnull: number - dflt_value: null | string - pk: number -} - -test.serial('setup populates DB', async (t) => { - const adapter = t.context.adapter - - const sql = 'SELECT * FROM parent' - const rows = await adapter.query({ sql }) - t.deepEqual(rows, [ - { - id: 1, - value: 'local', - other: null, - }, - { - id: 2, - value: 'local', - 
other: null, - }, - ]) -}) +export const processMigrationTests = (test: TestFn) => { + test.serial('setup populates DB', async (t) => { + const adapter = t.context.adapter -const createTable: SchemaChange = { - table: { - name: 'NewTable', - columns: [ - { - name: 'id', - sqliteType: 'TEXT', - pgType: { name: 'TEXT', array: [], size: [] }, - }, + const sql = 'SELECT * FROM main.parent' + const rows = await adapter.query({ sql }) + t.deepEqual(rows, [ { - name: 'foo', - sqliteType: 'INTEGER', - pgType: { name: 'INTEGER', array: [], size: [] }, + id: 1, + value: 'local', + other: null, }, { - name: 'bar', - sqliteType: 'TEXT', - pgType: { name: 'TEXT', array: [], size: [] }, + id: 2, + value: 'local', + other: null, }, - ], - fks: [], - pks: ['id'], - }, - migrationType: SatOpMigrate_Type.CREATE_TABLE, - sql: 'CREATE TABLE NewTable(\ + ]) + }) + + const createTable: SchemaChange = { + table: { + name: 'NewTable', + columns: [ + { + name: 'id', + sqliteType: 'TEXT', + pgType: { name: 'TEXT', array: [], size: [] }, + }, + { + name: 'foo', + sqliteType: 'INTEGER', + pgType: { name: 'INTEGER', array: [], size: [] }, + }, + { + name: 'bar', + sqliteType: 'TEXT', + pgType: { name: 'TEXT', array: [], size: [] }, + }, + ], + fks: [], + pks: ['id'], + }, + migrationType: SatOpMigrate_Type.CREATE_TABLE, + sql: 'CREATE TABLE main."NewTable"(\ id TEXT NOT NULL,\ foo INTEGER,\ bar TEXT,\ PRIMARY KEY(id)\ );', -} + } + + const addColumn: SchemaChange = { + table: { + name: 'parent', + columns: [ + { + name: 'id', + sqliteType: 'INTEGER', + pgType: { name: 'INTEGER', array: [], size: [] }, + }, + { + name: 'value', + sqliteType: 'TEXT', + pgType: { name: 'TEXT', array: [], size: [] }, + }, + { + name: 'other', + sqliteType: 'INTEGER', + pgType: { name: 'INTEGER', array: [], size: [] }, + }, + { + name: 'baz', + sqliteType: 'TEXT', + pgType: { name: 'TEXT', array: [], size: [] }, + }, + ], + fks: [], + pks: ['id'], + }, + migrationType: SatOpMigrate_Type.ALTER_ADD_COLUMN, + sql: 'ALTER TABLE main.parent ADD baz TEXT', + } -const addColumn: SchemaChange = { - table: { - name: 'parent', + const addColumnRelation = { + id: 2000, // doesn't matter + schema: 'public', + table: 'parent', + tableType: SatRelation_RelationType.TABLE, columns: [ { name: 'id', - sqliteType: 'INTEGER', - pgType: { name: 'INTEGER', array: [], size: [] }, + type: 'INTEGER', + isNullable: false, + primaryKey: true, }, { name: 'value', - sqliteType: 'TEXT', - pgType: { name: 'TEXT', array: [], size: [] }, + type: 'TEXT', + isNullable: true, + primaryKey: false, }, { name: 'other', - sqliteType: 'INTEGER', - pgType: { name: 'INTEGER', array: [], size: [] }, + type: 'INTEGER', + isNullable: true, + primaryKey: false, }, { name: 'baz', - sqliteType: 'TEXT', - pgType: { name: 'TEXT', array: [], size: [] }, + type: 'TEXT', + isNullable: true, + primaryKey: false, }, ], - fks: [], - pks: ['id'], - }, - migrationType: SatOpMigrate_Type.ALTER_ADD_COLUMN, - sql: 'ALTER TABLE parent ADD baz TEXT', -} - -const addColumnRelation = { - id: 2000, // doesn't matter - schema: 'public', - table: 'parent', - tableType: SatRelation_RelationType.TABLE, - columns: [ - { - name: 'id', - type: 'INTEGER', - isNullable: false, - primaryKey: 1, - }, - { - name: 'value', - type: 'TEXT', - isNullable: true, - primaryKey: undefined, - }, - { - name: 'other', - type: 'INTEGER', - isNullable: true, - primaryKey: undefined, - }, - { - name: 'baz', - type: 'TEXT', - isNullable: true, - primaryKey: undefined, - }, - ], -} satisfies Relation -const newTableRelation = { - 
id: 2001, // doesn't matter - schema: 'public', - table: 'NewTable', - tableType: SatRelation_RelationType.TABLE, - columns: [ - { - name: 'id', - type: 'TEXT', - isNullable: false, - primaryKey: 1, - }, - { - name: 'foo', - type: 'INTEGER', - isNullable: true, - primaryKey: undefined, - }, - { - name: 'bar', - type: 'TEXT', - isNullable: true, - primaryKey: undefined, - }, - ], -} satisfies Relation - -async function checkMigrationIsApplied(t: ExecutionContext) { - await assertDbHasTables(t, 'parent', 'child', 'NewTable') + } + const newTableRelation = { + id: 2001, // doesn't matter + schema: 'public', + table: 'NewTable', + tableType: SatRelation_RelationType.TABLE, + columns: [ + { + name: 'id', + type: 'TEXT', + isNullable: false, + primaryKey: true, + }, + { + name: 'foo', + type: 'INTEGER', + isNullable: true, + primaryKey: false, + }, + { + name: 'bar', + type: 'TEXT', + isNullable: true, + primaryKey: false, + }, + ], + } - const newTableInfo = await getTableInfo('NewTable', t) + async function checkMigrationIsApplied(t: ExecutionContext) { + await assertDbHasTables(t, 'parent', 'child', 'NewTable') - t.deepEqual(newTableInfo, [ - // id, foo, bar - { cid: 0, name: 'id', type: 'TEXT', notnull: 1, dflt_value: null, pk: 1 }, - { - cid: 1, - name: 'foo', - type: 'INTEGER', - notnull: 0, - dflt_value: null, - pk: 0, - }, - { cid: 2, name: 'bar', type: 'TEXT', notnull: 0, dflt_value: null, pk: 0 }, - ]) - - const parentTableInfo = await getTableInfo('parent', t) - const parentTableHasColumn = parentTableInfo.some((col: ColumnInfo) => { - return ( - col.name === 'baz' && - col.type === 'TEXT' && - col.notnull === 0 && - col.dflt_value === null && - col.pk === 0 - ) - }) + const newTableInfo = await getTableInfo('NewTable', t) - t.true(parentTableHasColumn) -} + const expectedTables = [ + { + name: 'foo', + type: 'INTEGER', + notnull: 0, + dflt_value: null, + pk: 0, + }, + { name: 'id', type: 'TEXT', notnull: 1, dflt_value: null, pk: 1 }, + { name: 'bar', type: 'TEXT', notnull: 0, dflt_value: null, pk: 0 }, + ] -const fetchParentRows = async (adapter: DatabaseAdapter): Promise => { - return adapter.query({ - sql: 'SELECT * FROM parent', - }) -} + expectedTables.forEach((tbl) => { + t.true(newTableInfo.some((t) => isEqual(t, tbl))) + }) -const testSetEquality = (t: ExecutionContext, xs: T[], ys: T[]): void => { - t.is(xs.length, ys.length, 'Expected array lengths to be equal') + const parentTableInfo = await getTableInfo('parent', t) + const parentTableHasColumn = parentTableInfo.some((col: ColumnInfo) => { + return ( + col.name === 'baz' && + col.type === 'TEXT' && + col.notnull === 0 && + col.dflt_value === null && + col.pk === 0 + ) + }) - const missing: T[] = [] + t.true(parentTableHasColumn) + } - for (const x of xs) { - if (ys.some((y) => isequal(x, y))) continue - else missing.push(x) + const fetchParentRows = async (adapter: DatabaseAdapter): Promise => { + return adapter.query({ + sql: 'SELECT * FROM main.parent', + }) } - t.deepEqual( - missing, - [], - 'Expected all elements from the first array to be present in the second, but some are missing' - ) -} + const testSetEquality = (t: ExecutionContext, xs: T[], ys: T[]): void => { + t.is(xs.length, ys.length, 'Expected array lengths to be equal') -test.serial('apply migration containing only DDL', async (t) => { - const { satellite, adapter, txDate } = t.context - const timestamp = txDate.getTime() + const missing: T[] = [] - const rowsBeforeMigration = await fetchParentRows(adapter) + for (const x of xs) { + if (ys.some((y) => 
isequal(x, y))) continue + else missing.push(x) + } - const migrationTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(timestamp), - changes: [createTable, addColumn], - lsn: new Uint8Array(), - // starts at 3, because the app already defines 2 migrations - // (see test/support/migrations/migrations.js) - // which are loaded when Satellite is started - migrationVersion: '3', + t.deepEqual( + missing, + [], + 'Expected all elements from the first array to be present in the second, but some are missing' + ) } - // Apply the migration transaction - await satellite._applyTransaction(migrationTx) + test.serial('apply migration containing only DDL', async (t) => { + const { satellite, adapter, txDate } = t.context + const timestamp = txDate.getTime() - // Check that the migration was successfully applied - await checkMigrationIsApplied(t) + const rowsBeforeMigration = await fetchParentRows(adapter) - // Check that the existing rows are still there and are unchanged - const rowsAfterMigration = await fetchParentRows(adapter) - const expectedRowsAfterMigration = rowsBeforeMigration.map((row: Row) => { - return { - ...row, - baz: null, + const migrationTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(timestamp), + changes: [createTable, addColumn], + lsn: new Uint8Array(), + // starts at 3, because the app already defines 2 migrations + // (see test/support/migrations/migrations.js) + // which are loaded when Satellite is started + migrationVersion: '3', } - }) - t.deepEqual(rowsAfterMigration, expectedRowsAfterMigration) -}) - -test.serial( - 'apply migration containing DDL and non-conflicting DML', - async (t) => { - /* - Test migrations containing non-conflicting DML statements and some DDL statements - - Process the following migration tx: - - DML 1 is: - insert non-conflicting row in existing table - non-conflict update to existing row - delete row - - DDL 1 is: - Add column to table that is affected by the statements in DML 1 - Create new table - - DML 2 is: - insert row in extended table with value for new column - insert row in extended table without a value for the new column - Insert some rows in newly created table - - Check that the migration was successfully applied on the local DB - - Check the modifications (insert, update, delete) to the rows - */ + // Apply the migration transaction + await satellite._applyTransaction(migrationTx) - const { satellite, adapter, txDate } = t.context - const timestamp = txDate.getTime() + // Check that the migration was successfully applied + await checkMigrationIsApplied(t) - const txTags = [generateTag('remote', txDate)] - const mkInsertChange = (record: any) => { + // Check that the existing rows are still there and are unchanged + const rowsAfterMigration = await fetchParentRows(adapter) + const expectedRowsAfterMigration = rowsBeforeMigration.map((row: Row) => { return { - type: DataChangeType.INSERT, + ...row, + baz: null, + } + }) + + t.deepEqual(rowsAfterMigration, expectedRowsAfterMigration) + }) + + test.serial( + 'apply migration containing DDL and non-conflicting DML', + async (t) => { + /* + Test migrations containing non-conflicting DML statements and some DDL statements + - Process the following migration tx: + - DML 1 is: + insert non-conflicting row in existing table + non-conflict update to existing row + delete row + - DDL 1 is: + Add column to table that is affected by the statements in DML 1 + Create new table + - DML 2 is: + insert row in extended table with value for new column + insert row in extended 
table without a value for the new column + Insert some rows in newly created table + - Check that the migration was successfully applied on the local DB + - Check the modifications (insert, update, delete) to the rows + */ + + const { satellite, adapter, txDate, getMatchingShadowEntries } = t.context + const timestamp = txDate.getTime() + + const txTags = [generateTag('remote', txDate)] + const mkInsertChange = (record: any) => { + return { + type: DataChangeType.INSERT, + relation: relations['parent'], + record: record, + oldRecord: {}, + tags: txTags, + } + } + + const insertRow = { + id: 3, + value: 'remote', + other: 1, + } + + const insertChange = mkInsertChange(insertRow) + + const oldUpdateRow = { + id: 1, + value: 'local', + other: null, + } + + const updateRow = { + id: 1, + value: 'remote', + other: 5, + } + + const updateChange = { + //type: DataChangeType.INSERT, // insert since `opLogEntryToChange` also transforms update optype into insert + type: DataChangeType.UPDATE, relation: relations['parent'], - record: record, + record: updateRow, + oldRecord: oldUpdateRow, + tags: txTags, + } + + // Delete overwrites the insert for row with id 2 + // Thus, it overwrites the shadow tag for that row + const localEntries = await satellite._getEntries() + const shadowEntryForRow2 = await getMatchingShadowEntries( + adapter, + localEntries[1] + ) // shadow entry for insert of row with id 2 + const shadowTagsRow2 = JSON.parse(shadowEntryForRow2[0].tags) + + const deleteRow = { + id: 2, + value: 'local', + other: null, + } + + const deleteChange = { + type: DataChangeType.DELETE, + relation: relations['parent'], + oldRecord: deleteRow, + tags: shadowTagsRow2, + } + + const insertExtendedRow = { + id: 4, + value: 'remote', + other: 6, + baz: 'foo', + } + const insertExtendedChange = { + type: DataChangeType.INSERT, + relation: addColumnRelation, + record: insertExtendedRow, oldRecord: {}, tags: txTags, } - } - const insertRow = { - id: 3, - value: 'remote', - other: 1, - } + const insertExtendedWithoutValueRow = { + id: 5, + value: 'remote', + other: 7, + } + const insertExtendedWithoutValueChange = { + type: DataChangeType.INSERT, + relation: addColumnRelation, + record: insertExtendedWithoutValueRow, + oldRecord: {}, + tags: txTags, + } - const insertChange = mkInsertChange(insertRow) + const insertInNewTableRow = { + id: '1', + foo: 1, + bar: '2', + } + const insertInNewTableChange = { + type: DataChangeType.INSERT, + relation: newTableRelation, + record: insertInNewTableRow, + oldRecord: {}, + tags: txTags, + } - const oldUpdateRow = { - id: 1, - value: 'local', - other: null, - } + const dml1 = [insertChange, updateChange, deleteChange] + const ddl1 = [addColumn, createTable] + const dml2 = [ + insertExtendedChange, + insertExtendedWithoutValueChange, + insertInNewTableChange, + ] + + const migrationTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(timestamp), + changes: [...dml1, ...ddl1, ...dml2], + lsn: new Uint8Array(), + migrationVersion: '4', + } - const updateRow = { - id: 1, - value: 'remote', - other: 5, - } + const rowsBeforeMigration = await fetchParentRows(adapter) + + // For each schema change, Electric sends a `SatRelation` message + // before sending a DML operation that depends on a new or modified schema. 
+ // The `SatRelation` message is handled by `_updateRelations` in order + // to update Satellite's relations + await satellite._updateRelations(addColumnRelation) + await satellite._updateRelations(newTableRelation) + + // Apply the migration transaction + await satellite._applyTransaction(migrationTx) + + // Check that the migration was successfully applied + await checkMigrationIsApplied(t) + + // Check that the existing rows are still there and are unchanged + const rowsAfterMigration = await fetchParentRows(adapter) + const expectedRowsAfterMigration = rowsBeforeMigration + .filter((r: Row) => r.id !== deleteRow.id && r.id !== oldUpdateRow.id) + .concat([insertRow, updateRow, insertExtendedWithoutValueRow]) + .map((row: Row) => { + return { + ...row, + baz: null, + } as Row + }) + .concat([insertExtendedRow]) + testSetEquality(t, rowsAfterMigration, expectedRowsAfterMigration) + + // Check the row that was inserted in the new table + const newTableRows = await adapter.query({ + sql: 'SELECT * FROM main."NewTable"', + }) - const updateChange = { - //type: DataChangeType.INSERT, // insert since `opLogEntryToChange` also transforms update optype into insert - type: DataChangeType.UPDATE, - relation: relations['parent'], - record: updateRow, - oldRecord: oldUpdateRow, - tags: txTags, + t.is(newTableRows.length, 1) + t.deepEqual(newTableRows[0], insertInNewTableRow) } + ) - // Delete overwrites the insert for row with id 2 - // Thus, it overwrites the shadow tag for that row - const localEntries = await satellite._getEntries() - const shadowEntryForRow2 = await getMatchingShadowEntries( - adapter, - localEntries[1] - ) // shadow entry for insert of row with id 2 - const shadowTagsRow2 = JSON.parse(shadowEntryForRow2[0].tags) - - const deleteRow = { - id: 2, - value: 'local', - other: null, - } + test.serial( + 'apply migration containing DDL and conflicting DML', + async (t) => { + // Same as previous test but DML contains some conflicting operations + const { satellite, adapter, txDate, getMatchingShadowEntries } = t.context + + // Fetch the shadow tag for row 1 such that delete will overwrite it + const localEntries = await satellite._getEntries() + const shadowEntryForRow1 = await getMatchingShadowEntries( + adapter, + localEntries[0] + ) // shadow entry for insert of row with id 1 + const shadowTagsRow1 = JSON.parse(shadowEntryForRow1[0].tags) + + // Locally update row with id 1 + await adapter.runInTransaction({ + sql: `UPDATE main.parent SET value = 'still local', other = 5 WHERE id = 1;`, + }) - const deleteChange = { - type: DataChangeType.DELETE, - relation: relations['parent'], - oldRecord: deleteRow, - tags: shadowTagsRow2, - } + await satellite._performSnapshot() - const insertExtendedRow = { - id: 4, - value: 'remote', - other: 6, - baz: 'foo', - } - const insertExtendedChange = { - type: DataChangeType.INSERT, - relation: addColumnRelation, - record: insertExtendedRow, - oldRecord: {}, - tags: txTags, + // Now receive a concurrent delete of that row + // such that it deletes the row with id 1 that was initially inserted + const timestamp = txDate.getTime() + //const txTags = [ generateTag('remote', txDate) ] + + const deleteRow = { + id: 1, + value: 'local', + other: null, + } + + const deleteChange = { + type: DataChangeType.DELETE, + relation: relations['parent'], + oldRecord: deleteRow, + tags: shadowTagsRow1, + } + + // Process the incoming delete + const ddl = [addColumn, createTable] + const dml = [deleteChange] + + const migrationTx = { + origin: 'remote', + 
commit_timestamp: Long.fromNumber(timestamp), + changes: [...ddl, ...dml], + lsn: new Uint8Array(), + migrationVersion: '5', + } + + const rowsBeforeMigration = await fetchParentRows(adapter) + const rowsBeforeMigrationExceptConflictingRow = + rowsBeforeMigration.filter((r) => r.id !== deleteRow.id) + + // For each schema change, Electric sends a `SatRelation` message + // before sending a DML operation that depends on a new or modified schema. + // The `SatRelation` message is handled by `_updateRelations` in order + // to update Satellite's relations. + // In this case, the DML operation deletes a row in `parent` table + // so we receive a `SatRelation` message for that table + await satellite._updateRelations(addColumnRelation) + + // Apply the migration transaction + await satellite._applyTransaction(migrationTx) + + // Check that the migration was successfully applied + await checkMigrationIsApplied(t) + + // The local update and remote delete happened concurrently + // Check that the update wins + const rowsAfterMigration = await fetchParentRows(adapter) + const newRowsExceptConflictingRow = rowsAfterMigration.filter( + (r) => r.id !== deleteRow.id + ) + const conflictingRow = rowsAfterMigration.find( + (r) => r.id === deleteRow.id + ) + + testSetEquality( + t, + rowsBeforeMigrationExceptConflictingRow.map((r) => { + return { + baz: null, + ...r, + } + }), + newRowsExceptConflictingRow + ) + + t.deepEqual(conflictingRow, { + id: 1, + value: 'still local', + other: 5, + baz: null, + }) } + ) + + test.serial('apply migration and concurrent transaction', async (t) => { + const { satellite, adapter, txDate } = t.context - const insertExtendedWithoutValueRow = { - id: 5, - value: 'remote', - other: 7, + const timestamp = txDate.getTime() + const remoteA = 'remoteA' + const remoteB = 'remoteB' + const txTagsRemoteA = [generateTag(remoteA, txDate)] + const txTagsRemoteB = [generateTag(remoteB, txDate)] + + const mkInsertChange = ( + record: Record, + tags: string[] + ): DataChange => { + return { + type: DataChangeType.INSERT, + relation: relations['parent'], + record: record, + oldRecord: {}, + tags: tags, + } } - const insertExtendedWithoutValueChange = { - type: DataChangeType.INSERT, - relation: addColumnRelation, - record: insertExtendedWithoutValueRow, - oldRecord: {}, - tags: txTags, + + const insertRowA = { + id: 3, + value: 'remote A', + other: 8, } - const insertInNewTableRow = { - id: '1', - foo: 1, - bar: '2', + const insertRowB = { + id: 3, + value: 'remote B', + other: 9, } - const insertInNewTableChange = { - type: DataChangeType.INSERT, - relation: newTableRelation, - record: insertInNewTableRow, - oldRecord: {}, - tags: txTags, + + // Make 2 concurrent insert changes. 
+ // They are concurrent because both remoteA and remoteB + // generated the changes at `timestamp` + const insertChangeA = mkInsertChange(insertRowA, txTagsRemoteA) + const insertChangeB = mkInsertChange(insertRowB, txTagsRemoteB) + + const txA: Transaction = { + origin: remoteA, + commit_timestamp: Long.fromNumber(timestamp), + changes: [insertChangeA], + lsn: new Uint8Array(), } - const dml1 = [insertChange, updateChange, deleteChange] - const ddl1 = [addColumn, createTable] - const dml2 = [ - insertExtendedChange, - insertExtendedWithoutValueChange, - insertInNewTableChange, - ] + const ddl = [addColumn, createTable] - const migrationTx = { - origin: 'remote', + const txB: Transaction = { + origin: remoteB, commit_timestamp: Long.fromNumber(timestamp), - changes: [...dml1, ...ddl1, ...dml2], + changes: [...ddl, insertChangeB], lsn: new Uint8Array(), - migrationVersion: '4', + migrationVersion: '6', } const rowsBeforeMigration = await fetchParentRows(adapter) @@ -480,316 +680,127 @@ test.serial( // For each schema change, Electric sends a `SatRelation` message // before sending a DML operation that depends on a new or modified schema. // The `SatRelation` message is handled by `_updateRelations` in order - // to update Satellite's relations + // to update Satellite's relations. + // In this case, the DML operation adds a row in `parent` table + // so we receive a `SatRelation` message for that table await satellite._updateRelations(addColumnRelation) - await satellite._updateRelations(newTableRelation) - // Apply the migration transaction - await satellite._applyTransaction(migrationTx) + // Apply the concurrent transactions + await satellite._applyTransaction(txB) + await satellite._applyTransaction(txA) // Check that the migration was successfully applied await checkMigrationIsApplied(t) - // Check that the existing rows are still there and are unchanged + // Check that one of the two insertions won const rowsAfterMigration = await fetchParentRows(adapter) - const expectedRowsAfterMigration = rowsBeforeMigration - .filter((r: Row) => r.id !== deleteRow.id && r.id !== oldUpdateRow.id) - .concat([insertRow, updateRow, insertExtendedWithoutValueRow]) - .map((row: Row) => { - return { - ...row, - baz: null, - } as Row - }) - .concat([insertExtendedRow]) - testSetEquality(t, rowsAfterMigration, expectedRowsAfterMigration) - - // Check the row that was inserted in the new table - const newTableRows = await adapter.query({ - sql: 'SELECT * FROM NewTable', - }) - - t.is(newTableRows.length, 1) - t.deepEqual(newTableRows[0], insertInNewTableRow) - } -) - -test.serial('apply migration containing DDL and conflicting DML', async (t) => { - // Same as previous test but DML contains some conflicting operations - const { satellite, adapter, txDate } = t.context - - // Fetch the shadow tag for row 1 such that delete will overwrite it - const localEntries = await satellite._getEntries() - const shadowEntryForRow1 = await getMatchingShadowEntries( - adapter, - localEntries[0] - ) // shadow entry for insert of row with id 1 - const shadowTagsRow1 = JSON.parse(shadowEntryForRow1[0].tags) - - // Locally update row with id 1 - await adapter.runInTransaction({ - sql: `UPDATE parent SET value = ?, other = ? 
WHERE id = ?;`, - args: ['still local', 5, 1], - }) - - await satellite._performSnapshot() - - // Now receive a concurrent delete of that row - // such that it deletes the row with id 1 that was initially inserted - const timestamp = txDate.getTime() - //const txTags = [ generateTag('remote', txDate) ] - - const deleteRow = { - id: 1, - value: 'local', - other: null, - } - - const deleteChange = { - type: DataChangeType.DELETE, - relation: relations['parent'], - oldRecord: deleteRow, - tags: shadowTagsRow1, - } - - // Process the incoming delete - const ddl = [addColumn, createTable] - const dml = [deleteChange] - - const migrationTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(timestamp), - changes: [...ddl, ...dml], - lsn: new Uint8Array(), - migrationVersion: '5', - } - - const rowsBeforeMigration = await fetchParentRows(adapter) - const rowsBeforeMigrationExceptConflictingRow = rowsBeforeMigration.filter( - (r) => r.id !== deleteRow.id - ) - - // For each schema change, Electric sends a `SatRelation` message - // before sending a DML operation that depends on a new or modified schema. - // The `SatRelation` message is handled by `_updateRelations` in order - // to update Satellite's relations. - // In this case, the DML operation deletes a row in `parent` table - // so we receive a `SatRelation` message for that table - await satellite._updateRelations(addColumnRelation) - - // Apply the migration transaction - await satellite._applyTransaction(migrationTx) - - // Check that the migration was successfully applied - await checkMigrationIsApplied(t) - - // The local update and remote delete happened concurrently - // Check that the update wins - const rowsAfterMigration = await fetchParentRows(adapter) - const newRowsExceptConflictingRow = rowsAfterMigration.filter( - (r) => r.id !== deleteRow.id - ) - const conflictingRow = rowsAfterMigration.find((r) => r.id === deleteRow.id) - - testSetEquality( - t, - rowsBeforeMigrationExceptConflictingRow.map((r) => { + const extendRow = (r: Row) => { return { - baz: null, ...r, + baz: null, } - }), - newRowsExceptConflictingRow - ) - - t.deepEqual(conflictingRow, { - id: 1, - value: 'still local', - other: 5, - baz: null, - }) -}) - -test.serial('apply migration and concurrent transaction', async (t) => { - const { satellite, adapter, txDate } = t.context - - const timestamp = txDate.getTime() - const remoteA = 'remoteA' - const remoteB = 'remoteB' - const txTagsRemoteA = [generateTag(remoteA, txDate)] - const txTagsRemoteB = [generateTag(remoteB, txDate)] - - const mkInsertChange = ( - record: Record, - tags: string[] - ): DataChange => { - return { - type: DataChangeType.INSERT, - relation: relations['parent'], - record: record, - oldRecord: {}, - tags: tags, } - } - - const insertRowA = { - id: 3, - value: 'remote A', - other: 8, - } - - const insertRowB = { - id: 3, - value: 'remote B', - other: 9, - } - - // Make 2 concurrent insert changes. 
- // They are concurrent because both remoteA and remoteB - // generated the changes at `timestamp` - const insertChangeA = mkInsertChange(insertRowA, txTagsRemoteA) - const insertChangeB = mkInsertChange(insertRowB, txTagsRemoteB) + const extendedRows = rowsBeforeMigration.map(extendRow) - const txA: Transaction = { - origin: remoteA, - commit_timestamp: Long.fromNumber(timestamp), - changes: [insertChangeA], - lsn: new Uint8Array(), - } - - const ddl = [addColumn, createTable] - - const txB: Transaction = { - origin: remoteB, - commit_timestamp: Long.fromNumber(timestamp), - changes: [...ddl, insertChangeB], - lsn: new Uint8Array(), - migrationVersion: '6', - } - - const rowsBeforeMigration = await fetchParentRows(adapter) - - // For each schema change, Electric sends a `SatRelation` message - // before sending a DML operation that depends on a new or modified schema. - // The `SatRelation` message is handled by `_updateRelations` in order - // to update Satellite's relations. - // In this case, the DML operation adds a row in `parent` table - // so we receive a `SatRelation` message for that table - await satellite._updateRelations(addColumnRelation) - - // Apply the concurrent transactions - await satellite._applyTransaction(txB) - await satellite._applyTransaction(txA) - - // Check that the migration was successfully applied - await checkMigrationIsApplied(t) - - // Check that one of the two insertions won - const rowsAfterMigration = await fetchParentRows(adapter) - const extendRow = (r: Row) => { - return { - ...r, - baz: null, - } - } - const extendedRows = rowsBeforeMigration.map(extendRow) - - // Check that all rows now have an additional column - t.deepEqual( - rowsAfterMigration.filter((r) => r.id !== insertRowA.id), - extendedRows - ) + // Check that all rows now have an additional column + t.deepEqual( + rowsAfterMigration.filter((r) => r.id !== insertRowA.id), + extendedRows + ) - const conflictingRow = rowsAfterMigration.find((r) => r.id === insertRowA.id) + const conflictingRow = rowsAfterMigration.find( + (r) => r.id === insertRowA.id + ) - // Now also check the row that was concurrently inserted - t.assert( - isequal(conflictingRow, extendRow(insertRowA)) || - isequal(conflictingRow, extendRow(insertRowB)) - ) -}) + // Now also check the row that was concurrently inserted + t.assert( + isequal(conflictingRow, extendRow(insertRowA)) || + isequal(conflictingRow, extendRow(insertRowB)) + ) + }) -const migrationWithFKs: SchemaChange[] = [ - { - migrationType: SatOpMigrate_Type.CREATE_TABLE, - sql: ` - CREATE TABLE "test_items" ( + const migrationWithFKs: SchemaChange[] = [ + { + migrationType: SatOpMigrate_Type.CREATE_TABLE, + sql: ` + CREATE TABLE main."test_items" ( "id" TEXT NOT NULL, CONSTRAINT "test_items_pkey" PRIMARY KEY ("id") - ) WITHOUT ROWID; + ); `, - table: { - name: 'test_items', - columns: [ - { - name: 'id', - sqliteType: 'TEXT', - pgType: { name: 'TEXT', array: [], size: [] }, - }, - ], - fks: [], - pks: ['id'], + table: { + name: 'test_items', + columns: [ + { + name: 'id', + sqliteType: 'TEXT', + pgType: { name: 'TEXT', array: [], size: [] }, + }, + ], + fks: [], + pks: ['id'], + }, }, - }, - { - migrationType: SatOpMigrate_Type.CREATE_TABLE, - sql: ` - CREATE TABLE "test_other_items" ( + { + migrationType: SatOpMigrate_Type.CREATE_TABLE, + sql: ` + CREATE TABLE main."test_other_items" ( "id" TEXT NOT NULL, "item_id" TEXT, -- CONSTRAINT "test_other_items_item_id_fkey" FOREIGN KEY ("item_id") REFERENCES "test_items" ("id"), CONSTRAINT 
"test_other_items_pkey" PRIMARY KEY ("id") - ) WITHOUT ROWID; + ); `, - table: { - name: 'test_other_items', - columns: [ - { - name: 'id', - sqliteType: 'TEXT', - pgType: { name: 'TEXT', array: [], size: [] }, - }, - { - name: 'item_id', - sqliteType: 'TEXT', - pgType: { name: 'TEXT', array: [], size: [] }, - }, - ], - fks: [ - { - $type: 'Electric.Satellite.SatOpMigrate.ForeignKey', - fkCols: ['item_id'], - pkTable: 'test_items', - pkCols: ['id'], - }, - ], - pks: ['id'], + table: { + name: 'test_other_items', + columns: [ + { + name: 'id', + sqliteType: 'TEXT', + pgType: { name: 'TEXT', array: [], size: [] }, + }, + { + name: 'item_id', + sqliteType: 'TEXT', + pgType: { name: 'TEXT', array: [], size: [] }, + }, + ], + fks: [ + { + $type: 'Electric.Satellite.SatOpMigrate.ForeignKey', + fkCols: ['item_id'], + pkTable: 'test_items', + pkCols: ['id'], + }, + ], + pks: ['id'], + }, }, - }, -] + ] -test.serial('apply another migration', async (t) => { - const { satellite } = t.context + test.serial('apply another migration', async (t) => { + const { satellite } = t.context - const migrationTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(new Date().getTime()), - changes: migrationWithFKs, - lsn: new Uint8Array(), - // starts at 3, because the app already defines 2 migrations - // (see test/support/migrations/migrations.js) - // which are loaded when Satellite is started - migrationVersion: '3', - } + const migrationTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(new Date().getTime()), + changes: migrationWithFKs, + lsn: new Uint8Array(), + // starts at 3, because the app already defines 2 migrations + // (see test/support/migrations/migrations.js) + // which are loaded when Satellite is started + migrationVersion: '3', + } - // Apply the migration transaction - try { - await satellite._applyTransaction(migrationTx) - } catch (e) { - console.error(e) - throw e - } + // Apply the migration transaction + try { + await satellite._applyTransaction(migrationTx) + } catch (e) { + console.error(e) + throw e + } - await assertDbHasTables(t, 'test_items', 'test_other_items') - t.pass() -}) + await assertDbHasTables(t, 'test_items', 'test_other_items') + t.pass() + }) +} diff --git a/clients/typescript/test/satellite/sqlite/process.migration.test.ts b/clients/typescript/test/satellite/sqlite/process.migration.test.ts new file mode 100644 index 0000000000..505d75bddd --- /dev/null +++ b/clients/typescript/test/satellite/sqlite/process.migration.test.ts @@ -0,0 +1,21 @@ +import testAny, { TestFn } from 'ava' +import { cleanAndStopSatellite, makeContext } from '../common' +import { getMatchingShadowEntries as getSQLiteMatchingShadowEntries } from '../../support/satellite-helpers' +import { sqliteBuilder } from '../../../src/migrators/query-builder' +import { + ContextType, + commonSetup, + processMigrationTests, +} from '../process.migration.test' + +const test = testAny as TestFn + +test.beforeEach(async (t) => { + await makeContext(t) + t.context.getMatchingShadowEntries = getSQLiteMatchingShadowEntries + t.context.builder = sqliteBuilder + await commonSetup(t) +}) +test.afterEach.always(cleanAndStopSatellite) + +processMigrationTests(test) From 9ade95f71c4e710f5b7c845d5dcab2a196d7ccee Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 22 Feb 2024 11:12:13 +0100 Subject: [PATCH 017/156] Refactored unit test for process --- .../test/satellite/postgres/process.test.ts | 1940 +--------- .../typescript/test/satellite/process.test.ts | 3185 +++++++++-------- 
.../test/satellite/sqlite/process.test.ts | 18 + 3 files changed, 1622 insertions(+), 3521 deletions(-) create mode 100644 clients/typescript/test/satellite/sqlite/process.test.ts diff --git a/clients/typescript/test/satellite/postgres/process.test.ts b/clients/typescript/test/satellite/postgres/process.test.ts index 9cd0e8a6c2..fd2a1be5f7 100644 --- a/clients/typescript/test/satellite/postgres/process.test.ts +++ b/clients/typescript/test/satellite/postgres/process.test.ts @@ -1,1949 +1,23 @@ import anyTest, { TestFn } from 'ava' -import { - MOCK_BEHIND_WINDOW_LSN, - MOCK_INTERNAL_ERROR, - MockSatelliteClient, -} from '../../../src/satellite/mock' -import { QualifiedTablename } from '../../../src/util/tablename' -import { sleepAsync } from '../../../src/util/timer' +import { getPgMatchingShadowEntries } from '../../support/satellite-helpers' -import { - OPTYPES, - localOperationsToTableChanges, - fromTransaction, - OplogEntry, - toTransactions, - generateTag, - encodeTags, - opLogEntryToChange, -} from '../../../src/satellite/oplog' -import { SatelliteProcess } from '../../../src/satellite/process' +import { makePgContext, cleanAndStopSatellite } from '../common' -import { - loadSatelliteMetaTable, - generateLocalOplogEntry, - generateRemoteOplogEntry, - genEncodedTags, - getMatchingShadowEntries, -} from '../../support/satellite-helpers' -import Long from 'long' -import { - DataChangeType, - DataTransaction, - SatelliteError, - SatelliteErrorCode, -} from '../../../src/util/types' -import { - makePgContext, - opts, - relations, - cleanAndStopSatellite, - ContextType, -} from '../common' -import { - DEFAULT_LOG_POS, - numberToBytes, - base64, -} from '../../../src/util/common' - -import { - ClientShapeDefinition, - SubscriptionData, -} from '../../../src/satellite/shapes/types' -import { mergeEntries } from '../../../src/satellite/merge' -import { MockSubscriptionsManager } from '../../../src/satellite/shapes/manager' import { pgBuilder } from '../../../src/migrators/query-builder' - -const parentRecord = { - id: 1, - value: 'incoming', - other: 1, -} - -const childRecord = { - id: 1, - parent: 1, -} +import { processTests, ContextType } from '../process.test' let port = 5200 // Run all tests in this file serially // because there are a lot of tests // and it would lead to PG running out of shared memory const test = anyTest.serial as TestFn +test.serial = test // because the common test file uses `test.serial` for some tests (but for PG all tests are serial) test.beforeEach(async (t) => { await makePgContext(t, port++) + t.context.builder = pgBuilder + t.context.getMatchingShadowEntries = getPgMatchingShadowEntries }) test.afterEach.always(cleanAndStopSatellite) -const qualifiedParentTableName = new QualifiedTablename( - 'main', - 'parent' -).toString() -const builder = pgBuilder - -test('setup starts a satellite process', async (t) => { - t.true(t.context.satellite instanceof SatelliteProcess) -}) - -test('start creates system tables', async (t) => { - const { adapter, satellite, authState } = t.context - - await satellite.start(authState) - - const rows = await adapter.query(builder.getLocalTableNames()) - const names = rows.map((row) => row.name) - - t.true(names.includes('_electric_oplog')) -}) - -test('load metadata', async (t) => { - const { adapter, runMigrations } = t.context - await runMigrations() - - const meta = await loadSatelliteMetaTable(adapter) - t.deepEqual(meta, { - compensations: '1', - lsn: '', - clientId: '', - subscriptions: '', - }) -}) - -test('set persistent 
client id', async (t) => { - const { satellite, authState } = t.context - - await satellite.start(authState) - const clientId1 = satellite._authState!.clientId - t.truthy(clientId1) - await satellite.stop() - - await satellite.start(authState) - - const clientId2 = satellite._authState!.clientId - t.truthy(clientId2) - t.assert(clientId1 === clientId2) -}) - -test('cannot UPDATE primary key', async (t) => { - const { adapter, runMigrations } = t.context - await runMigrations() - - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) - await t.throwsAsync( - adapter.run({ sql: `UPDATE main.parent SET id='3' WHERE id = '1'` }), - { - code: 'P0001', - } - ) -}) - -test('snapshot works', async (t) => { - const { satellite } = t.context - const { adapter, notifier, runMigrations, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) - - let snapshotTimestamp = await satellite._performSnapshot() - - const clientId = satellite._authState!.clientId - let shadowTags = encodeTags([generateTag(clientId, snapshotTimestamp)]) - - var shadowRows = await adapter.query({ - sql: `SELECT tags FROM main._electric_shadow`, - }) - t.is(shadowRows.length, 2) - for (const row of shadowRows) { - t.is(row.tags, shadowTags) - } - - t.is(notifier.notifications.length, 1) - - const { changes } = notifier.notifications[0] - const expectedChange = { - qualifiedTablename: new QualifiedTablename('main', 'parent'), - rowids: [1, 2], - } - - t.deepEqual(changes, [expectedChange]) -}) - -test('(regression) performSnapshot cant be called concurrently', async (t) => { - const { authState, satellite, runMigrations } = t.context - await runMigrations() - await satellite._setAuthState(authState) - - await t.throwsAsync( - async () => { - const run = satellite.adapter.run.bind(satellite.adapter) - satellite.adapter.run = (stmt) => - new Promise((res) => setTimeout(() => run(stmt).then(res), 100)) - - const p1 = satellite._performSnapshot() - const p2 = satellite._performSnapshot() - await Promise.all([p1, p2]) - }, - { - instanceOf: SatelliteError, - code: SatelliteErrorCode.INTERNAL, - message: 'already performing snapshot', - } - ) -}) - -test('(regression) throttle with mutex prevents race when snapshot is slow', async (t) => { - const { authState, satellite, runMigrations } = t.context - await runMigrations() - await satellite._setAuthState(authState) - - // delay termination of _performSnapshot - const run = satellite.adapter.run.bind(satellite.adapter) - satellite.adapter.run = (stmt) => - new Promise((res) => setTimeout(() => run(stmt).then(res), 100)) - - const p1 = satellite._throttledSnapshot() - const p2 = new Promise((res) => { - // call snapshot after throttle time has expired - setTimeout(() => satellite._throttledSnapshot()?.then(res), 50) - }) - - await t.notThrowsAsync(async () => { - await p1 - await p2 - }) -}) - -test('starting and stopping the process works', async (t) => { - const { adapter, notifier, runMigrations, satellite, authState } = t.context - await runMigrations() - - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) - - const conn = await satellite.start(authState) - await conn.connectionPromise - - await sleepAsync(opts.pollingInterval) - - // connect, 1st txn - t.is(notifier.notifications.length, 2) - - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('3'),('4')` }) - await sleepAsync(opts.pollingInterval) - - // 2nd 
txm - t.is(notifier.notifications.length, 3) - - await satellite.stop() - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('5'),('6')` }) - await sleepAsync(opts.pollingInterval) - - // no txn notified - t.is(notifier.notifications.length, 4) - - const conn1 = await satellite.start(authState) - await conn1.connectionPromise - await sleepAsync(opts.pollingInterval) - - // connect, 4th txn - t.is(notifier.notifications.length, 6) -}) - -test('snapshots on potential data change', async (t) => { - const { adapter, notifier, runMigrations } = t.context - await runMigrations() - - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) - - t.is(notifier.notifications.length, 0) - - await notifier.potentiallyChanged() - - t.is(notifier.notifications.length, 1) -}) - -// INSERT after DELETE shall nullify all non explicitly set columns -// If last operation is a DELETE, concurrent INSERT shall resurrect deleted -// values as in 'INSERT wins over DELETE and restored deleted values' -test('snapshot of INSERT after DELETE', async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context - - await runMigrations() - - await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1,'val1')`, - }) - await adapter.run({ sql: `DELETE FROM main.parent WHERE id=1` }) - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES (1)` }) - - await satellite._setAuthState(authState) - await satellite._performSnapshot() - const entries = await satellite._getEntries() - const clientId = satellite._authState!.clientId - - const merged = localOperationsToTableChanges( - entries, - (timestamp: Date) => { - return generateTag(clientId, timestamp) - }, - relations - ) - const [_, keyChanges] = merged[qualifiedParentTableName]['{"id":1}'] - const resultingValue = keyChanges.changes.value.value - t.is(resultingValue, null) -}) - -test('snapshot of INSERT with bigint', async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context - - await runMigrations() - - await adapter.run({ - sql: `INSERT INTO main."bigIntTable"(value) VALUES (1)`, - }) - - await satellite._setAuthState(authState) - await satellite._performSnapshot() - const entries = await satellite._getEntries() - const clientId = satellite._authState!.clientId - - const merged = localOperationsToTableChanges( - entries, - (timestamp: Date) => { - return generateTag(clientId, timestamp) - }, - relations - ) - const qualifiedTableName = new QualifiedTablename( - 'main', - 'bigIntTable' - ).toString() - const [_, keyChanges] = merged[qualifiedTableName]['{"value":"1"}'] - const resultingValue = keyChanges.changes.value.value - t.is(resultingValue, 1n) -}) - -test('take snapshot and merge local wins', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - - const incomingTs = new Date().getTime() - 1 - const incomingEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - incomingTs, - encodeTags([generateTag('remote', new Date(incomingTs))]), - { - id: 1, - value: 'incoming', - } - ) - await adapter.run({ - sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, - }) - - await satellite._setAuthState(authState) - const localTime = await satellite._performSnapshot() - const clientId = satellite._authState!.clientId - - const local = await satellite._getEntries() - const localTimestamp = new Date(local[0].timestamp).getTime() - const merged = mergeEntries( - clientId, - 
local, - 'remote', - [incomingEntry], - relations - ) - const item = merged[qualifiedParentTableName]['{"id":1}'] - - t.deepEqual(item, { - namespace: 'main', - tablename: 'parent', - primaryKeyCols: { id: 1 }, - optype: OPTYPES.upsert, - changes: { - id: { value: 1, timestamp: localTimestamp }, - value: { value: 'local', timestamp: localTimestamp }, - other: { value: 1, timestamp: localTimestamp }, - }, - fullRow: { - id: 1, - value: 'local', - other: 1, - }, - tags: [ - generateTag(clientId, localTime), - generateTag('remote', new Date(incomingTs)), - ], - }) -}) - -test('take snapshot and merge incoming wins', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - - await adapter.run({ - sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, - }) - - await satellite._setAuthState(authState) - const clientId = satellite._authState!.clientId - await satellite._performSnapshot() - - const local = await satellite._getEntries() - const localTimestamp = new Date(local[0].timestamp).getTime() - - const incomingTs = localTimestamp + 1 - const incomingEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - incomingTs, - genEncodedTags('remote', [incomingTs]), - { - id: 1, - value: 'incoming', - } - ) - - const merged = mergeEntries( - clientId, - local, - 'remote', - [incomingEntry], - relations - ) - const item = merged[qualifiedParentTableName]['{"id":1}'] - - t.deepEqual(item, { - namespace: 'main', - tablename: 'parent', - primaryKeyCols: { id: 1 }, - optype: OPTYPES.upsert, - changes: { - id: { value: 1, timestamp: incomingTs }, - value: { value: 'incoming', timestamp: incomingTs }, - other: { value: 1, timestamp: localTimestamp }, - }, - fullRow: { - id: 1, - value: 'incoming', - other: 1, - }, - tags: [ - generateTag(clientId, new Date(localTimestamp)), - generateTag('remote', new Date(incomingTs)), - ], - }) -}) - -test('merge incoming wins on persisted ops', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - satellite.relations = relations - - // This operation is persisted - await adapter.run({ - sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, - }) - await satellite._performSnapshot() - const [originalInsert] = await satellite._getEntries() - const [tx] = toTransactions([originalInsert], satellite.relations) - tx.origin = authState.clientId - await satellite._applyTransaction(tx) - - // Verify that GC worked as intended and the oplog entry was deleted - t.deepEqual(await satellite._getEntries(), []) - - // This operation is done offline - await adapter.run({ - sql: `UPDATE main.parent SET value = 'new local' WHERE id = 1`, - }) - await satellite._performSnapshot() - const [offlineInsert] = await satellite._getEntries() - const offlineTimestamp = new Date(offlineInsert.timestamp).getTime() - - // This operation is done concurrently with offline but at a later point in time. 
It's sent immediately on connection - const incomingTs = offlineTimestamp + 1 - const firstIncomingEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.update, - incomingTs, - genEncodedTags('remote', [incomingTs]), - { id: 1, value: 'incoming' }, - { id: 1, value: 'local' } - ) - - const firstIncomingTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(incomingTs), - changes: [opLogEntryToChange(firstIncomingEntry, satellite.relations)], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(firstIncomingTx) - - const [{ value: value1 }] = await adapter.query({ - sql: 'SELECT value FROM main.parent WHERE id = 1', - }) - t.is( - value1, - 'incoming', - 'LWW conflict merge of the incoming transaction should lead to incoming operation winning' - ) - - // And after the offline transaction was sent, the resolved no-op transaction comes in - const secondIncomingEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.update, - offlineTimestamp, - encodeTags([ - generateTag('remote', incomingTs), - generateTag(authState.clientId, offlineTimestamp), - ]), - { id: 1, value: 'incoming' }, - { id: 1, value: 'incoming' } - ) - - const secondIncomingTx = { - origin: authState.clientId, - commit_timestamp: Long.fromNumber(offlineTimestamp), - changes: [opLogEntryToChange(secondIncomingEntry, satellite.relations)], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(secondIncomingTx) - - const [{ value: value2 }] = await adapter.query({ - sql: 'SELECT value FROM main.parent WHERE id = 1', - }) - t.is( - value2, - 'incoming', - 'Applying the resolved write from the round trip should be a no-op' - ) -}) - -test('apply does not add anything to oplog', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await adapter.run({ - sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', null)`, - }) - - await satellite._setAuthState(authState) - const clientId = satellite._authState!.clientId - - const localTimestamp = await satellite._performSnapshot() - - const incomingTs = new Date().getTime() - const incomingEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - incomingTs, - genEncodedTags('remote', [incomingTs]), - { - id: 1, - value: 'incoming', - other: 1, - } - ) - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s - - const incomingChange = opLogEntryToChange(incomingEntry, relations) - const incomingTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(incomingTs), - changes: [incomingChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(incomingTx) - - await satellite._performSnapshot() - - const sql = 'SELECT * from main.parent WHERE id=1' - const [row] = await adapter.query({ sql }) - t.is(row.value, 'incoming') - t.is(row.other, 1) - - const localEntries = await satellite._getEntries() - const shadowEntry = await getMatchingShadowEntries( - adapter, - localEntries[0], - builder - ) - - t.deepEqual( - encodeTags([ - generateTag(clientId, new Date(localTimestamp)), - generateTag('remote', new Date(incomingTs)), - ]), - shadowEntry[0].tags - ) - - //t.deepEqual(shadowEntries, shadowEntries2) - t.is(localEntries.length, 1) -}) - -test('apply incoming with no local', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - - const incomingTs = 
new Date() - const incomingEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.delete, - incomingTs.getTime(), - genEncodedTags('remote', []), - { - id: 1, - value: 'incoming', - otherValue: 1, - } - ) - - satellite.relations = relations // satellite must be aware of the relations in order to deserialise oplog entries - - await satellite._setAuthState(authState) - await satellite._apply([incomingEntry], 'remote') - - const sql = 'SELECT * from main.parent WHERE id=1' - const rows = await adapter.query({ sql }) - const shadowEntries = await getMatchingShadowEntries( - adapter, - undefined, - builder - ) - - t.is(shadowEntries.length, 0) - t.is(rows.length, 0) -}) - -test('apply empty incoming', async (t) => { - const { runMigrations, satellite, authState } = t.context - await runMigrations() - - await satellite._setAuthState(authState) - await satellite._apply([], 'external') - - t.true(true) -}) - -test('apply incoming with null on column with default', async (t) => { - const { runMigrations, satellite, adapter, tableInfo, authState } = t.context - await runMigrations() - - const incomingTs = new Date().getTime() - const incomingEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - incomingTs, - genEncodedTags('remote', [incomingTs]), - { - id: 1234, - value: 'incoming', - other: null, - } - ) - - await satellite._setAuthState(authState) - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s - - const incomingChange = opLogEntryToChange(incomingEntry, relations) - const incomingTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(incomingTs), - changes: [incomingChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(incomingTx) - - const sql = `SELECT * from main.parent WHERE value='incoming'` - const rows = await adapter.query({ sql }) - - t.is(rows[0].other, null) - t.pass() -}) - -test('apply incoming with undefined on column with default', async (t) => { - const { runMigrations, satellite, adapter, tableInfo, authState } = t.context - await runMigrations() - - const incomingTs = new Date().getTime() - const incomingEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - incomingTs, - genEncodedTags('remote', [incomingTs]), - { - id: 1234, - value: 'incoming', - } - ) - - await satellite._setAuthState(authState) - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s - - const incomingChange = opLogEntryToChange(incomingEntry, relations) - const incomingTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(incomingTs), - changes: [incomingChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(incomingTx) - - const sql = `SELECT * from main.parent WHERE value='incoming'` - const rows = await adapter.query({ sql }) - - t.is(rows[0].other, 0) - t.pass() -}) - -test('INSERT wins over DELETE and restored deleted values', async (t) => { - const { runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - const clientId = satellite._authState!.clientId - - const localTs = new Date().getTime() - const incomingTs = localTs + 1 - - const incoming = [ - generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - incomingTs, - genEncodedTags('remote', [incomingTs]), - { - id: 1, - other: 1, - } - ), - 
generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.delete, - incomingTs, - genEncodedTags('remote', []), - { - id: 1, - } - ), - ] - - const local = [ - generateLocalOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - localTs, - genEncodedTags(clientId, [localTs]), - { - id: 1, - value: 'local', - other: null, - } - ), - ] - - const merged = mergeEntries(clientId, local, 'remote', incoming, relations) - const item = merged[qualifiedParentTableName]['{"id":1}'] - - t.deepEqual(item, { - namespace: 'main', - tablename: 'parent', - primaryKeyCols: { id: 1 }, - optype: OPTYPES.upsert, - changes: { - id: { value: 1, timestamp: incomingTs }, - value: { value: 'local', timestamp: localTs }, - other: { value: 1, timestamp: incomingTs }, - }, - fullRow: { - id: 1, - value: 'local', - other: 1, - }, - tags: [ - generateTag(clientId, new Date(localTs)), - generateTag('remote', new Date(incomingTs)), - ], - }) -}) - -test('concurrent updates take all changed values', async (t) => { - const { runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - const clientId = satellite._authState!.clientId - - const localTs = new Date().getTime() - const incomingTs = localTs + 1 - - const incoming = [ - generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.update, - incomingTs, - genEncodedTags('remote', [incomingTs]), - { - id: 1, - value: 'remote', // the only modified column - other: 0, - }, - { - id: 1, - value: 'local', - other: 0, - } - ), - ] - - const local = [ - generateLocalOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.update, - localTs, - genEncodedTags(clientId, [localTs]), - { - id: 1, - value: 'local', - other: 1, // the only modified column - }, - { - id: 1, - value: 'local', - other: 0, - } - ), - ] - - const merged = mergeEntries(clientId, local, 'remote', incoming, relations) - const item = merged[qualifiedParentTableName]['{"id":1}'] - - // The incoming entry modified the value of the `value` column to `'remote'` - // The local entry concurrently modified the value of the `other` column to 1. - // The merged entries should have `value = 'remote'` and `other = 1`. 
- t.deepEqual(item, { - namespace: 'main', - tablename: 'parent', - primaryKeyCols: { id: 1 }, - optype: OPTYPES.upsert, - changes: { - value: { value: 'remote', timestamp: incomingTs }, - other: { value: 1, timestamp: localTs }, - }, - fullRow: { - id: 1, - value: 'remote', - other: 1, - }, - tags: [ - generateTag(clientId, new Date(localTs)), - generateTag('remote', new Date(incomingTs)), - ], - }) -}) - -test('merge incoming with empty local', async (t) => { - const { runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - const clientId = satellite._authState!.clientId - - const localTs = new Date().getTime() - const incomingTs = localTs + 1 - - const incoming = [ - generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - incomingTs, - genEncodedTags('remote', [incomingTs]), - { - id: 1, - }, - undefined - ), - ] - - const local: OplogEntry[] = [] - const merged = mergeEntries(clientId, local, 'remote', incoming, relations) - const item = merged[qualifiedParentTableName]['{"id":1}'] - - t.deepEqual(item, { - namespace: 'main', - tablename: 'parent', - primaryKeyCols: { id: 1 }, - optype: OPTYPES.upsert, - changes: { - id: { value: 1, timestamp: incomingTs }, - }, - fullRow: { - id: 1, - }, - tags: [generateTag('remote', new Date(incomingTs))], - }) -}) - -test('compensations: referential integrity is enforced', async (t) => { - const { adapter, runMigrations, satellite } = t.context - await runMigrations() - - await satellite._setMeta('compensations', 0) - await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, - }) - - await t.throwsAsync( - adapter.run({ sql: `INSERT INTO main.child(id, parent) VALUES (1, 2)` }), - { - code: '23503', - } - ) -}) - -test('compensations: incoming operation breaks referential integrity', async (t) => { - const { runMigrations, satellite, tableInfo, timestamp, authState } = - t.context - await runMigrations() - - await satellite._setMeta('compensations', 0) - await satellite._setAuthState(authState) - - const incoming = generateLocalOplogEntry( - tableInfo, - 'main', - 'child', - OPTYPES.insert, - timestamp, - genEncodedTags('remote', [timestamp]), - { - id: 1, - parent: 1, - } - ) - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s - - const incomingChange = opLogEntryToChange(incoming, relations) - const incomingTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(timestamp), - changes: [incomingChange], - lsn: new Uint8Array(), - } - - await t.throwsAsync(satellite._applyTransaction(incomingTx), { - code: '23503', - }) -}) - -test('compensations: incoming operations accepted if restore referential integrity', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, timestamp, authState } = - t.context - await runMigrations() - - await satellite._setMeta('compensations', 0) - await satellite._setAuthState(authState) - const clientId = satellite._authState!.clientId - - const childInsertEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'child', - OPTYPES.insert, - timestamp, - genEncodedTags(clientId, [timestamp]), - { - id: 1, - parent: 1, - } - ) - - const parentInsertEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - timestamp, - genEncodedTags(clientId, [timestamp]), - { - id: 1, - } - ) - - await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, - }) - 
await adapter.run({ sql: `DELETE FROM main.parent WHERE id=1` }) - - await satellite._performSnapshot() - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s - - const childInsertChange = opLogEntryToChange(childInsertEntry, relations) - const parentInsertChange = opLogEntryToChange(parentInsertEntry, relations) - const insertChildAndParentTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(new Date().getTime()), // timestamp is not important for this test, it is only used to GC the oplog - changes: [parentInsertChange, childInsertChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(insertChildAndParentTx) - - const rows = await adapter.query({ - sql: `SELECT * from main.parent WHERE id=1`, - }) - - // Not only does the parent exist. - t.is(rows.length, 1) - - // But it's also recreated with deleted values. - t.is(rows[0].value, '1') -}) - -test('compensations: using triggers with flag 0', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - - await satellite._setMeta('compensations', 0) - - await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, - }) - await satellite._setAuthState(authState) - const ts = await satellite._performSnapshot() - await satellite._garbageCollectOplog(ts) - - await adapter.run({ sql: `INSERT INTO main.child(id, parent) VALUES (1, 1)` }) - await satellite._performSnapshot() - - const timestamp = new Date().getTime() - const incoming = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.delete, - timestamp, - genEncodedTags('remote', []), - { - id: 1, - } - ) - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s - - const incomingChange = opLogEntryToChange(incoming, relations) - const incomingTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(timestamp), - changes: [incomingChange], - lsn: new Uint8Array(), - } - - await t.throwsAsync(satellite._applyTransaction(incomingTx), { - code: '23503', - }) -}) -test('compensations: using triggers with flag 1', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - - await satellite._setMeta('compensations', 1) - - await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, - }) - await satellite._setAuthState(authState) - const ts = await satellite._performSnapshot() - await satellite._garbageCollectOplog(ts) - - await adapter.run({ sql: `INSERT INTO main.child(id, parent) VALUES (1, 1)` }) - await satellite._performSnapshot() - - const timestamp = new Date().getTime() - const incoming = [ - generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.delete, - timestamp, - genEncodedTags('remote', []), - { - id: 1, - } - ), - ] - - satellite.relations = relations // satellite must be aware of the relations in order to deserialise oplog entries - - await satellite._apply(incoming, 'remote') - t.pass() -}) - -test('get oplogEntries from transaction', async (t) => { - const { runMigrations, satellite } = t.context - await runMigrations() - - const relations = await satellite['_getLocalRelations']() - - const transaction: DataTransaction = { - lsn: DEFAULT_LOG_POS, - commit_timestamp: Long.UZERO, - changes: [ - { - relation: relations.parent, - type: DataChangeType.INSERT, - record: { id: 0 }, - tags: [], // proper values are 
not relevent here - }, - ], - } - - const expected: OplogEntry = { - namespace: 'main', - tablename: 'parent', - optype: 'INSERT', - newRow: '{"id":0}', - oldRow: undefined, - primaryKey: '{"id":0}', - rowid: -1, - timestamp: '1970-01-01T00:00:00.000Z', - clearTags: encodeTags([]), - } - - const opLog = fromTransaction(transaction, relations) - t.deepEqual(opLog[0], expected) -}) - -test('get transactions from opLogEntries', async (t) => { - const { runMigrations } = t.context - await runMigrations() - - const opLogEntries: OplogEntry[] = [ - { - namespace: 'public', - tablename: 'parent', - optype: 'INSERT', - newRow: '{"id":0}', - oldRow: undefined, - primaryKey: '{"id":0}', - rowid: 1, - timestamp: '1970-01-01T00:00:00.000Z', - clearTags: encodeTags([]), - }, - { - namespace: 'public', - tablename: 'parent', - optype: 'UPDATE', - newRow: '{"id":1}', - oldRow: '{"id":1}', - primaryKey: '{"id":1}', - rowid: 2, - timestamp: '1970-01-01T00:00:00.000Z', - clearTags: encodeTags([]), - }, - { - namespace: 'public', - tablename: 'parent', - optype: 'INSERT', - newRow: '{"id":2}', - oldRow: undefined, - primaryKey: '{"id":0}', - rowid: 3, - timestamp: '1970-01-01T00:00:01.000Z', - clearTags: encodeTags([]), - }, - ] - - const expected = [ - { - lsn: numberToBytes(2), - commit_timestamp: Long.UZERO, - changes: [ - { - relation: relations.parent, - type: DataChangeType.INSERT, - record: { id: 0 }, - oldRecord: undefined, - tags: [], - }, - { - relation: relations.parent, - type: DataChangeType.UPDATE, - record: { id: 1 }, - oldRecord: { id: 1 }, - tags: [], - }, - ], - }, - { - lsn: numberToBytes(3), - commit_timestamp: Long.UZERO.add(1000), - changes: [ - { - relation: relations.parent, - type: DataChangeType.INSERT, - record: { id: 2 }, - oldRecord: undefined, - tags: [], - }, - ], - }, - ] - - const opLog = toTransactions(opLogEntries, relations) - t.deepEqual(opLog, expected) -}) - -test('handling connectivity state change stops queueing operations', async (t) => { - const { runMigrations, satellite, adapter, authState } = t.context - await runMigrations() - await satellite.start(authState) - - adapter.run({ - sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, - }) - - await satellite._performSnapshot() - - // We should have sent (or at least enqueued to send) one row - const sentLsn = satellite.client.getLastSentLsn() - t.deepEqual(sentLsn, numberToBytes(1)) - - await satellite._handleConnectivityStateChange('disconnected') - - adapter.run({ - sql: `INSERT INTO main.parent(id, value, other) VALUES (2, 'local', 1)`, - }) - - await satellite._performSnapshot() - - // Since connectivity is down, that row isn't yet sent - const lsn1 = satellite.client.getLastSentLsn() - t.deepEqual(lsn1, sentLsn) - - // Once connectivity is restored, we will immediately run a snapshot to send pending rows - await satellite._handleConnectivityStateChange('available') - await sleepAsync(200) // Wait for snapshot to run - const lsn2 = satellite.client.getLastSentLsn() - t.deepEqual(lsn2, numberToBytes(2)) -}) - -test('garbage collection is triggered when transaction from the same origin is replicated', async (t) => { - const { satellite } = t.context - const { runMigrations, adapter, authState } = t.context - await runMigrations() - await satellite.start(authState) - - adapter.run({ - sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1);`, - }) - adapter.run({ - sql: `UPDATE main.parent SET value = 'local', other = 2 WHERE id = 1;`, - }) - - // Before snapshot, we didn't send 
anything - const lsn1 = satellite.client.getLastSentLsn() - t.deepEqual(lsn1, numberToBytes(0)) - - // Snapshot sends these oplog entries - await satellite._performSnapshot() - const lsn2 = satellite.client.getLastSentLsn() - t.deepEqual(lsn2, numberToBytes(2)) - - const old_oplog = await satellite._getEntries() - const transactions = toTransactions(old_oplog, relations) - transactions[0].origin = satellite._authState!.clientId - - // Transaction containing these oplogs is applies, which means we delete them - await satellite._applyTransaction(transactions[0]) - const new_oplog = await satellite._getEntries() - t.deepEqual(new_oplog, []) -}) - -// stub client and make satellite throw the error with option off/succeed with option on -test('clear database on BEHIND_WINDOW', async (t) => { - const { satellite } = t.context - const { runMigrations, authState } = t.context - await runMigrations() - - const base64lsn = base64.fromBytes(numberToBytes(MOCK_BEHIND_WINDOW_LSN)) - await satellite._setMeta('lsn', base64lsn) - try { - const conn = await satellite.start(authState) - await conn.connectionPromise - const lsnAfter = await satellite._getMeta('lsn') - t.not(lsnAfter, base64lsn) - } catch (e) { - t.fail('start should not throw') - } - - // TODO: test clear subscriptions -}) - -test('throw other replication errors', async (t) => { - t.plan(2) - const { satellite } = t.context - const { runMigrations, authState } = t.context - await runMigrations() - - const base64lsn = base64.fromBytes(numberToBytes(MOCK_INTERNAL_ERROR)) - await satellite._setMeta('lsn', base64lsn) - - const conn = await satellite.start(authState) - return Promise.all( - [satellite['initializing']?.waitOn(), conn.connectionPromise].map((p) => - p?.catch((e: SatelliteError) => { - t.is(e.code, SatelliteErrorCode.INTERNAL) - }) - ) - ) -}) - -test('apply shape data and persist subscription', async (t) => { - const { client, satellite, adapter, notifier } = t.context - const { runMigrations, authState } = t.context - await runMigrations() - - const namespace = 'main' - const tablename = 'parent' - const qualified = new QualifiedTablename(namespace, tablename) - - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData(tablename, parentRecord) - - const conn = await satellite.start(authState) - await conn.connectionPromise - - const shapeDef: ClientShapeDefinition = { - selects: [{ tablename }], - } - - satellite!.relations = relations - const { synced } = await satellite.subscribe([shapeDef]) - await synced - - // first notification is 'connected' - t.is(notifier.notifications.length, 2) - t.is(notifier.notifications[1].changes.length, 1) - t.deepEqual(notifier.notifications[1].changes[0], { - qualifiedTablename: qualified, - rowids: [], - }) - - // wait for process to apply shape data - const qualifiedTableName = `"${namespace}"."${tablename}"` - try { - const row = await adapter.query({ - sql: `SELECT id FROM ${qualifiedTableName}`, - }) - t.is(row.length, 1) - - const shadowRows = await adapter.query({ - sql: `SELECT tags FROM main._electric_shadow`, - }) - t.is(shadowRows.length, 1) - - const subsMeta = await satellite._getMeta('subscriptions') - const subsObj = JSON.parse(subsMeta) - t.is(Object.keys(subsObj).length, 1) - - // Check that we save the LSN sent by the mock - t.deepEqual(satellite._lsn, base64.toBytes('MTIz')) - } catch (e) { - t.fail(JSON.stringify(e)) - } -}) - -test('(regression) shape subscription succeeds even if subscription data is delivered before 
the SatSubsReq RPC call receives its SatSubsResp answer', async (t) => { - const { client, satellite } = t.context - const { runMigrations, authState } = t.context - await runMigrations() - - const tablename = 'parent' - - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData(tablename, parentRecord) - - const conn = await satellite.start(authState) - await conn.connectionPromise - - const shapeDef: ClientShapeDefinition = { - selects: [{ tablename }], - } - - satellite!.relations = relations - - // Enable the deliver first flag in the mock client - // such that the subscription data is delivered before the - // subscription promise is resolved - const mockClient = satellite.client as MockSatelliteClient - mockClient.enableDeliverFirst() - - const { synced } = await satellite.subscribe([shapeDef]) - await synced - - t.pass() -}) - -test('multiple subscriptions for the same shape are deduplicated', async (t) => { - const { client, satellite } = t.context - const { runMigrations, authState } = t.context - await runMigrations() - - const tablename = 'parent' - - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData(tablename, parentRecord) - - const conn = await satellite.start(authState) - await conn.connectionPromise - - const shapeDef: ClientShapeDefinition = { - selects: [{ tablename }], - } - - satellite!.relations = relations - - // We want none of these cases to throw - await t.notThrowsAsync(async () => { - // We should dedupe subscriptions that are done at the same time - const [sub1, sub2] = await Promise.all([ - satellite.subscribe([shapeDef]), - satellite.subscribe([shapeDef]), - ]) - // That are done after first await but before the data - const sub3 = await satellite.subscribe([shapeDef]) - // And that are done after previous data is resolved - await Promise.all([sub1.synced, sub2.synced, sub3.synced]) - const sub4 = await satellite.subscribe([shapeDef]) - - await sub4.synced - }) - - // And be "merged" into one subscription - t.is(satellite.subscriptions.getFulfilledSubscriptions().length, 1) -}) - -test('applied shape data will be acted upon correctly', async (t) => { - const { client, satellite, adapter } = t.context - const { runMigrations, authState } = t.context - await runMigrations() - - const namespace = 'main' - const tablename = 'parent' - const qualified = `"${namespace}"."${tablename}"` - - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData(tablename, parentRecord) - - const conn = await satellite.start(authState) - await conn.connectionPromise - - const shapeDef: ClientShapeDefinition = { - selects: [{ tablename }], - } - - satellite!.relations = relations - const { synced } = await satellite.subscribe([shapeDef]) - await synced - - // wait for process to apply shape data - try { - const row = await adapter.query({ - sql: `SELECT id FROM ${qualified}`, - }) - t.is(row.length, 1) - - const shadowRows = await adapter.query({ - sql: `SELECT * FROM main._electric_shadow`, - }) - t.is(shadowRows.length, 1) - t.like(shadowRows[0], { - namespace: 'main', - tablename: 'parent', - }) - - await adapter.run({ sql: `DELETE FROM ${qualified} WHERE id = 1` }) - await satellite._performSnapshot() - - const oplogs = await adapter.query({ - sql: `SELECT * FROM main._electric_oplog`, - }) - t.not(oplogs[0].clearTags, '[]') - } catch (e) { - t.fail(JSON.stringify(e)) - } -}) - -test('a subscription that failed 
to apply because of FK constraint triggers GC', async (t) => { - const { client, satellite, adapter } = t.context - const { runMigrations, authState } = t.context - await runMigrations() - - const tablename = 'child' - const namespace = 'main' - - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData(tablename, childRecord) - - const conn = await satellite.start(authState) - await conn.connectionPromise - - const shapeDef1: ClientShapeDefinition = { - selects: [{ tablename }], - } - - satellite!.relations = relations - const { synced } = await satellite.subscribe([shapeDef1]) - await synced // wait for subscription to be fulfilled - - try { - const row = await adapter.query({ - sql: `SELECT id FROM "${namespace}"."${tablename}"`, - }) - t.is(row.length, 0) - } catch (e) { - t.fail(JSON.stringify(e)) - } -}) - -test('a second successful subscription', async (t) => { - const { client, satellite, adapter } = t.context - const { runMigrations, authState } = t.context - await runMigrations() - - const namespace = 'main' - const tablename = 'child' - - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData('parent', parentRecord) - client.setRelationData(tablename, childRecord) - - const conn = await satellite.start(authState) - await conn.connectionPromise - - const shapeDef1: ClientShapeDefinition = { - selects: [{ tablename: 'parent' }], - } - const shapeDef2: ClientShapeDefinition = { - selects: [{ tablename: tablename }], - } - - satellite!.relations = relations - await satellite.subscribe([shapeDef1]) - const { synced } = await satellite.subscribe([shapeDef2]) - await synced - - try { - const row = await adapter.query({ - sql: `SELECT id FROM "${namespace}"."${tablename}"`, - }) - t.is(row.length, 1) - - const shadowRows = await adapter.query({ - sql: `SELECT tags FROM main._electric_shadow`, - }) - t.is(shadowRows.length, 2) - - const subsMeta = await satellite._getMeta('subscriptions') - const subsObj = JSON.parse(subsMeta) - t.is(Object.keys(subsObj).length, 2) - } catch (e) { - t.fail(JSON.stringify(e)) - } -}) - -test('a single subscribe with multiple tables with FKs', async (t) => { - const { client, satellite, adapter } = t.context - const { runMigrations, authState } = t.context - await runMigrations() - - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData('parent', parentRecord) - client.setRelationData('child', childRecord) - - const conn = await satellite.start(authState) - await conn.connectionPromise - - const shapeDef1: ClientShapeDefinition = { - selects: [{ tablename: 'child' }], - } - const shapeDef2: ClientShapeDefinition = { - selects: [{ tablename: 'parent' }], - } - - satellite!.relations = relations - - const prom = new Promise((res, rej) => { - client.subscribeToSubscriptionEvents( - (data: SubscriptionData) => { - // child is applied first - t.is(data.data[0].relation.table, 'child') - t.is(data.data[1].relation.table, 'parent') - - setTimeout(async () => { - try { - const row = await adapter.query({ - sql: `SELECT id FROM "main"."child"`, - }) - t.is(row.length, 1) - - res() - } catch (e) { - rej(e) - } - }, 10) - }, - () => undefined - ) - }) - - await satellite.subscribe([shapeDef1, shapeDef2]) - - return prom -}) - -test('a shape delivery that triggers garbage collection', async (t) => { - const { client, satellite, adapter } = t.context - const { runMigrations, authState } = t.context - 
await runMigrations() - - const namespace = 'main' - const tablename = 'parent' - - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData(tablename, parentRecord) - client.setRelationData('another', {}) - - const conn = await satellite.start(authState) - await conn.connectionPromise - - const shapeDef1: ClientShapeDefinition = { - selects: [{ tablename: 'parent' }], - } - const shapeDef2: ClientShapeDefinition = { - selects: [{ tablename: 'another' }], - } - - satellite!.relations = relations - const { synced: synced1 } = await satellite.subscribe([shapeDef1]) - await synced1 - const { synced } = await satellite.subscribe([shapeDef2]) - - try { - await synced - t.fail() - } catch (expected: any) { - try { - const row = await adapter.query({ - sql: `SELECT id FROM "${namespace}"."${tablename}"`, - }) - t.is(row.length, 0) - - const shadowRows = await adapter.query({ - sql: `SELECT tags FROM main._electric_shadow`, - }) - t.is(shadowRows.length, 1) - - const subsMeta = await satellite._getMeta('subscriptions') - const subsObj = JSON.parse(subsMeta) - t.deepEqual(subsObj, {}) - t.true(expected.message.search("table 'another'") >= 0) - } catch (e) { - t.fail(JSON.stringify(e)) - } - } -}) - -test('a subscription request failure does not clear the manager state', async (t) => { - const { client, satellite, adapter } = t.context - const { runMigrations, authState } = t.context - await runMigrations() - - // relations must be present at subscription delivery - const namespace = 'main' - const tablename = 'parent' - client.setRelations(relations) - client.setRelationData(tablename, parentRecord) - - const conn = await satellite.start(authState) - await conn.connectionPromise - - const shapeDef1: ClientShapeDefinition = { - selects: [{ tablename: tablename }], - } - - const shapeDef2: ClientShapeDefinition = { - selects: [{ tablename: 'failure' }], - } - - satellite!.relations = relations - const { synced } = await satellite.subscribe([shapeDef1]) - await synced - - try { - const row = await adapter.query({ - sql: `SELECT id FROM "${namespace}"."${tablename}"`, - }) - t.is(row.length, 1) - } catch (e) { - t.fail(JSON.stringify(e)) - } - - try { - await satellite.subscribe([shapeDef2]) - } catch (error: any) { - t.is(error.code, SatelliteErrorCode.TABLE_NOT_FOUND) - } -}) - -test('unsubscribing all subscriptions does not trigger FK violations', async (t) => { - const { satellite, runMigrations } = t.context - - await runMigrations() // because the meta tables need to exist for shape GC - - const subsManager = new MockSubscriptionsManager( - satellite._garbageCollectShapeHandler.bind(satellite) - ) - - // Create the 'users' and 'posts' tables expected by sqlite - // populate it with foreign keys and check that the subscription - // manager does not violate the FKs when unsubscribing from all subscriptions - await satellite.adapter.runInTransaction( - { sql: `CREATE TABLE main.users (id TEXT PRIMARY KEY, name TEXT)` }, - { - sql: `CREATE TABLE main.posts (id TEXT PRIMARY KEY, title TEXT, author_id TEXT, FOREIGN KEY(author_id) REFERENCES main.users(id) DEFERRABLE INITIALLY IMMEDIATE)`, - }, - { sql: `INSERT INTO main.users (id, name) VALUES ('u1', 'user1')` }, - { - sql: `INSERT INTO main.posts (id, title, author_id) VALUES ('p1', 'My first post', 'u1')`, - } - ) - - await subsManager.unsubscribeAll() - // if we reach here, the FKs were not violated - - // Check that everything was deleted - const users = await satellite.adapter.query({ - sql: 
'SELECT * FROM main.users', - }) - t.assert(users.length === 0) - - const posts = await satellite.adapter.query({ - sql: 'SELECT * FROM main.posts', - }) - t.assert(posts.length === 0) -}) - -test("Garbage collecting the subscription doesn't generate oplog entries", async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context - await satellite.start(authState) - await runMigrations() - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) - const ts = await satellite._performSnapshot() - await satellite._garbageCollectOplog(ts) - t.is((await satellite._getEntries(0)).length, 0) - - satellite._garbageCollectShapeHandler([ - { uuid: '', definition: { selects: [{ tablename: 'parent' }] } }, - ]) - - await satellite._performSnapshot() - t.deepEqual(await satellite._getEntries(0), []) -}) - -test('snapshots: generated oplog entries have the correct tags', async (t) => { - const { client, satellite, adapter, tableInfo } = t.context - const { runMigrations, authState } = t.context - await runMigrations() - - const namespace = 'main' - const tablename = 'parent' - const qualified = `"${namespace}"."${tablename}"` - - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData(tablename, parentRecord) - - const conn = await satellite.start(authState) - await conn.connectionPromise - - const shapeDef: ClientShapeDefinition = { - selects: [{ tablename }], - } - - satellite!.relations = relations - const { synced } = await satellite.subscribe([shapeDef]) - await synced - - const expectedTs = new Date().getTime() - const incoming = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - expectedTs, - genEncodedTags('remote', [expectedTs]), - { - id: 2, - } - ) - const incomingChange = opLogEntryToChange(incoming, relations) - - await satellite._applyTransaction({ - origin: 'remote', - commit_timestamp: Long.fromNumber(expectedTs), - changes: [incomingChange], - lsn: new Uint8Array(), - }) - - const row = await adapter.query({ - sql: `SELECT id FROM ${qualified}`, - }) - t.is(row.length, 2) - - const shadowRows = await adapter.query({ - sql: `SELECT * FROM main._electric_shadow`, - }) - t.is(shadowRows.length, 2) - t.like(shadowRows[0], { - namespace: 'main', - tablename: 'parent', - }) - - await adapter.run({ sql: `DELETE FROM ${qualified} WHERE id = 2` }) - await satellite._performSnapshot() - - const oplogs = await adapter.query({ - sql: `SELECT * FROM main._electric_oplog`, - }) - t.is(oplogs[0].clearTags, genEncodedTags('remote', [expectedTs])) -}) - -test('DELETE after DELETE sends clearTags', async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context - await runMigrations() - - await satellite._setAuthState(authState) - - await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1,'val1')`, - }) - await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (2,'val2')`, - }) - - await adapter.run({ sql: `DELETE FROM main.parent WHERE id=1` }) - - await satellite._performSnapshot() - - await adapter.run({ sql: `DELETE FROM main.parent WHERE id=2` }) - - await satellite._performSnapshot() - - const entries = await satellite._getEntries() - - t.is(entries.length, 4) - - const delete1 = entries[2] - const delete2 = entries[3] - - t.is(delete1.primaryKey, '{"id":1}') - t.is(delete1.optype, 'DELETE') - // No tags for first delete - t.is(delete1.clearTags, '[]') - - t.is(delete2.primaryKey, '{"id":2}') - t.is(delete2.optype, 'DELETE') - 
// The second should have clearTags - t.not(delete2.clearTags, '[]') -}) - -test('connection backoff success', async (t) => { - t.plan(3) - const { client, satellite } = t.context - - client.disconnect() - - const retry = (_e: any, a: number) => { - if (a > 0) { - t.pass() - return false - } - return true - } - - satellite['_connectRetryHandler'] = retry - - await Promise.all( - [satellite._connectWithBackoff(), satellite['initializing']?.waitOn()].map( - (p) => p?.catch(() => t.pass()) - ) - ) -}) - -// check that performing snapshot doesn't throw without resetting the performing snapshot assertions -test('(regression) performSnapshot handles exceptions gracefully', async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - - const error = 'FAKE TRANSACTION' - - const txnFn = adapter.transaction - adapter.transaction = () => { - throw new Error(error) - } - - try { - await satellite._performSnapshot() - } catch (e: any) { - t.is(e.message, error) - adapter.transaction = txnFn - } - - await satellite._performSnapshot() - t.pass() -}) +processTests(test) diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index 862950c2e9..bfd510b813 100644 --- a/clients/typescript/test/satellite/process.test.ts +++ b/clients/typescript/test/satellite/process.test.ts @@ -1,4 +1,4 @@ -import anyTest, { TestFn } from 'ava' +import { TestFn, ExecutionContext } from 'ava' import { MOCK_BEHIND_WINDOW_LSN, @@ -25,7 +25,8 @@ import { generateLocalOplogEntry, generateRemoteOplogEntry, genEncodedTags, - getMatchingShadowEntries, + getMatchingShadowEntries as getSqliteMatchingShadowEntries, + getPgMatchingShadowEntries, } from '../support/satellite-helpers' import Long from 'long' import { @@ -34,14 +35,9 @@ import { SatelliteError, SatelliteErrorCode, } from '../../src/util/types' -import { - makeContext, - opts, - relations, - cleanAndStopSatellite, - ContextType, - clean, -} from './common' +import { opts, relations, ContextType as CommonContextType } from './common' +import { DEFAULT_LOG_POS, numberToBytes, base64 } from '../../src/util/common' + import { DEFAULT_LOG_POS, numberToBytes, @@ -54,7 +50,14 @@ import { mergeEntries } from '../../src/satellite/merge' import { MockSubscriptionsManager } from '../../src/satellite/shapes/manager' import { AuthState, insecureAuthToken } from '../../src/auth' import { ConnectivityStateChangeNotification } from '../../src/notifiers' -import { sqliteBuilder } from '../../src/migrators/query-builder' +import { QueryBuilder } from '../../src/migrators/query-builder' + +export type ContextType = CommonContextType & { + builder: QueryBuilder + getMatchingShadowEntries: + | typeof getSqliteMatchingShadowEntries + | typeof getPgMatchingShadowEntries +} const parentRecord = { id: 1, @@ -78,65 +81,72 @@ const startSatellite = async ( return { connectionPromise } } -const test = anyTest as TestFn -test.beforeEach(makeContext) -test.afterEach.always(cleanAndStopSatellite) const qualifiedParentTableName = new QualifiedTablename( 'main', 'parent' ).toString() -const builder = sqliteBuilder -test('setup starts a satellite process', async (t) => { - t.true(t.context.satellite instanceof SatelliteProcess) -}) +const dialectValue = ( + sqliteValue: any, + pgValue: any, + t: ExecutionContext +) => { + if (t.context.builder.dialect === 'SQLite') { + return sqliteValue + } + return pgValue +} -test('start creates system tables', async (t) 
=> { - const { adapter, satellite, authState } = t.context +export const processTests = (test: TestFn) => { + test('setup starts a satellite process', async (t) => { + t.true(t.context.satellite instanceof SatelliteProcess) + }) - await satellite.start(authState) + test('start creates system tables', async (t) => { + const { adapter, satellite, authState, builder } = t.context - const rows = await adapter.query(builder.getLocalTableNames()) - const names = rows.map((row) => row.name) + await satellite.start(authState) - t.true(names.includes('_electric_oplog')) -}) + const rows = await adapter.query(builder.getLocalTableNames()) + const names = rows.map((row) => row.name) -test('load metadata', async (t) => { - const { adapter, runMigrations } = t.context - await runMigrations() + t.true(names.includes('_electric_oplog')) + }) + + test('load metadata', async (t) => { + const { adapter, runMigrations } = t.context + await runMigrations() - const meta = await loadSatelliteMetaTable(adapter) - t.deepEqual(meta, { - compensations: 1, - lsn: '', - clientId: '', - subscriptions: '', - seenAdditionalData: '', + const meta = await loadSatelliteMetaTable(adapter) + t.deepEqual(meta, { + compensations: dialectValue(1, '1', t), + lsn: '', + clientId: '', + subscriptions: '', + }) }) -}) -test('set persistent client id', async (t) => { - const { satellite, authState, token } = t.context + test('set persistent client id', async (t) => { + const { satellite, authState, token } = t.context - const { connectionPromise } = await startSatellite( + const { connectionPromise } = await startSatellite( satellite, authState, token ) - const clientId1 = satellite._authState!.clientId - t.truthy(clientId1) + const clientId1 = satellite._authState!.clientId + t.truthy(clientId1) await connectionPromise - await satellite.stop() + await satellite.stop() - await startSatellite(satellite, authState, token) + await startSatellite(satellite, authState, token) - const clientId2 = satellite._authState!.clientId - t.truthy(clientId2) - t.assert(clientId1 === clientId2) -}) + const clientId2 = satellite._authState!.clientId + t.truthy(clientId2) + t.assert(clientId1 === clientId2) + }) test('can use user_id in JWT', async (t) => { const { satellite, authState } = t.context @@ -188,638 +198,614 @@ test('cannot update user id', async (t) => { ) }) -test('cannot UPDATE primary key', async (t) => { - const { adapter, runMigrations } = t.context - await runMigrations() + test('cannot UPDATE primary key', async (t) => { + const { adapter, runMigrations } = t.context + await runMigrations() - await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('1'),('2')` }) - await t.throwsAsync( - adapter.run({ sql: `UPDATE parent SET id='3' WHERE id = '1'` }), - { - code: 'SQLITE_CONSTRAINT_TRIGGER', - } - ) -}) + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) + await t.throwsAsync( + adapter.run({ sql: `UPDATE main.parent SET id='3' WHERE id = '1'` }), + { + code: dialectValue('SQLITE_CONSTRAINT_TRIGGER', 'P0001', t), + } + ) + }) -test('snapshot works', async (t) => { - const { satellite } = t.context - const { adapter, notifier, runMigrations, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) + test('snapshot works', async (t) => { + const { satellite } = t.context + const { adapter, notifier, runMigrations, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) - await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('1'),('2')` 
}) + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) - let snapshotTimestamp = await satellite._performSnapshot() + let snapshotTimestamp = await satellite._performSnapshot() - const clientId = satellite._authState!.clientId - let shadowTags = encodeTags([generateTag(clientId, snapshotTimestamp)]) + const clientId = satellite._authState!.clientId + let shadowTags = encodeTags([generateTag(clientId, snapshotTimestamp)]) - var shadowRows = await adapter.query({ - sql: `SELECT tags FROM _electric_shadow`, - }) - t.is(shadowRows.length, 2) - for (const row of shadowRows) { - t.is(row.tags, shadowTags) - } + var shadowRows = await adapter.query({ + sql: `SELECT tags FROM main._electric_shadow`, + }) + t.is(shadowRows.length, 2) + for (const row of shadowRows) { + t.is(row.tags, shadowTags) + } - t.is(notifier.notifications.length, 1) + t.is(notifier.notifications.length, 1) - const { changes } = notifier.notifications[0] - const expectedChange = { - qualifiedTablename: new QualifiedTablename('main', 'parent'), - rowids: [1, 2], + const { changes } = notifier.notifications[0] + const expectedChange = { + qualifiedTablename: new QualifiedTablename('main', 'parent'), + rowids: [1, 2], recordChanges: [ { primaryKey: { id: 1 }, type: 'INSERT' }, { primaryKey: { id: 2 }, type: 'INSERT' }, ], - } - - t.deepEqual(changes, [expectedChange]) -}) - -test('(regression) performSnapshot cant be called concurrently', async (t) => { - const { authState, satellite, runMigrations } = t.context - await runMigrations() - await satellite._setAuthState(authState) - - await t.throwsAsync( - async () => { - const run = satellite.adapter.run.bind(satellite.adapter) - satellite.adapter.run = (stmt) => - new Promise((res) => setTimeout(() => run(stmt).then(res), 100)) - - const p1 = satellite._performSnapshot() - const p2 = satellite._performSnapshot() - await Promise.all([p1, p2]) - }, - { - instanceOf: SatelliteError, - code: SatelliteErrorCode.INTERNAL, - message: 'already performing snapshot', } - ) -}) - -test('(regression) throttle with mutex prevents race when snapshot is slow', async (t) => { - const { authState, satellite, runMigrations } = t.context - await runMigrations() - await satellite._setAuthState(authState) - // delay termination of _performSnapshot - const run = satellite.adapter.run.bind(satellite.adapter) - satellite.adapter.run = (stmt) => - new Promise((res) => setTimeout(() => run(stmt).then(res), 100)) - - const p1 = satellite._throttledSnapshot() - const p2 = new Promise((res) => { - // call snapshot after throttle time has expired - setTimeout(() => satellite._throttledSnapshot()?.then(res), 50) + t.deepEqual(changes, [expectedChange]) }) - await t.notThrowsAsync(async () => { - await p1 - await p2 - }) -}) + test('(regression) performSnapshot cant be called concurrently', async (t) => { + const { authState, satellite, runMigrations } = t.context + await runMigrations() + await satellite._setAuthState(authState) -test('starting and stopping the process works', async (t) => { - const { adapter, notifier, runMigrations, satellite, authState, token } = - t.context - await runMigrations() + await t.throwsAsync( + async () => { + const run = satellite.adapter.run.bind(satellite.adapter) + satellite.adapter.run = (stmt) => + new Promise((res) => setTimeout(() => run(stmt).then(res), 100)) - await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('1'),('2')` }) + const p1 = satellite._performSnapshot() + const p2 = satellite._performSnapshot() + await Promise.all([p1, 
p2]) + }, + { + instanceOf: SatelliteError, + code: SatelliteErrorCode.INTERNAL, + message: 'already performing snapshot', + } + ) + }) - const conn = await startSatellite(satellite, authState, token) - await conn.connectionPromise + test('(regression) throttle with mutex prevents race when snapshot is slow', async (t) => { + const { authState, satellite, runMigrations } = t.context + await runMigrations() + await satellite._setAuthState(authState) - await sleepAsync(opts.pollingInterval) + // delay termination of _performSnapshot + const run = satellite.adapter.run.bind(satellite.adapter) + satellite.adapter.run = (stmt) => + new Promise((res) => setTimeout(() => run(stmt).then(res), 100)) - // connect, 1st txn - t.is(notifier.notifications.length, 2) + const p1 = satellite._throttledSnapshot() + const p2 = new Promise((res) => { + // call snapshot after throttle time has expired + setTimeout(() => satellite._throttledSnapshot()?.then(res), 50) + }) - await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('3'),('4')` }) - await sleepAsync(opts.pollingInterval) + await t.notThrowsAsync(async () => { + await p1 + await p2 + }) + }) - // 2nd txm - t.is(notifier.notifications.length, 3) + test('starting and stopping the process works', async (t) => { + const { adapter, notifier, runMigrations, satellite, authState, token } = + t.context + await runMigrations() - await satellite.stop() - await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('5'),('6')` }) - await sleepAsync(opts.pollingInterval) + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) - // no txn notified - t.is(notifier.notifications.length, 4) + const conn = await startSatellite(satellite, authState, token) + await conn.connectionPromise - const conn1 = await startSatellite(satellite, authState, token) - await conn1.connectionPromise - await sleepAsync(opts.pollingInterval) + await sleepAsync(opts.pollingInterval) - // connect, 4th txn - t.is(notifier.notifications.length, 6) -}) + // connect, 1st txn + t.is(notifier.notifications.length, 2) -test('snapshots on potential data change', async (t) => { - const { adapter, notifier, runMigrations } = t.context - await runMigrations() + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('3'),('4')` }) + await sleepAsync(opts.pollingInterval) - await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('1'),('2')` }) + // 2nd txm + t.is(notifier.notifications.length, 3) - t.is(notifier.notifications.length, 0) + await satellite.stop() + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('5'),('6')` }) + await sleepAsync(opts.pollingInterval) - await notifier.potentiallyChanged() + // no txn notified + t.is(notifier.notifications.length, 4) - t.is(notifier.notifications.length, 1) -}) + const conn1 = await startSatellite(satellite, authState, token) + await conn1.connectionPromise + await sleepAsync(opts.pollingInterval) -// INSERT after DELETE shall nullify all non explicitly set columns -// If last operation is a DELETE, concurrent INSERT shall resurrect deleted -// values as in 'INSERT wins over DELETE and restored deleted values' -test('snapshot of INSERT after DELETE', async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context + // connect, 4th txn + t.is(notifier.notifications.length, 6) + }) - await runMigrations() + test('snapshots on potential data change', async (t) => { + const { adapter, notifier, runMigrations } = t.context + await runMigrations() - await adapter.run({ - sql: `INSERT INTO parent(id, 
value) VALUES (1,'val1')`, - }) - await adapter.run({ sql: `DELETE FROM parent WHERE id=1` }) - await adapter.run({ sql: `INSERT INTO parent(id) VALUES (1)` }) + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) - await satellite._setAuthState(authState) - await satellite._performSnapshot() - const entries = await satellite._getEntries() - const clientId = satellite._authState!.clientId - - const merged = localOperationsToTableChanges( - entries, - (timestamp: Date) => { - return generateTag(clientId, timestamp) - }, - relations - ) - const [_, keyChanges] = merged[qualifiedParentTableName]['{"id":1}'] - const resultingValue = keyChanges.changes.value.value - t.is(resultingValue, null) -}) + t.is(notifier.notifications.length, 0) -test('snapshot of INSERT with bigint', async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context + await notifier.potentiallyChanged() - await runMigrations() + t.is(notifier.notifications.length, 1) + }) - await adapter.run({ - sql: `INSERT INTO bigIntTable(value) VALUES (1)`, + test('snapshot of INSERT with blob/Uint8Array', async (t) => { + const { adapter, runMigrations, satellite, authState } = t.context + + await runMigrations() + + const blob = new Uint8Array([1, 2, 255, 244, 160, 1]) + + await adapter.run({ + sql: `INSERT INTO blobTable(value) VALUES (?)`, + args: [blob], + }) + + await satellite._setAuthState(authState) + await satellite._performSnapshot() + const entries = await satellite._getEntries() + const clientId = satellite._authState!.clientId + + const merged = localOperationsToTableChanges( + entries, + (timestamp: Date) => { + return generateTag(clientId, timestamp) + }, + relations + ) + const [_, keyChanges] = + merged['main.blobTable'][`{"value":"${blobToHexString(blob)}"}`] + const resultingValue = keyChanges.changes.value.value + t.deepEqual(resultingValue, blob) }) - await satellite._setAuthState(authState) - await satellite._performSnapshot() - const entries = await satellite._getEntries() - const clientId = satellite._authState!.clientId - - const merged = localOperationsToTableChanges( - entries, - (timestamp: Date) => { - return generateTag(clientId, timestamp) - }, - relations - ) - const qualifiedTableName = new QualifiedTablename( - 'main', - 'bigIntTable' - ).toString() - const [_, keyChanges] = merged[qualifiedTableName]['{"value":"1"}'] - const resultingValue = keyChanges.changes.value.value - t.is(resultingValue, 1n) -}) + // INSERT after DELETE shall nullify all non explicitly set columns + // If last operation is a DELETE, concurrent INSERT shall resurrect deleted + // values as in 'INSERT wins over DELETE and restored deleted values' + test('snapshot of INSERT after DELETE', async (t) => { + const { adapter, runMigrations, satellite, authState } = t.context -test('snapshot of INSERT with blob/Uint8Array', async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context + await runMigrations() - await runMigrations() + await adapter.run({ + sql: `INSERT INTO main.parent(id, value) VALUES (1,'val1')`, + }) + await adapter.run({ sql: `DELETE FROM main.parent WHERE id=1` }) + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES (1)` }) - const blob = new Uint8Array([1, 2, 255, 244, 160, 1]) + await satellite._setAuthState(authState) + await satellite._performSnapshot() + const entries = await satellite._getEntries() + const clientId = satellite._authState!.clientId - await adapter.run({ - sql: `INSERT INTO blobTable(value) VALUES (?)`, - args: 
[blob], + const merged = localOperationsToTableChanges( + entries, + (timestamp: Date) => { + return generateTag(clientId, timestamp) + }, + relations + ) + const [_, keyChanges] = merged[qualifiedParentTableName]['{"id":1}'] + const resultingValue = keyChanges.changes.value.value + t.is(resultingValue, null) }) - await satellite._setAuthState(authState) - await satellite._performSnapshot() - const entries = await satellite._getEntries() - const clientId = satellite._authState!.clientId - - const merged = localOperationsToTableChanges( - entries, - (timestamp: Date) => { - return generateTag(clientId, timestamp) - }, - relations - ) - const [_, keyChanges] = - merged['main.blobTable'][`{"value":"${blobToHexString(blob)}"}`] - const resultingValue = keyChanges.changes.value.value - t.deepEqual(resultingValue, blob) -}) - -test('take snapshot and merge local wins', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - - const incomingTs = new Date().getTime() - 1 - const incomingEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - incomingTs, - encodeTags([generateTag('remote', new Date(incomingTs))]), - { - id: 1, - value: 'incoming', - } - ) - await adapter.run({ - sql: `INSERT INTO parent(id, value, other) VALUES (1, 'local', 1)`, - }) + test('snapshot of INSERT with bigint', async (t) => { + const { adapter, runMigrations, satellite, authState } = t.context - await satellite._setAuthState(authState) - const localTime = await satellite._performSnapshot() - const clientId = satellite._authState!.clientId - - const local = await satellite._getEntries() - const localTimestamp = new Date(local[0].timestamp).getTime() - const merged = mergeEntries( - clientId, - local, - 'remote', - [incomingEntry], - relations - ) - const item = merged[qualifiedParentTableName]['{"id":1}'] + await runMigrations() - t.deepEqual(item, { - namespace: 'main', - tablename: 'parent', - primaryKeyCols: { id: 1 }, - optype: OPTYPES.upsert, - changes: { - id: { value: 1, timestamp: localTimestamp }, - value: { value: 'local', timestamp: localTimestamp }, - other: { value: 1, timestamp: localTimestamp }, - }, - fullRow: { - id: 1, - value: 'local', - other: 1, - }, - tags: [ - generateTag(clientId, localTime), - generateTag('remote', new Date(incomingTs)), - ], - }) -}) + await adapter.run({ + sql: `INSERT INTO main."bigIntTable"(value) VALUES (1)`, + }) -test('take snapshot and merge incoming wins', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() + await satellite._setAuthState(authState) + await satellite._performSnapshot() + const entries = await satellite._getEntries() + const clientId = satellite._authState!.clientId - await adapter.run({ - sql: `INSERT INTO parent(id, value, other) VALUES (1, 'local', 1)`, + const merged = localOperationsToTableChanges( + entries, + (timestamp: Date) => { + return generateTag(clientId, timestamp) + }, + relations + ) + const qualifiedTableName = new QualifiedTablename( + 'main', + 'bigIntTable' + ).toString() + const [_, keyChanges] = merged[qualifiedTableName]['{"value":"1"}'] + const resultingValue = keyChanges.changes.value.value + t.is(resultingValue, 1n) }) - await satellite._setAuthState(authState) - const clientId = satellite._authState!.clientId - await satellite._performSnapshot() - - const local = await satellite._getEntries() - const localTimestamp = new Date(local[0].timestamp).getTime() + 
test('take snapshot and merge local wins', async (t) => { + const { adapter, runMigrations, satellite, tableInfo, authState } = + t.context + await runMigrations() - const incomingTs = localTimestamp + 1 - const incomingEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - incomingTs, - genEncodedTags('remote', [incomingTs]), - { - id: 1, - value: 'incoming', - } - ) + const incomingTs = new Date().getTime() - 1 + const incomingEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + incomingTs, + encodeTags([generateTag('remote', new Date(incomingTs))]), + { + id: 1, + value: 'incoming', + } + ) + await adapter.run({ + sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, + }) - const merged = mergeEntries( - clientId, - local, - 'remote', - [incomingEntry], - relations - ) - const item = merged[qualifiedParentTableName]['{"id":1}'] + await satellite._setAuthState(authState) + const localTime = await satellite._performSnapshot() + const clientId = satellite._authState!.clientId + + const local = await satellite._getEntries() + const localTimestamp = new Date(local[0].timestamp).getTime() + const merged = mergeEntries( + clientId, + local, + 'remote', + [incomingEntry], + relations + ) + const item = merged[qualifiedParentTableName]['{"id":1}'] - t.deepEqual(item, { - namespace: 'main', - tablename: 'parent', - primaryKeyCols: { id: 1 }, - optype: OPTYPES.upsert, - changes: { - id: { value: 1, timestamp: incomingTs }, - value: { value: 'incoming', timestamp: incomingTs }, - other: { value: 1, timestamp: localTimestamp }, - }, - fullRow: { - id: 1, - value: 'incoming', - other: 1, - }, - tags: [ - generateTag(clientId, new Date(localTimestamp)), - generateTag('remote', new Date(incomingTs)), - ], + t.deepEqual(item, { + namespace: 'main', + tablename: 'parent', + primaryKeyCols: { id: 1 }, + optype: OPTYPES.upsert, + changes: { + id: { value: 1, timestamp: localTimestamp }, + value: { value: 'local', timestamp: localTimestamp }, + other: { value: 1, timestamp: localTimestamp }, + }, + fullRow: { + id: 1, + value: 'local', + other: 1, + }, + tags: [ + generateTag(clientId, localTime), + generateTag('remote', new Date(incomingTs)), + ], + }) }) -}) -test('merge incoming wins on persisted ops', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - satellite.relations = relations + test('take snapshot and merge incoming wins', async (t) => { + const { adapter, runMigrations, satellite, tableInfo, authState } = + t.context + await runMigrations() - // This operation is persisted - await adapter.run({ - sql: `INSERT INTO parent(id, value, other) VALUES (1, 'local', 1)`, - }) - await satellite._performSnapshot() - const [originalInsert] = await satellite._getEntries() - const [tx] = toTransactions([originalInsert], satellite.relations) - tx.origin = authState.clientId - await satellite._applyTransaction(tx) + await adapter.run({ + sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, + }) - // Verify that GC worked as intended and the oplog entry was deleted - t.deepEqual(await satellite._getEntries(), []) + await satellite._setAuthState(authState) + const clientId = satellite._authState!.clientId + await satellite._performSnapshot() - // This operation is done offline - await adapter.run({ - sql: `UPDATE parent SET value = 'new local' WHERE id = 1`, - }) - await 
satellite._performSnapshot() - const [offlineInsert] = await satellite._getEntries() - const offlineTimestamp = new Date(offlineInsert.timestamp).getTime() + const local = await satellite._getEntries() + const localTimestamp = new Date(local[0].timestamp).getTime() - // This operation is done concurrently with offline but at a later point in time. It's sent immediately on connection - const incomingTs = offlineTimestamp + 1 - const firstIncomingEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.update, - incomingTs, - genEncodedTags('remote', [incomingTs]), - { id: 1, value: 'incoming' }, - { id: 1, value: 'local' } - ) + const incomingTs = localTimestamp + 1 + const incomingEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + incomingTs, + genEncodedTags('remote', [incomingTs]), + { + id: 1, + value: 'incoming', + } + ) - const firstIncomingTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(incomingTs), - changes: [opLogEntryToChange(firstIncomingEntry, satellite.relations)], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(firstIncomingTx) + const merged = mergeEntries( + clientId, + local, + 'remote', + [incomingEntry], + relations + ) + const item = merged[qualifiedParentTableName]['{"id":1}'] - const [{ value: value1 }] = await adapter.query({ - sql: 'SELECT value FROM parent WHERE id = 1', + t.deepEqual(item, { + namespace: 'main', + tablename: 'parent', + primaryKeyCols: { id: 1 }, + optype: OPTYPES.upsert, + changes: { + id: { value: 1, timestamp: incomingTs }, + value: { value: 'incoming', timestamp: incomingTs }, + other: { value: 1, timestamp: localTimestamp }, + }, + fullRow: { + id: 1, + value: 'incoming', + other: 1, + }, + tags: [ + generateTag(clientId, new Date(localTimestamp)), + generateTag('remote', new Date(incomingTs)), + ], + }) }) - t.is( - value1, - 'incoming', - 'LWW conflict merge of the incoming transaction should lead to incoming operation winning' - ) - // And after the offline transaction was sent, the resolved no-op transaction comes in - const secondIncomingEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.update, - offlineTimestamp, - encodeTags([ - generateTag('remote', incomingTs), - generateTag(authState.clientId, offlineTimestamp), - ]), - { id: 1, value: 'incoming' }, - { id: 1, value: 'incoming' } - ) - - const secondIncomingTx = { - origin: authState.clientId, - commit_timestamp: Long.fromNumber(offlineTimestamp), - changes: [opLogEntryToChange(secondIncomingEntry, satellite.relations)], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(secondIncomingTx) + test('merge incoming wins on persisted ops', async (t) => { + const { adapter, runMigrations, satellite, tableInfo, authState } = + t.context + await runMigrations() + await satellite._setAuthState(authState) + satellite.relations = relations - const [{ value: value2 }] = await adapter.query({ - sql: 'SELECT value FROM parent WHERE id = 1', - }) - t.is( - value2, - 'incoming', - 'Applying the resolved write from the round trip should be a no-op' - ) -}) + // This operation is persisted + await adapter.run({ + sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, + }) + await satellite._performSnapshot() + const [originalInsert] = await satellite._getEntries() + const [tx] = toTransactions([originalInsert], satellite.relations) + tx.origin = authState.clientId + await satellite._applyTransaction(tx) -test('apply does not add anything to oplog', 
async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await adapter.run({ - sql: `INSERT INTO parent(id, value, other) VALUES (1, 'local', null)`, - }) + // Verify that GC worked as intended and the oplog entry was deleted + t.deepEqual(await satellite._getEntries(), []) - await satellite._setAuthState(authState) - const clientId = satellite._authState!.clientId + // This operation is done offline + await adapter.run({ + sql: `UPDATE main.parent SET value = 'new local' WHERE id = 1`, + }) + await satellite._performSnapshot() + const [offlineInsert] = await satellite._getEntries() + const offlineTimestamp = new Date(offlineInsert.timestamp).getTime() - const localTimestamp = await satellite._performSnapshot() + // This operation is done concurrently with offline but at a later point in time. It's sent immediately on connection + const incomingTs = offlineTimestamp + 1 + const firstIncomingEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.update, + incomingTs, + genEncodedTags('remote', [incomingTs]), + { id: 1, value: 'incoming' }, + { id: 1, value: 'local' } + ) - const incomingTs = new Date().getTime() - const incomingEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - incomingTs, - genEncodedTags('remote', [incomingTs]), - { - id: 1, - value: 'incoming', - other: 1, + const firstIncomingTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(incomingTs), + changes: [opLogEntryToChange(firstIncomingEntry, satellite.relations)], + lsn: new Uint8Array(), } - ) + await satellite._applyTransaction(firstIncomingTx) - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s + const [{ value: value1 }] = await adapter.query({ + sql: 'SELECT value FROM main.parent WHERE id = 1', + }) + t.is( + value1, + 'incoming', + 'LWW conflict merge of the incoming transaction should lead to incoming operation winning' + ) - const incomingChange = opLogEntryToChange(incomingEntry, relations) - const incomingTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(incomingTs), - changes: [incomingChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(incomingTx) + // And after the offline transaction was sent, the resolved no-op transaction comes in + const secondIncomingEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.update, + offlineTimestamp, + encodeTags([ + generateTag('remote', incomingTs), + generateTag(authState.clientId, offlineTimestamp), + ]), + { id: 1, value: 'incoming' }, + { id: 1, value: 'incoming' } + ) - await satellite._performSnapshot() + const secondIncomingTx = { + origin: authState.clientId, + commit_timestamp: Long.fromNumber(offlineTimestamp), + changes: [opLogEntryToChange(secondIncomingEntry, satellite.relations)], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(secondIncomingTx) - const sql = 'SELECT * from parent WHERE id=1' - const [row] = await adapter.query({ sql }) - t.is(row.value, 'incoming') - t.is(row.other, 1) + const [{ value: value2 }] = await adapter.query({ + sql: 'SELECT value FROM main.parent WHERE id = 1', + }) + t.is( + value2, + 'incoming', + 'Applying the resolved write from the round trip should be a no-op' + ) + }) - const localEntries = await satellite._getEntries() - const shadowEntry = await getMatchingShadowEntries(adapter, localEntries[0]) + test('apply does not add anything to 
oplog', async (t) => { + const { + adapter, + runMigrations, + satellite, + tableInfo, + authState, + getMatchingShadowEntries, + } = t.context + await runMigrations() + await adapter.run({ + sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', null)`, + }) - t.deepEqual( - encodeTags([ - generateTag(clientId, new Date(localTimestamp)), - generateTag('remote', new Date(incomingTs)), - ]), - shadowEntry[0].tags - ) + await satellite._setAuthState(authState) + const clientId = satellite._authState!.clientId - //t.deepEqual(shadowEntries, shadowEntries2) - t.is(localEntries.length, 1) -}) + const localTimestamp = await satellite._performSnapshot() -test('apply incoming with no local', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() + const incomingTs = new Date().getTime() + const incomingEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + incomingTs, + genEncodedTags('remote', [incomingTs]), + { + id: 1, + value: 'incoming', + other: 1, + } + ) - const incomingTs = new Date() - const incomingEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.delete, - incomingTs.getTime(), - genEncodedTags('remote', []), - { - id: 1, - value: 'incoming', - otherValue: 1, + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s + + const incomingChange = opLogEntryToChange(incomingEntry, relations) + const incomingTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(incomingTs), + changes: [incomingChange], + lsn: new Uint8Array(), } - ) + await satellite._applyTransaction(incomingTx) - satellite.relations = relations // satellite must be aware of the relations in order to deserialise oplog entries + await satellite._performSnapshot() - await satellite._setAuthState(authState) - await satellite._apply([incomingEntry], 'remote') + const sql = 'SELECT * from main.parent WHERE id=1' + const [row] = await adapter.query({ sql }) + t.is(row.value, 'incoming') + t.is(row.other, 1) - const sql = 'SELECT * from parent WHERE id=1' - const rows = await adapter.query({ sql }) - const shadowEntries = await getMatchingShadowEntries(adapter) + const localEntries = await satellite._getEntries() + const shadowEntry = await getMatchingShadowEntries(adapter, localEntries[0]) - t.is(shadowEntries.length, 0) - t.is(rows.length, 0) -}) + t.deepEqual( + encodeTags([ + generateTag(clientId, new Date(localTimestamp)), + generateTag('remote', new Date(incomingTs)), + ]), + shadowEntry[0].tags + ) -test('apply empty incoming', async (t) => { - const { runMigrations, satellite, authState } = t.context - await runMigrations() + //t.deepEqual(shadowEntries, shadowEntries2) + t.is(localEntries.length, 1) + }) - await satellite._setAuthState(authState) - await satellite._apply([], 'external') + test('apply incoming with no local', async (t) => { + const { + adapter, + runMigrations, + satellite, + tableInfo, + authState, + getMatchingShadowEntries, + } = t.context + await runMigrations() - t.true(true) -}) + const incomingTs = new Date() + const incomingEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.delete, + incomingTs.getTime(), + genEncodedTags('remote', []), + { + id: 1, + value: 'incoming', + otherValue: 1, + } + ) -test('apply incoming with null on column with default', async (t) => { - const { runMigrations, satellite, adapter, tableInfo, authState } = t.context - await 
runMigrations() + satellite.relations = relations // satellite must be aware of the relations in order to deserialise oplog entries - const incomingTs = new Date().getTime() - const incomingEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - incomingTs, - genEncodedTags('remote', [incomingTs]), - { - id: 1234, - value: 'incoming', - other: null, - } - ) + await satellite._setAuthState(authState) + await satellite._apply([incomingEntry], 'remote') - await satellite._setAuthState(authState) + const sql = 'SELECT * from main.parent WHERE id=1' + const rows = await adapter.query({ sql }) + const shadowEntries = await getMatchingShadowEntries(adapter) - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s + t.is(shadowEntries.length, 0) + t.is(rows.length, 0) + }) - const incomingChange = opLogEntryToChange(incomingEntry, relations) - const incomingTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(incomingTs), - changes: [incomingChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(incomingTx) + test('apply empty incoming', async (t) => { + const { runMigrations, satellite, authState } = t.context + await runMigrations() - const sql = `SELECT * from main.parent WHERE value='incoming'` - const rows = await adapter.query({ sql }) + await satellite._setAuthState(authState) + await satellite._apply([], 'external') - t.is(rows[0].other, null) - t.pass() -}) + t.true(true) + }) -test('apply incoming with undefined on column with default', async (t) => { - const { runMigrations, satellite, adapter, tableInfo, authState } = t.context - await runMigrations() + test('apply incoming with null on column with default', async (t) => { + const { runMigrations, satellite, adapter, tableInfo, authState } = + t.context + await runMigrations() - const incomingTs = new Date().getTime() - const incomingEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - incomingTs, - genEncodedTags('remote', [incomingTs]), - { - id: 1234, - value: 'incoming', - } - ) - - await satellite._setAuthState(authState) + const incomingTs = new Date().getTime() + const incomingEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + incomingTs, + genEncodedTags('remote', [incomingTs]), + { + id: 1234, + value: 'incoming', + other: null, + } + ) - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s + await satellite._setAuthState(authState) - const incomingChange = opLogEntryToChange(incomingEntry, relations) - const incomingTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(incomingTs), - changes: [incomingChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(incomingTx) + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s - const sql = `SELECT * from main.parent WHERE value='incoming'` - const rows = await adapter.query({ sql }) + const incomingChange = opLogEntryToChange(incomingEntry, relations) + const incomingTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(incomingTs), + changes: [incomingChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(incomingTx) - t.is(rows[0].other, 0) - t.pass() -}) + const sql = `SELECT * from main.parent WHERE value='incoming'` + const rows = await adapter.query({ sql }) -test('INSERT 
wins over DELETE and restored deleted values', async (t) => { - const { runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - const clientId = satellite._authState!.clientId + t.is(rows[0].other, null) + t.pass() + }) - const localTs = new Date().getTime() - const incomingTs = localTs + 1 + test('apply incoming with undefined on column with default', async (t) => { + const { runMigrations, satellite, adapter, tableInfo, authState } = + t.context + await runMigrations() - const incoming = [ - generateRemoteOplogEntry( + const incomingTs = new Date().getTime() + const incomingEntry = generateRemoteOplogEntry( tableInfo, 'main', 'parent', @@ -827,372 +813,392 @@ test('INSERT wins over DELETE and restored deleted values', async (t) => { incomingTs, genEncodedTags('remote', [incomingTs]), { - id: 1, - other: 1, - } - ), - generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.delete, - incomingTs, - genEncodedTags('remote', []), - { - id: 1, + id: 1234, + value: 'incoming', } - ), - ] + ) - const local = [ - generateLocalOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - localTs, - genEncodedTags(clientId, [localTs]), - { - id: 1, - value: 'local', - other: null, - } - ), - ] + await satellite._setAuthState(authState) - const merged = mergeEntries(clientId, local, 'remote', incoming, relations) - const item = merged[qualifiedParentTableName]['{"id":1}'] + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s - t.deepEqual(item, { - namespace: 'main', - tablename: 'parent', - primaryKeyCols: { id: 1 }, - optype: OPTYPES.upsert, - changes: { - id: { value: 1, timestamp: incomingTs }, - value: { value: 'local', timestamp: localTs }, - other: { value: 1, timestamp: incomingTs }, - }, - fullRow: { - id: 1, - value: 'local', - other: 1, - }, - tags: [ - generateTag(clientId, new Date(localTs)), - generateTag('remote', new Date(incomingTs)), - ], + const incomingChange = opLogEntryToChange(incomingEntry, relations) + const incomingTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(incomingTs), + changes: [incomingChange], + lsn: new Uint8Array(), + } + await satellite._applyTransaction(incomingTx) + + const sql = `SELECT * from main.parent WHERE value='incoming'` + const rows = await adapter.query({ sql }) + + t.is(rows[0].other, 0) + t.pass() }) -}) -test('concurrent updates take all changed values', async (t) => { - const { runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - const clientId = satellite._authState!.clientId + test('INSERT wins over DELETE and restored deleted values', async (t) => { + const { runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) + const clientId = satellite._authState!.clientId + + const localTs = new Date().getTime() + const incomingTs = localTs + 1 + + const incoming = [ + generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + incomingTs, + genEncodedTags('remote', [incomingTs]), + { + id: 1, + other: 1, + } + ), + generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.delete, + incomingTs, + genEncodedTags('remote', []), + { + id: 1, + } + ), + ] + + const local = [ + generateLocalOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + localTs, + genEncodedTags(clientId, 
[localTs]), + { + id: 1, + value: 'local', + other: null, + } + ), + ] - const localTs = new Date().getTime() - const incomingTs = localTs + 1 + const merged = mergeEntries(clientId, local, 'remote', incoming, relations) + const item = merged[qualifiedParentTableName]['{"id":1}'] - const incoming = [ - generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.update, - incomingTs, - genEncodedTags('remote', [incomingTs]), - { - id: 1, - value: 'remote', // the only modified column - other: 0, + t.deepEqual(item, { + namespace: 'main', + tablename: 'parent', + primaryKeyCols: { id: 1 }, + optype: OPTYPES.upsert, + changes: { + id: { value: 1, timestamp: incomingTs }, + value: { value: 'local', timestamp: localTs }, + other: { value: 1, timestamp: incomingTs }, }, - { + fullRow: { id: 1, value: 'local', - other: 0, - } - ), - ] + other: 1, + }, + tags: [ + generateTag(clientId, new Date(localTs)), + generateTag('remote', new Date(incomingTs)), + ], + }) + }) - const local = [ - generateLocalOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.update, - localTs, - genEncodedTags(clientId, [localTs]), - { - id: 1, - value: 'local', - other: 1, // the only modified column + test('concurrent updates take all changed values', async (t) => { + const { runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) + const clientId = satellite._authState!.clientId + + const localTs = new Date().getTime() + const incomingTs = localTs + 1 + + const incoming = [ + generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.update, + incomingTs, + genEncodedTags('remote', [incomingTs]), + { + id: 1, + value: 'remote', // the only modified column + other: 0, + }, + { + id: 1, + value: 'local', + other: 0, + } + ), + ] + + const local = [ + generateLocalOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.update, + localTs, + genEncodedTags(clientId, [localTs]), + { + id: 1, + value: 'local', + other: 1, // the only modified column + }, + { + id: 1, + value: 'local', + other: 0, + } + ), + ] + + const merged = mergeEntries(clientId, local, 'remote', incoming, relations) + const item = merged[qualifiedParentTableName]['{"id":1}'] + + // The incoming entry modified the value of the `value` column to `'remote'` + // The local entry concurrently modified the value of the `other` column to 1. + // The merged entries should have `value = 'remote'` and `other = 1`. + t.deepEqual(item, { + namespace: 'main', + tablename: 'parent', + primaryKeyCols: { id: 1 }, + optype: OPTYPES.upsert, + changes: { + value: { value: 'remote', timestamp: incomingTs }, + other: { value: 1, timestamp: localTs }, }, - { + fullRow: { id: 1, - value: 'local', - other: 0, - } - ), - ] - - const merged = mergeEntries(clientId, local, 'remote', incoming, relations) - const item = merged[qualifiedParentTableName]['{"id":1}'] - - // The incoming entry modified the value of the `value` column to `'remote'` - // The local entry concurrently modified the value of the `other` column to 1. - // The merged entries should have `value = 'remote'` and `other = 1`. 
- t.deepEqual(item, { - namespace: 'main', - tablename: 'parent', - primaryKeyCols: { id: 1 }, - optype: OPTYPES.upsert, - changes: { - value: { value: 'remote', timestamp: incomingTs }, - other: { value: 1, timestamp: localTs }, - }, - fullRow: { - id: 1, - value: 'remote', - other: 1, - }, - tags: [ - generateTag(clientId, new Date(localTs)), - generateTag('remote', new Date(incomingTs)), - ], + value: 'remote', + other: 1, + }, + tags: [ + generateTag(clientId, new Date(localTs)), + generateTag('remote', new Date(incomingTs)), + ], + }) }) -}) -test('merge incoming with empty local', async (t) => { - const { runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - const clientId = satellite._authState!.clientId + test('merge incoming with empty local', async (t) => { + const { runMigrations, satellite, tableInfo, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) + const clientId = satellite._authState!.clientId + + const localTs = new Date().getTime() + const incomingTs = localTs + 1 + + const incoming = [ + generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + incomingTs, + genEncodedTags('remote', [incomingTs]), + { + id: 1, + }, + undefined + ), + ] - const localTs = new Date().getTime() - const incomingTs = localTs + 1 + const local: OplogEntry[] = [] + const merged = mergeEntries(clientId, local, 'remote', incoming, relations) + const item = merged[qualifiedParentTableName]['{"id":1}'] - const incoming = [ - generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - incomingTs, - genEncodedTags('remote', [incomingTs]), - { + t.deepEqual(item, { + namespace: 'main', + tablename: 'parent', + primaryKeyCols: { id: 1 }, + optype: OPTYPES.upsert, + changes: { + id: { value: 1, timestamp: incomingTs }, + }, + fullRow: { id: 1, }, - undefined - ), - ] - - const local: OplogEntry[] = [] - const merged = mergeEntries(clientId, local, 'remote', incoming, relations) - const item = merged[qualifiedParentTableName]['{"id":1}'] - - t.deepEqual(item, { - namespace: 'main', - tablename: 'parent', - primaryKeyCols: { id: 1 }, - optype: OPTYPES.upsert, - changes: { - id: { value: 1, timestamp: incomingTs }, - }, - fullRow: { - id: 1, - }, - tags: [generateTag('remote', new Date(incomingTs))], + tags: [generateTag('remote', new Date(incomingTs))], + }) }) -}) -test('compensations: referential integrity is enforced', async (t) => { - const { adapter, runMigrations, satellite } = t.context - await runMigrations() + test('compensations: referential integrity is enforced', async (t) => { + const { adapter, runMigrations, satellite, builder } = t.context + await runMigrations() - await adapter.run({ sql: `PRAGMA foreign_keys = ON` }) - await satellite._setMeta('compensations', 0) - await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, - }) - - await t.throwsAsync( - adapter.run({ sql: `INSERT INTO main.child(id, parent) VALUES (1, 2)` }), - { - code: 'SQLITE_CONSTRAINT_FOREIGNKEY', + if (builder.dialect === 'SQLite') { + await adapter.run({ sql: `PRAGMA foreign_keys = ON` }) } - ) -}) + await satellite._setMeta('compensations', 0) + await adapter.run({ + sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, + }) -test('compensations: incoming operation breaks referential integrity', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, timestamp, authState } = - t.context - await runMigrations() + 
await t.throwsAsync( + adapter.run({ sql: `INSERT INTO main.child(id, parent) VALUES (1, 2)` }), + { + code: dialectValue('SQLITE_CONSTRAINT_FOREIGNKEY', '23503', t), + } + ) + }) - await adapter.run({ sql: `PRAGMA foreign_keys = ON;` }) - await satellite._setMeta('compensations', 0) - await satellite._setAuthState(authState) + test('compensations: incoming operation breaks referential integrity', async (t) => { + const { + adapter, + runMigrations, + satellite, + tableInfo, + timestamp, + authState, + builder, + } = t.context + await runMigrations() - const incoming = generateLocalOplogEntry( - tableInfo, - 'main', - 'child', - OPTYPES.insert, - timestamp, - genEncodedTags('remote', [timestamp]), - { - id: 1, - parent: 1, + if (builder.dialect === 'SQLite') { + await adapter.run({ sql: `PRAGMA foreign_keys = ON` }) } - ) - - // await satellite._setAuthState(authState) - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s + await satellite._setMeta('compensations', 0) + await satellite._setAuthState(authState) - const incomingChange = opLogEntryToChange(incoming, relations) - const incomingTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(timestamp), - changes: [incomingChange], - lsn: new Uint8Array(), - } - - await t.throwsAsync(satellite._applyTransaction(incomingTx), { - code: 'SQLITE_CONSTRAINT_FOREIGNKEY', - }) -}) - -test('compensations: incoming operations accepted if restore referential integrity', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, timestamp, authState } = - t.context - await runMigrations() - - await adapter.run({ sql: `PRAGMA foreign_keys = ON;` }) - await satellite._setMeta('compensations', 0) - await satellite._setAuthState(authState) - const clientId = satellite._authState!.clientId + const incoming = generateLocalOplogEntry( + tableInfo, + 'main', + 'child', + OPTYPES.insert, + timestamp, + genEncodedTags('remote', [timestamp]), + { + id: 1, + parent: 1, + } + ) - const childInsertEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'child', - OPTYPES.insert, - timestamp, - genEncodedTags(clientId, [timestamp]), - { - id: 1, - parent: 1, - } - ) + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s - const parentInsertEntry = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - timestamp, - genEncodedTags(clientId, [timestamp]), - { - id: 1, + const incomingChange = opLogEntryToChange(incoming, relations) + const incomingTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(timestamp), + changes: [incomingChange], + lsn: new Uint8Array(), } - ) - await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, + await t.throwsAsync(satellite._applyTransaction(incomingTx), { + code: dialectValue('SQLITE_CONSTRAINT_FOREIGNKEY', '23503', t), + }) }) - await adapter.run({ sql: `DELETE FROM main.parent WHERE id=1` }) - - await satellite._performSnapshot() - - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s - - const childInsertChange = opLogEntryToChange(childInsertEntry, relations) - const parentInsertChange = opLogEntryToChange(parentInsertEntry, relations) - const insertChildAndParentTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(new Date().getTime()), // timestamp is not important for this test, it is only used to GC the oplog - 
changes: [parentInsertChange, childInsertChange], - lsn: new Uint8Array(), - } - await satellite._applyTransaction(insertChildAndParentTx) - const rows = await adapter.query({ - sql: `SELECT * from main.parent WHERE id=1`, - }) + test('compensations: incoming operations accepted if restore referential integrity', async (t) => { + const { + adapter, + runMigrations, + satellite, + tableInfo, + timestamp, + authState, + builder, + } = t.context + await runMigrations() - // Not only does the parent exist. - t.is(rows.length, 1) + if (builder.dialect === 'SQLite') { + await adapter.run({ sql: `PRAGMA foreign_keys = ON` }) + } + await satellite._setMeta('compensations', 0) + await satellite._setAuthState(authState) + const clientId = satellite._authState!.clientId - // But it's also recreated with deleted values. - t.is(rows[0].value, '1') -}) + const childInsertEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'child', + OPTYPES.insert, + timestamp, + genEncodedTags(clientId, [timestamp]), + { + id: 1, + parent: 1, + } + ) -test('compensations: using triggers with flag 0', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() + const parentInsertEntry = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + timestamp, + genEncodedTags(clientId, [timestamp]), + { + id: 1, + } + ) - await adapter.run({ sql: `PRAGMA foreign_keys = ON` }) - await satellite._setMeta('compensations', 0) + await adapter.run({ + sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, + }) + await adapter.run({ sql: `DELETE FROM main.parent WHERE id=1` }) - await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, - }) - await satellite._setAuthState(authState) - const ts = await satellite._performSnapshot() - await satellite._garbageCollectOplog(ts) + await satellite._performSnapshot() - await adapter.run({ sql: `INSERT INTO main.child(id, parent) VALUES (1, 1)` }) - await satellite._performSnapshot() + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s - const timestamp = new Date().getTime() - const incoming = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.delete, - timestamp, - genEncodedTags('remote', []), - { - id: 1, + const childInsertChange = opLogEntryToChange(childInsertEntry, relations) + const parentInsertChange = opLogEntryToChange(parentInsertEntry, relations) + const insertChildAndParentTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(new Date().getTime()), // timestamp is not important for this test, it is only used to GC the oplog + changes: [parentInsertChange, childInsertChange], + lsn: new Uint8Array(), } - ) + await satellite._applyTransaction(insertChildAndParentTx) - satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s + const rows = await adapter.query({ + sql: `SELECT * from main.parent WHERE id=1`, + }) - const incomingChange = opLogEntryToChange(incoming, relations) - const incomingTx = { - origin: 'remote', - commit_timestamp: Long.fromNumber(timestamp), - changes: [incomingChange], - lsn: new Uint8Array(), - } + // Not only does the parent exist. + t.is(rows.length, 1) - await t.throwsAsync(satellite._applyTransaction(incomingTx), { - code: 'SQLITE_CONSTRAINT_FOREIGNKEY', + // But it's also recreated with deleted values. 
+ t.is(rows[0].value, '1') }) -}) -test('compensations: using triggers with flag 1', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = t.context - await runMigrations() + test('compensations: using triggers with flag 0', async (t) => { + const { adapter, runMigrations, satellite, tableInfo, authState, builder } = + t.context + await runMigrations() - await adapter.run({ sql: `PRAGMA foreign_keys = ON` }) - await satellite._setMeta('compensations', 1) + if (builder.dialect === 'SQLite') { + await adapter.run({ sql: `PRAGMA foreign_keys = ON` }) + } + await satellite._setMeta('compensations', 0) - await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, - }) - await satellite._setAuthState(authState) - const ts = await satellite._performSnapshot() - await satellite._garbageCollectOplog(ts) + await adapter.run({ + sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, + }) + await satellite._setAuthState(authState) + const ts = await satellite._performSnapshot() + await satellite._garbageCollectOplog(ts) - await adapter.run({ sql: `INSERT INTO main.child(id, parent) VALUES (1, 1)` }) - await satellite._performSnapshot() + await adapter.run({ + sql: `INSERT INTO main.child(id, parent) VALUES (1, 1)`, + }) + await satellite._performSnapshot() - const timestamp = new Date().getTime() - const incoming = [ - generateRemoteOplogEntry( + const timestamp = new Date().getTime() + const incoming = generateRemoteOplogEntry( tableInfo, 'main', 'parent', @@ -1202,168 +1208,219 @@ test('compensations: using triggers with flag 1', async (t) => { { id: 1, } - ), - ] + ) - satellite.relations = relations // satellite must be aware of the relations in order to deserialise oplog entries + satellite.relations = relations // satellite must be aware of the relations in order to turn `DataChange`s into `OpLogEntry`s - await satellite._apply(incoming, 'remote') - t.pass() -}) + const incomingChange = opLogEntryToChange(incoming, relations) + const incomingTx = { + origin: 'remote', + commit_timestamp: Long.fromNumber(timestamp), + changes: [incomingChange], + lsn: new Uint8Array(), + } -test('get oplogEntries from transaction', async (t) => { - const { runMigrations, satellite } = t.context - await runMigrations() + await t.throwsAsync(satellite._applyTransaction(incomingTx), { + code: dialectValue('SQLITE_CONSTRAINT_FOREIGNKEY', '23503', t), + }) + }) - const relations = await satellite['_getLocalRelations']() + test('compensations: using triggers with flag 1', async (t) => { + const { adapter, runMigrations, satellite, tableInfo, authState, builder } = + t.context + await runMigrations() - const transaction: DataTransaction = { - lsn: DEFAULT_LOG_POS, - commit_timestamp: Long.UZERO, - changes: [ - { - relation: relations.parent, - type: DataChangeType.INSERT, - record: { id: 0 }, - tags: [], // proper values are not relevent here - }, - ], - } + if (builder.dialect === 'SQLite') { + await adapter.run({ sql: `PRAGMA foreign_keys = ON` }) + } + await satellite._setMeta('compensations', 1) - const expected: OplogEntry = { - namespace: 'main', - tablename: 'parent', - optype: 'INSERT', - newRow: '{"id":0}', - oldRow: undefined, - primaryKey: '{"id":0}', - rowid: -1, - timestamp: '1970-01-01T00:00:00.000Z', - clearTags: encodeTags([]), - } + await adapter.run({ + sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, + }) + await satellite._setAuthState(authState) + const ts = await satellite._performSnapshot() + await satellite._garbageCollectOplog(ts) 
-  const opLog = fromTransaction(transaction, relations)
-  t.deepEqual(opLog[0], expected)
-})
+    await adapter.run({
+      sql: `INSERT INTO main.child(id, parent) VALUES (1, 1)`,
+    })
+    await satellite._performSnapshot()
 
-test('get transactions from opLogEntries', async (t) => {
-  const { runMigrations } = t.context
-  await runMigrations()
+    const timestamp = new Date().getTime()
+    const incoming = [
+      generateRemoteOplogEntry(
+        tableInfo,
+        'main',
+        'parent',
+        OPTYPES.delete,
+        timestamp,
+        genEncodedTags('remote', []),
+        {
+          id: 1,
+        }
+      ),
+    ]
 
-  const opLogEntries: OplogEntry[] = [
-    {
-      namespace: 'public',
-      tablename: 'parent',
-      optype: 'INSERT',
-      newRow: '{"id":0}',
-      oldRow: undefined,
-      primaryKey: '{"id":0}',
-      rowid: 1,
-      timestamp: '1970-01-01T00:00:00.000Z',
-      clearTags: encodeTags([]),
-    },
-    {
-      namespace: 'public',
-      tablename: 'parent',
-      optype: 'UPDATE',
-      newRow: '{"id":1}',
-      oldRow: '{"id":1}',
-      primaryKey: '{"id":1}',
-      rowid: 2,
-      timestamp: '1970-01-01T00:00:00.000Z',
-      clearTags: encodeTags([]),
-    },
-    {
-      namespace: 'public',
-      tablename: 'parent',
-      optype: 'INSERT',
-      newRow: '{"id":2}',
-      oldRow: undefined,
-      primaryKey: '{"id":0}',
-      rowid: 3,
-      timestamp: '1970-01-01T00:00:01.000Z',
-      clearTags: encodeTags([]),
-    },
-  ]
+    satellite.relations = relations // satellite must be aware of the relations in order to deserialise oplog entries
 
-  const expected = [
-    {
-      lsn: numberToBytes(2),
+    await satellite._apply(incoming, 'remote')
+    t.pass()
+  })
+
+  test('get oplogEntries from transaction', async (t) => {
+    const { runMigrations, satellite } = t.context
+    await runMigrations()
+
+    const relations = await satellite['_getLocalRelations']()
+
+    const transaction: DataTransaction = {
+      lsn: DEFAULT_LOG_POS,
       commit_timestamp: Long.UZERO,
       changes: [
         {
           relation: relations.parent,
           type: DataChangeType.INSERT,
           record: { id: 0 },
-          oldRecord: undefined,
-          tags: [],
-        },
-        {
-          relation: relations.parent,
-          type: DataChangeType.UPDATE,
-          record: { id: 1 },
-          oldRecord: { id: 1 },
-          tags: [],
-        },
-      ],
-    },
-    {
-      lsn: numberToBytes(3),
-      commit_timestamp: Long.UZERO.add(1000),
-      changes: [
-        {
-          relation: relations.parent,
-          type: DataChangeType.INSERT,
-          record: { id: 2 },
-          oldRecord: undefined,
-          tags: [],
+          tags: [], // proper values are not relevant here
         },
       ],
-    },
-  ]
+    }
 
-  const opLog = toTransactions(opLogEntries, relations)
-  t.deepEqual(opLog, expected)
-})
+    const expected: OplogEntry = {
+      namespace: 'main',
+      tablename: 'parent',
+      optype: 'INSERT',
+      newRow: '{"id":0}',
+      oldRow: undefined,
+      primaryKey: '{"id":0}',
+      rowid: -1,
+      timestamp: '1970-01-01T00:00:00.000Z',
+      clearTags: encodeTags([]),
+    }
 
-test('disconnect stops queueing operations', async (t) => {
-  const { runMigrations, satellite, adapter, authState, token } = t.context
-  await runMigrations()
-  const { connectionPromise } = await startSatellite(
+    const opLog = fromTransaction(transaction, relations)
+    t.deepEqual(opLog[0], expected)
+  })
+
+  test('get transactions from opLogEntries', async (t) => {
+    const { runMigrations } = t.context
+    await runMigrations()
+
+    const opLogEntries: OplogEntry[] = [
+      {
+        namespace: 'public',
+        tablename: 'parent',
+        optype: 'INSERT',
+        newRow: '{"id":0}',
+        oldRow: undefined,
+        primaryKey: '{"id":0}',
+        rowid: 1,
+        timestamp: '1970-01-01T00:00:00.000Z',
+        clearTags: encodeTags([]),
+      },
+      {
+        namespace: 'public',
+        tablename: 'parent',
+        optype: 'UPDATE',
+        newRow: '{"id":1}',
+        oldRow: '{"id":1}',
+        primaryKey: '{"id":1}',
+        rowid: 2,
+        timestamp: 
'1970-01-01T00:00:00.000Z', + clearTags: encodeTags([]), + }, + { + namespace: 'public', + tablename: 'parent', + optype: 'INSERT', + newRow: '{"id":2}', + oldRow: undefined, + primaryKey: '{"id":0}', + rowid: 3, + timestamp: '1970-01-01T00:00:01.000Z', + clearTags: encodeTags([]), + }, + ] + + const expected = [ + { + lsn: numberToBytes(2), + commit_timestamp: Long.UZERO, + changes: [ + { + relation: relations.parent, + type: DataChangeType.INSERT, + record: { id: 0 }, + oldRecord: undefined, + tags: [], + }, + { + relation: relations.parent, + type: DataChangeType.UPDATE, + record: { id: 1 }, + oldRecord: { id: 1 }, + tags: [], + }, + ], + }, + { + lsn: numberToBytes(3), + commit_timestamp: Long.UZERO.add(1000), + changes: [ + { + relation: relations.parent, + type: DataChangeType.INSERT, + record: { id: 2 }, + oldRecord: undefined, + tags: [], + }, + ], + }, + ] + + const opLog = toTransactions(opLogEntries, relations) + t.deepEqual(opLog, expected) + }) + + test('disconnect stops queueing operations', async (t) => { + const { runMigrations, satellite, adapter, authState, token } = t.context + await runMigrations() + const { connectionPromise } = await startSatellite( satellite, authState, token ) await connectionPromise - adapter.run({ - sql: `INSERT INTO parent(id, value, other) VALUES (1, 'local', 1)`, - }) + adapter.run({ + sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, + }) - await satellite._performSnapshot() + await satellite._performSnapshot() - // We should have sent (or at least enqueued to send) one row - const sentLsn = satellite.client.getLastSentLsn() - t.deepEqual(sentLsn, numberToBytes(1)) + // We should have sent (or at least enqueued to send) one row + const sentLsn = satellite.client.getLastSentLsn() + t.deepEqual(sentLsn, numberToBytes(1)) - satellite.disconnect() + satellite.disconnect() - adapter.run({ - sql: `INSERT INTO parent(id, value, other) VALUES (2, 'local', 1)`, - }) + adapter.run({ + sql: `INSERT INTO main.parent(id, value, other) VALUES (2, 'local', 1)`, + }) - await satellite._performSnapshot() + await satellite._performSnapshot() - // Since connectivity is down, that row isn't yet sent - const lsn1 = satellite.client.getLastSentLsn() - t.deepEqual(lsn1, sentLsn) + // Since connectivity is down, that row isn't yet sent + const lsn1 = satellite.client.getLastSentLsn() + t.deepEqual(lsn1, sentLsn) - // Once connectivity is restored, we will immediately run a snapshot to send pending rows - await satellite.connectWithBackoff() - await sleepAsync(200) // Wait for snapshot to run - const lsn2 = satellite.client.getLastSentLsn() - t.deepEqual(lsn2, numberToBytes(2)) -}) + // Once connectivity is restored, we will immediately run a snapshot to send pending rows + await satellite.connectWithBackoff() + await sleepAsync(200) // Wait for snapshot to run + const lsn2 = satellite.client.getLastSentLsn() + t.deepEqual(lsn2, numberToBytes(2)) + }) test('notifies about JWT expiration', async (t) => { const { @@ -1413,253 +1470,253 @@ test('garbage collection is triggered when transaction from the same origin is r const conn = await startSatellite(satellite, authState, token) await conn.connectionPromise - adapter.run({ - sql: `INSERT INTO parent(id, value, other) VALUES (1, 'local', 1);`, - }) - adapter.run({ - sql: `UPDATE parent SET value = 'local', other = 2 WHERE id = 1;`, - }) + adapter.run({ + sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1);`, + }) + adapter.run({ + sql: `UPDATE main.parent SET value = 'local', 
other = 2 WHERE id = 1;`,
+    })
 
-  // Before snapshot, we didn't send anything
-  const lsn1 = satellite.client.getLastSentLsn()
-  t.deepEqual(lsn1, numberToBytes(0))
+    // Before snapshot, we didn't send anything
+    const lsn1 = satellite.client.getLastSentLsn()
+    t.deepEqual(lsn1, numberToBytes(0))
 
-  // Snapshot sends these oplog entries
-  await satellite._performSnapshot()
-  const lsn2 = satellite.client.getLastSentLsn()
-  t.deepEqual(lsn2, numberToBytes(2))
+    // Snapshot sends these oplog entries
+    await satellite._performSnapshot()
+    const lsn2 = satellite.client.getLastSentLsn()
+    t.deepEqual(lsn2, numberToBytes(2))
 
-  const old_oplog = await satellite._getEntries()
-  const transactions = toTransactions(old_oplog, relations)
-  transactions[0].origin = satellite._authState!.clientId
+    const old_oplog = await satellite._getEntries()
+    const transactions = toTransactions(old_oplog, relations)
+    transactions[0].origin = satellite._authState!.clientId
 
-  // Transaction containing these oplogs is applies, which means we delete them
-  await satellite._applyTransaction(transactions[0])
-  const new_oplog = await satellite._getEntries()
-  t.deepEqual(new_oplog, [])
-})
+    // Transaction containing these oplogs is applied, which means we delete them
+    await satellite._applyTransaction(transactions[0])
+    const new_oplog = await satellite._getEntries()
+    t.deepEqual(new_oplog, [])
+  })
 
-// stub client and make satellite throw the error with option off/succeed with option on
-test('clear database on BEHIND_WINDOW', async (t) => {
-  const { satellite } = t.context
-  const { runMigrations, authState, token } = t.context
-  await runMigrations()
+  // stub client and make satellite throw the error with option off/succeed with option on
+  test('clear database on BEHIND_WINDOW', async (t) => {
+    const { satellite } = t.context
+    const { runMigrations, authState, token } = t.context
+    await runMigrations()
 
-  const base64lsn = base64.fromBytes(numberToBytes(MOCK_BEHIND_WINDOW_LSN))
-  await satellite._setMeta('lsn', base64lsn)
-  try {
-    const conn = await startSatellite(satellite, authState, token)
-    await conn.connectionPromise
-    const lsnAfter = await satellite._getMeta('lsn')
-    t.not(lsnAfter, base64lsn)
-  } catch (e) {
-    t.fail('start should not throw')
-  }
+    const base64lsn = base64.fromBytes(numberToBytes(MOCK_BEHIND_WINDOW_LSN))
+    await satellite._setMeta('lsn', base64lsn)
+    try {
+      const conn = await startSatellite(satellite, authState, token)
+      await conn.connectionPromise
+      const lsnAfter = await satellite._getMeta('lsn')
+      t.not(lsnAfter, base64lsn)
+    } catch (e) {
+      t.fail('start should not throw')
+    }
 
-  // TODO: test clear subscriptions
-})
+    // TODO: test clear subscriptions
+  })
 
 test('throw other replication errors', async (t) => {
   t.plan(2)
   const { satellite, runMigrations, authState, token } = t.context
   await runMigrations()
 
-  const base64lsn = base64.fromBytes(numberToBytes(MOCK_INTERNAL_ERROR))
-  await satellite._setMeta('lsn', base64lsn)
+    const base64lsn = base64.fromBytes(numberToBytes(MOCK_INTERNAL_ERROR))
+    await satellite._setMeta('lsn', base64lsn)
 
-  const conn = await startSatellite(satellite, authState, token)
-  return Promise.all(
-    [satellite['initializing']?.waitOn(), conn.connectionPromise].map((p) =>
-      p?.catch((e: SatelliteError) => {
-        t.is(e.code, SatelliteErrorCode.INTERNAL)
-      })
+    const conn = await startSatellite(satellite, authState, token)
+    return Promise.all(
+      [satellite['initializing']?.waitOn(), conn.connectionPromise].map((p) =>
+        p?.catch((e: SatelliteError) => {
+          
t.is(e.code, SatelliteErrorCode.INTERNAL) + }) + ) ) - ) -}) + }) -test('apply shape data and persist subscription', async (t) => { - const { client, satellite, adapter, notifier, token } = t.context - const { runMigrations, authState } = t.context - await runMigrations() + test('apply shape data and persist subscription', async (t) => { + const { client, satellite, adapter, notifier, token } = t.context + const { runMigrations, authState } = t.context + await runMigrations() - const namespace = 'main' - const tablename = 'parent' - const qualified = new QualifiedTablename(namespace, tablename) + const namespace = 'main' + const tablename = 'parent' + const qualified = new QualifiedTablename(namespace, tablename) - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData(tablename, parentRecord) + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) - const conn = await startSatellite(satellite, authState, token) - await conn.connectionPromise + const conn = await startSatellite(satellite, authState, token) + await conn.connectionPromise - const shapeDef: Shape = { - tablename, - } + const shapeDef: Shape = { + tablename, + } - satellite!.relations = relations - const { synced } = await satellite.subscribe([shapeDef]) - await synced + satellite!.relations = relations + const { synced } = await satellite.subscribe([shapeDef]) + await synced - // first notification is 'connected' - t.is(notifier.notifications.length, 2) - t.is(notifier.notifications[1].changes.length, 1) - t.deepEqual(notifier.notifications[1].changes[0], { - qualifiedTablename: qualified, + // first notification is 'connected' + t.is(notifier.notifications.length, 2) + t.is(notifier.notifications[1].changes.length, 1) + t.deepEqual(notifier.notifications[1].changes[0], { + qualifiedTablename: qualified, recordChanges: [ { primaryKey: { id: 1 }, type: 'INITIAL', }, ], - rowids: [], - }) - - // wait for process to apply shape data - const qualifiedTableName = `"${namespace}"."${tablename}"` - try { - const row = await adapter.query({ - sql: `SELECT id FROM ${qualifiedTableName}`, + rowids: [], }) - t.is(row.length, 1) - const shadowRows = await adapter.query({ - sql: `SELECT tags FROM _electric_shadow`, - }) - t.is(shadowRows.length, 1) + // wait for process to apply shape data + const qualifiedTableName = `"${namespace}"."${tablename}"` + try { + const row = await adapter.query({ + sql: `SELECT id FROM ${qualifiedTableName}`, + }) + t.is(row.length, 1) - const subsMeta = await satellite._getMeta('subscriptions') - const subsObj = JSON.parse(subsMeta) - t.is(Object.keys(subsObj).length, 1) + const shadowRows = await adapter.query({ + sql: `SELECT tags FROM main._electric_shadow`, + }) + t.is(shadowRows.length, 1) - // Check that we save the LSN sent by the mock - t.deepEqual(satellite._lsn, base64.toBytes('MTIz')) - } catch (e) { - t.fail(JSON.stringify(e)) - } -}) + const subsMeta = await satellite._getMeta('subscriptions') + const subsObj = JSON.parse(subsMeta) + t.is(Object.keys(subsObj).length, 1) + + // Check that we save the LSN sent by the mock + t.deepEqual(satellite._lsn, base64.toBytes('MTIz')) + } catch (e) { + t.fail(JSON.stringify(e)) + } + }) test('(regression) shape subscription succeeds even if subscription data is delivered before the SatSubsReq RPC call receives its SatSubsResp answer', async (t) => { const { client, satellite, runMigrations, authState, token } = 
t.context await runMigrations() - const tablename = 'parent' + const tablename = 'parent' - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData(tablename, parentRecord) + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) - const conn = await startSatellite(satellite, authState, token) - await conn.connectionPromise + const conn = await startSatellite(satellite, authState, token) + await conn.connectionPromise - const shapeDef: Shape = { - tablename, - } + const shapeDef: Shape = { + tablename, + } - satellite!.relations = relations + satellite!.relations = relations - // Enable the deliver first flag in the mock client - // such that the subscription data is delivered before the - // subscription promise is resolved - const mockClient = satellite.client as MockSatelliteClient - mockClient.enableDeliverFirst() + // Enable the deliver first flag in the mock client + // such that the subscription data is delivered before the + // subscription promise is resolved + const mockClient = satellite.client as MockSatelliteClient + mockClient.enableDeliverFirst() - const { synced } = await satellite.subscribe([shapeDef]) - await synced + const { synced } = await satellite.subscribe([shapeDef]) + await synced - t.pass() -}) + t.pass() + }) test('multiple subscriptions for the same shape are deduplicated', async (t) => { const { client, satellite, runMigrations, authState, token } = t.context await runMigrations() - const tablename = 'parent' - - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData(tablename, parentRecord) + const tablename = 'parent' - const conn = await startSatellite(satellite, authState, token) - await conn.connectionPromise + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) - const shapeDef: Shape = { - tablename, - } + const conn = await startSatellite(satellite, authState, token) + await conn.connectionPromise - satellite!.relations = relations + const shapeDef: Shape = { + tablename, + } - // We want none of these cases to throw - await t.notThrowsAsync(async () => { - // We should dedupe subscriptions that are done at the same time - const [sub1, sub2] = await Promise.all([ - satellite.subscribe([shapeDef]), - satellite.subscribe([shapeDef]), - ]) - // That are done after first await but before the data - const sub3 = await satellite.subscribe([shapeDef]) - // And that are done after previous data is resolved - await Promise.all([sub1.synced, sub2.synced, sub3.synced]) - const sub4 = await satellite.subscribe([shapeDef]) + satellite!.relations = relations + + // We want none of these cases to throw + await t.notThrowsAsync(async () => { + // We should dedupe subscriptions that are done at the same time + const [sub1, sub2] = await Promise.all([ + satellite.subscribe([shapeDef]), + satellite.subscribe([shapeDef]), + ]) + // That are done after first await but before the data + const sub3 = await satellite.subscribe([shapeDef]) + // And that are done after previous data is resolved + await Promise.all([sub1.synced, sub2.synced, sub3.synced]) + const sub4 = await satellite.subscribe([shapeDef]) + + await sub4.synced + }) - await sub4.synced + // And be "merged" into one subscription + t.is(satellite.subscriptions.getFulfilledSubscriptions().length, 1) }) - // And be "merged" into one 
subscription - t.is(satellite.subscriptions.getFulfilledSubscriptions().length, 1) -}) - test('applied shape data will be acted upon correctly', async (t) => { const { client, satellite, adapter, runMigrations, authState, token } = t.context await runMigrations() - const namespace = 'main' - const tablename = 'parent' - const qualified = `"${namespace}"."${tablename}"` + const namespace = 'main' + const tablename = 'parent' + const qualified = `"${namespace}"."${tablename}"` - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData(tablename, parentRecord) + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) - const conn = await startSatellite(satellite, authState, token) - await conn.connectionPromise + const conn = await startSatellite(satellite, authState, token) + await conn.connectionPromise - const shapeDef: Shape = { - tablename, - } + const shapeDef: Shape = { + tablename, + } - satellite!.relations = relations - const { synced } = await satellite.subscribe([shapeDef]) - await synced + satellite!.relations = relations + const { synced } = await satellite.subscribe([shapeDef]) + await synced - // wait for process to apply shape data - try { - const row = await adapter.query({ - sql: `SELECT id FROM ${qualified}`, - }) - t.is(row.length, 1) + // wait for process to apply shape data + try { + const row = await adapter.query({ + sql: `SELECT id FROM ${qualified}`, + }) + t.is(row.length, 1) - const shadowRows = await adapter.query({ - sql: `SELECT * FROM _electric_shadow`, - }) - t.is(shadowRows.length, 1) - t.like(shadowRows[0], { - namespace: 'main', - tablename: 'parent', - }) + const shadowRows = await adapter.query({ + sql: `SELECT * FROM main._electric_shadow`, + }) + t.is(shadowRows.length, 1) + t.like(shadowRows[0], { + namespace: 'main', + tablename: 'parent', + }) - await adapter.run({ sql: `DELETE FROM ${qualified} WHERE id = 1` }) - await satellite._performSnapshot() + await adapter.run({ sql: `DELETE FROM ${qualified} WHERE id = 1` }) + await satellite._performSnapshot() - const oplogs = await adapter.query({ - sql: `SELECT * FROM _electric_oplog`, - }) - t.not(oplogs[0].clearTags, '[]') - } catch (e) { - t.fail(JSON.stringify(e)) - } -}) + const oplogs = await adapter.query({ + sql: `SELECT * FROM main._electric_oplog`, + }) + t.not(oplogs[0].clearTags, '[]') + } catch (e) { + t.fail(JSON.stringify(e)) + } + }) test('additional data will be stored properly', async (t) => { const { client, satellite, adapter } = t.context @@ -1748,131 +1805,131 @@ test('a subscription that failed to apply because of FK constraint triggers GC', t.context await runMigrations() - const tablename = 'child' - const namespace = 'main' + const tablename = 'child' + const namespace = 'main' - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData(tablename, childRecord) + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, childRecord) - const conn = await startSatellite(satellite, authState, token) - await conn.connectionPromise + const conn = await startSatellite(satellite, authState, token) + await conn.connectionPromise - const shapeDef1: Shape = { - tablename, - } + const shapeDef1: Shape = { + tablename, + } - satellite!.relations = relations - const { synced } = await satellite.subscribe([shapeDef1]) - await synced // wait 
for subscription to be fulfilled + satellite!.relations = relations + const { synced } = await satellite.subscribe([shapeDef1]) + await synced // wait for subscription to be fulfilled - try { - const row = await adapter.query({ - sql: `SELECT id FROM "${namespace}"."${tablename}"`, - }) - t.is(row.length, 0) - } catch (e) { - t.fail(JSON.stringify(e)) - } -}) + try { + const row = await adapter.query({ + sql: `SELECT id FROM "${namespace}"."${tablename}"`, + }) + t.is(row.length, 0) + } catch (e) { + t.fail(JSON.stringify(e)) + } + }) test('a second successful subscription', async (t) => { const { client, satellite, adapter, runMigrations, authState, token } = t.context await runMigrations() - const namespace = 'main' - const tablename = 'child' + const namespace = 'main' + const tablename = 'child' - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData('parent', parentRecord) - client.setRelationData(tablename, childRecord) + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData('parent', parentRecord) + client.setRelationData(tablename, childRecord) - const conn = await startSatellite(satellite, authState, token) - await conn.connectionPromise + const conn = await startSatellite(satellite, authState, token) + await conn.connectionPromise - const shapeDef1: Shape = { - tablename: 'parent', - } - const shapeDef2: Shape = { - tablename, - } + const shapeDef1: Shape = { + tablename: 'parent', + } + const shapeDef2: Shape = { + tablename, + } - satellite!.relations = relations - await satellite.subscribe([shapeDef1]) - const { synced } = await satellite.subscribe([shapeDef2]) - await synced + satellite!.relations = relations + await satellite.subscribe([shapeDef1]) + const { synced } = await satellite.subscribe([shapeDef2]) + await synced - try { - const row = await adapter.query({ - sql: `SELECT id FROM "${namespace}"."${tablename}"`, - }) - t.is(row.length, 1) + try { + const row = await adapter.query({ + sql: `SELECT id FROM "${namespace}"."${tablename}"`, + }) + t.is(row.length, 1) - const shadowRows = await adapter.query({ - sql: `SELECT tags FROM _electric_shadow`, - }) - t.is(shadowRows.length, 2) + const shadowRows = await adapter.query({ + sql: `SELECT tags FROM main._electric_shadow`, + }) + t.is(shadowRows.length, 2) - const subsMeta = await satellite._getMeta('subscriptions') - const subsObj = JSON.parse(subsMeta) - t.is(Object.keys(subsObj).length, 2) - } catch (e) { - t.fail(JSON.stringify(e)) - } -}) + const subsMeta = await satellite._getMeta('subscriptions') + const subsObj = JSON.parse(subsMeta) + t.is(Object.keys(subsObj).length, 2) + } catch (e) { + t.fail(JSON.stringify(e)) + } + }) test('a single subscribe with multiple tables with FKs', async (t) => { const { client, satellite, adapter, runMigrations, authState, token } = t.context await runMigrations() - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData('parent', parentRecord) - client.setRelationData('child', childRecord) - - const conn = await startSatellite(satellite, authState, token) - await conn.connectionPromise + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData('parent', parentRecord) + client.setRelationData('child', childRecord) - const shapeDef1: Shape = { - tablename: 'child', - } - const shapeDef2: Shape = { - tablename: 'parent', - } + const conn = await 
startSatellite(satellite, authState, token) + await conn.connectionPromise - satellite!.relations = relations + const shapeDef1: Shape = { + tablename: 'child', + } + const shapeDef2: Shape = { + tablename: 'parent', + } - const prom = new Promise((res, rej) => { - client.subscribeToSubscriptionEvents( - (data: SubscriptionData) => { - // child is applied first - t.is(data.data[0].relation.table, 'child') - t.is(data.data[1].relation.table, 'parent') - - setTimeout(async () => { - try { - const row = await adapter.query({ - sql: `SELECT id FROM "main"."child"`, - }) - t.is(row.length, 1) - - res() - } catch (e) { - rej(e) - } - }, 10) - }, - () => undefined - ) - }) + satellite!.relations = relations + + const prom = new Promise((res, rej) => { + client.subscribeToSubscriptionEvents( + (data: SubscriptionData) => { + // child is applied first + t.is(data.data[0].relation.table, 'child') + t.is(data.data[1].relation.table, 'parent') + + setTimeout(async () => { + try { + const row = await adapter.query({ + sql: `SELECT id FROM "main"."child"`, + }) + t.is(row.length, 1) + + res() + } catch (e) { + rej(e) + } + }, 10) + }, + () => undefined + ) + }) - await satellite.subscribe([shapeDef1, shapeDef2]) + await satellite.subscribe([shapeDef1, shapeDef2]) - return prom -}) + return prom + }) test.serial('a shape delivery that triggers garbage collection', async (t) => { const { client, satellite, adapter, runMigrations, authState, token } = @@ -1881,11 +1938,12 @@ test.serial('a shape delivery that triggers garbage collection', async (t) => { const namespace = 'main' const tablename = 'parent' + const childTable = 'child' // relations must be present at subscription delivery client.setRelations(relations) - client.setRelationData('parent', parentRecord) - client.setRelationData('child', childRecord) + client.setRelationData(tablename, parentRecord) + client.setRelationData(childTable, childRecord) client.setRelationData('another', {}) const conn = await startSatellite(satellite, authState, token) @@ -1918,14 +1976,14 @@ test.serial('a shape delivery that triggers garbage collection', async (t) => { sql: `SELECT id FROM "${namespace}"."${tablename}"`, }) t.is(row.length, 0) - const row1 = await adapter.query({ sql: `SELECT id FROM main.child` }) + const row1 = await adapter.query({ sql: `SELECT id FROM "${namespace}"."${childTable}"` }) t.is(row1.length, 0) const shadowRows = await adapter.query({ - sql: `SELECT tags FROM _electric_shadow`, + sql: `SELECT tags FROM "${namespace}"._electric_shadow`, }) t.is(shadowRows.length, 2) - + const subsMeta = await satellite._getMeta('subscriptions') const subsObj = JSON.parse(subsMeta) t.deepEqual(subsObj, {}) @@ -1941,360 +1999,311 @@ test('a subscription request failure does not clear the manager state', async (t t.context await runMigrations() - // relations must be present at subscription delivery - const namespace = 'main' - const tablename = 'parent' - client.setRelations(relations) - client.setRelationData(tablename, parentRecord) - - const conn = await startSatellite(satellite, authState, token) - await conn.connectionPromise - - const shapeDef1: Shape = { - tablename: tablename, - } - - const shapeDef2: Shape = { - tablename: 'failure', - } - - satellite!.relations = relations - const { synced } = await satellite.subscribe([shapeDef1]) - await synced - - try { - const row = await adapter.query({ - sql: `SELECT id FROM "${namespace}"."${tablename}"`, - }) - t.is(row.length, 1) - } catch (e) { - t.fail(JSON.stringify(e)) - } - - try { - await 
satellite.subscribe([shapeDef2]) - } catch (error: any) { - t.is(error.code, SatelliteErrorCode.TABLE_NOT_FOUND) - } -}) - -test('unsubscribing all subscriptions does not trigger FK violations', async (t) => { - const { satellite, runMigrations } = t.context - - await runMigrations() // because the meta tables need to exist for shape GC + // relations must be present at subscription delivery + const namespace = 'main' + const tablename = 'parent' + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) - const subsManager = new MockSubscriptionsManager( - satellite._garbageCollectShapeHandler.bind(satellite) - ) + const conn = await startSatellite(satellite, authState, token) + await conn.connectionPromise - // Create the 'users' and 'posts' tables expected by sqlite - // populate it with foreign keys and check that the subscription - // manager does not violate the FKs when unsubscribing from all subscriptions - await satellite.adapter.runInTransaction( - { sql: `CREATE TABLE users (id TEXT PRIMARY KEY, name TEXT)` }, - { - sql: `CREATE TABLE posts (id TEXT PRIMARY KEY, title TEXT, author_id TEXT, FOREIGN KEY(author_id) REFERENCES users(id))`, - }, - { sql: `INSERT INTO users (id, name) VALUES ('u1', 'user1')` }, - { - sql: `INSERT INTO posts (id, title, author_id) VALUES ('p1', 'My first post', 'u1')`, + const shapeDef1: Shape = { + tablename: tablename, + } + + const shapeDef2: Shape = { + tablename: 'failure', } - ) - await subsManager.unsubscribeAll() - // if we reach here, the FKs were not violated + satellite!.relations = relations + const { synced } = await satellite.subscribe([shapeDef1]) + await synced - // Check that everything was deleted - const users = await satellite.adapter.query({ sql: 'SELECT * FROM users' }) - t.assert(users.length === 0) + try { + const row = await adapter.query({ + sql: `SELECT id FROM "${namespace}"."${tablename}"`, + }) + t.is(row.length, 1) + } catch (e) { + t.fail(JSON.stringify(e)) + } - const posts = await satellite.adapter.query({ sql: 'SELECT * FROM posts' }) - t.assert(posts.length === 0) + try { + await satellite.subscribe([shapeDef2]) + } catch (error: any) { + t.is(error.code, SatelliteErrorCode.TABLE_NOT_FOUND) + } }) + + test("snapshot while not fully connected doesn't throw", async (t) => { + const { adapter, runMigrations, satellite, client, authState, token } = + t.context + client.setStartReplicationDelayMs(100) + + await runMigrations() + + // Add log entry while offline + await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('1'),('2')` }) + + const conn = await startSatellite(satellite, authState, token) + + // Performing a snapshot while the replication connection has not been stablished + // should not throw + await satellite._performSnapshot() + + await conn.connectionPromise + + await satellite._performSnapshot() + + t.pass() + }) -test("Garbage collecting the subscription doesn't generate oplog entries", async (t) => { - const { adapter, runMigrations, satellite, authState, token } = t.context - await startSatellite(satellite, authState, token) - await runMigrations() - await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('1'),('2')` }) - const ts = await satellite._performSnapshot() - await satellite._garbageCollectOplog(ts) - t.is((await satellite._getEntries(0)).length, 0) - - satellite._garbageCollectShapeHandler([ - { uuid: '', definition: { tablename: 'parent' } }, - ]) + test('unsubscribing all subscriptions does not trigger FK violations', async (t) => { + const { satellite, 
runMigrations, builder } = t.context - await satellite._performSnapshot() - t.deepEqual(await satellite._getEntries(0), []) -}) + satellite._garbageCollectShapeHandler([ + { uuid: '', definition: { tablename: 'parent' } }, + ]) -test("snapshot while not fully connected doesn't throw", async (t) => { - const { adapter, runMigrations, satellite, client, authState, token } = - t.context - client.setStartReplicationDelayMs(100) + await runMigrations() // because the meta tables need to exist for shape GC - await runMigrations() + const subsManager = new MockSubscriptionsManager( + satellite._garbageCollectShapeHandler.bind(satellite) + ) - // Add log entry while offline - await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('1'),('2')` }) + // Create the 'users' and 'posts' tables expected by sqlite + // populate it with foreign keys and check that the subscription + // manager does not violate the FKs when unsubscribing from all subscriptions + try { + await satellite.adapter.runInTransaction( + { sql: `CREATE TABLE main.users (id TEXT PRIMARY KEY, name TEXT)` }, + { + sql: `CREATE TABLE main.posts (id TEXT PRIMARY KEY, title TEXT, author_id TEXT, FOREIGN KEY(author_id) REFERENCES ${builder.pgOnly( + 'main.' + )}users(id) ${builder.pgOnly('DEFERRABLE INITIALLY IMMEDIATE')})`, + }, + { sql: `INSERT INTO main.users (id, name) VALUES ('u1', 'user1')` }, + { + sql: `INSERT INTO main.posts (id, title, author_id) VALUES ('p1', 'My first post', 'u1')`, + } + ) + } catch (e: any) { + throw e + } - const conn = await startSatellite(satellite, authState, token) + await subsManager.unsubscribeAll() + // if we reach here, the FKs were not violated - // Performing a snapshot while the replication connection has not been stablished - // should not throw - await satellite._performSnapshot() + // Check that everything was deleted + const users = await satellite.adapter.query({ + sql: 'SELECT * FROM main.users', + }) + t.assert(users.length === 0) - await conn.connectionPromise + const posts = await satellite.adapter.query({ + sql: 'SELECT * FROM main.posts', + }) + t.assert(posts.length === 0) + }) - await satellite._performSnapshot() + test("Garbage collecting the subscription doesn't generate oplog entries", async (t) => { + const { adapter, runMigrations, satellite, authState, token } = t.context + await startSatellite(satellite, authState, token) + await runMigrations() + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) + const ts = await satellite._performSnapshot() + await satellite._garbageCollectOplog(ts) + t.is((await satellite._getEntries(0)).length, 0) + + satellite._garbageCollectShapeHandler([ + { uuid: '', definition: { selects: [{ tablename: 'parent' }] } }, + ]) - t.pass() -}) + await satellite._performSnapshot() + t.deepEqual(await satellite._getEntries(0), []) + }) -test('snapshots: generated oplog entries have the correct tags', async (t) => { - const { + test('snapshots: generated oplog entries have the correct tags', async (t) => { + const { client, satellite, adapter, tableInfo, - runMigrations, + runMigrations, authState, token, } = t.context - await runMigrations() + await runMigrations() - const namespace = 'main' - const tablename = 'parent' - const qualified = `"${namespace}"."${tablename}"` + const namespace = 'main' + const tablename = 'parent' + const qualified = `"${namespace}"."${tablename}"` - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData(tablename, parentRecord) + // relations 
must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) - const conn = await startSatellite(satellite, authState, token) - await conn.connectionPromise + const conn = await startSatellite(satellite, authState, token) + await conn.connectionPromise - const shapeDef: Shape = { - tablename, - } + const shapeDef: Shape = { + tablename, + } - satellite!.relations = relations - const { synced } = await satellite.subscribe([shapeDef]) - await synced + satellite!.relations = relations + const { synced } = await satellite.subscribe([shapeDef]) + await synced - const expectedTs = new Date().getTime() - const incoming = generateRemoteOplogEntry( - tableInfo, - 'main', - 'parent', - OPTYPES.insert, - expectedTs, - genEncodedTags('remote', [expectedTs]), - { - id: 2, - } - ) - const incomingChange = opLogEntryToChange(incoming, relations) + const expectedTs = new Date().getTime() + const incoming = generateRemoteOplogEntry( + tableInfo, + 'main', + 'parent', + OPTYPES.insert, + expectedTs, + genEncodedTags('remote', [expectedTs]), + { + id: 2, + } + ) + const incomingChange = opLogEntryToChange(incoming, relations) - await satellite._applyTransaction({ - origin: 'remote', - commit_timestamp: Long.fromNumber(expectedTs), - changes: [incomingChange], - lsn: new Uint8Array(), - }) + await satellite._applyTransaction({ + origin: 'remote', + commit_timestamp: Long.fromNumber(expectedTs), + changes: [incomingChange], + lsn: new Uint8Array(), + }) - const row = await adapter.query({ - sql: `SELECT id FROM ${qualified}`, - }) - t.is(row.length, 2) + const row = await adapter.query({ + sql: `SELECT id FROM ${qualified}`, + }) + t.is(row.length, 2) - const shadowRows = await adapter.query({ - sql: `SELECT * FROM _electric_shadow`, - }) - t.is(shadowRows.length, 2) - t.like(shadowRows[0], { - namespace: 'main', - tablename: 'parent', - }) + const shadowRows = await adapter.query({ + sql: `SELECT * FROM main._electric_shadow`, + }) + t.is(shadowRows.length, 2) + t.like(shadowRows[0], { + namespace: 'main', + tablename: 'parent', + }) - await adapter.run({ sql: `DELETE FROM ${qualified} WHERE id = 2` }) - const deleteTx = await satellite._performSnapshot() + await adapter.run({ sql: `DELETE FROM ${qualified} WHERE id = 2` }) + await satellite._performSnapshot() - const oplogs = await adapter.query({ - sql: `SELECT * FROM _electric_oplog`, + const oplogs = await adapter.query({ + sql: `SELECT * FROM main._electric_oplog`, + }) + t.is(oplogs[0].clearTags, genEncodedTags('remote', [expectedTs])) }) - t.is( - oplogs[0].clearTags, - encodeTags([ - generateTag(satellite._authState!.clientId, deleteTx), - generateTag('remote', expectedTs), - ]) - ) -}) - -test('DELETE after DELETE sends clearTags', async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context - await runMigrations() + test('DELETE after DELETE sends clearTags', async (t) => { + const { adapter, runMigrations, satellite, authState } = t.context + await runMigrations() - await satellite._setAuthState(authState) + await satellite._setAuthState(authState) - await adapter.run({ - sql: `INSERT INTO parent(id, value) VALUES (1,'val1')`, - }) - await adapter.run({ - sql: `INSERT INTO parent(id, value) VALUES (2,'val2')`, - }) + await adapter.run({ + sql: `INSERT INTO main.parent(id, value) VALUES (1,'val1')`, + }) + await adapter.run({ + sql: `INSERT INTO main.parent(id, value) VALUES (2,'val2')`, + }) - await adapter.run({ sql: `DELETE FROM parent WHERE id=1` }) + await 
adapter.run({ sql: `DELETE FROM main.parent WHERE id=1` }) - await satellite._performSnapshot() + await satellite._performSnapshot() - await adapter.run({ sql: `DELETE FROM parent WHERE id=2` }) + await adapter.run({ sql: `DELETE FROM main.parent WHERE id=2` }) - await satellite._performSnapshot() + await satellite._performSnapshot() - const entries = await satellite._getEntries() + const entries = await satellite._getEntries() - t.is(entries.length, 4) + t.is(entries.length, 4) - const delete1 = entries[2] - const delete2 = entries[3] + const delete1 = entries[2] + const delete2 = entries[3] - t.is(delete1.primaryKey, '{"id":1}') - t.is(delete1.optype, 'DELETE') - // No tags for first delete - t.is(delete1.clearTags, '[]') + t.is(delete1.primaryKey, '{"id":1}') + t.is(delete1.optype, 'DELETE') + // No tags for first delete + t.is(delete1.clearTags, '[]') - t.is(delete2.primaryKey, '{"id":2}') - t.is(delete2.optype, 'DELETE') - // The second should have clearTags - t.not(delete2.clearTags, '[]') -}) + t.is(delete2.primaryKey, '{"id":2}') + t.is(delete2.optype, 'DELETE') + // The second should have clearTags + t.not(delete2.clearTags, '[]') + }) -test.serial('connection backoff success', async (t) => { - t.plan(3) - const { client, satellite } = t.context + test.serial('connection backoff success', async (t) => { + t.plan(3) + const { client, satellite } = t.context - client.shutdown() + client.shutdown() - const retry = (_e: any, a: number) => { - if (a > 0) { - t.pass() - return false + const retry = (_e: any, a: number) => { + if (a > 0) { + t.pass() + return false + } + return true } - return true - } - satellite['_connectRetryHandler'] = retry + satellite['_connectRetryHandler'] = retry - await Promise.all( - [satellite.connectWithBackoff(), satellite['initializing']?.waitOn()].map( - (p) => p?.catch(() => t.pass()) + await Promise.all( + [ + satellite.connectWithBackoff(), + satellite['initializing']?.waitOn(), + ].map((p) => p?.catch(() => t.pass())) ) - ) -}) - -test.serial('connection cancelled on disconnect', async (t) => { - const { client, satellite, authState, token } = t.context - client.shutdown() // such that satellite can't connect to Electric and will keep retrying - const { connectionPromise } = await startSatellite( - satellite, - authState, - token - ) - - // We expect the connection to be cancelled - const prom = t.throwsAsync(connectionPromise, { - code: SatelliteErrorCode.CONNECTION_CANCELLED_BY_DISCONNECT, }) - // Disconnect Satellite - satellite.clientDisconnect() - - // Await until the connection promise is rejected - await prom -}) - -// check that performing snapshot doesn't throw without resetting the performing snapshot assertions -test('(regression) performSnapshot handles exceptions gracefully', async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - - const error = 'FAKE TRANSACTION' - - const txnFn = adapter.transaction - adapter.transaction = () => { - throw new Error(error) - } - - try { - await satellite._performSnapshot() - } catch (e: any) { - t.is(e.message, error) - adapter.transaction = txnFn - } - - await satellite._performSnapshot() - t.pass() -}) - -test("don't leave a snapshot running when stopping", async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context - await runMigrations() - await satellite._setAuthState(authState) - - // Make the adapter slower, to interleave stopping the process and closing the db with a snapshot 
- const transaction = satellite.adapter.transaction.bind(satellite.adapter) - satellite.adapter.transaction = (f) => - new Promise((res) => { - setTimeout(() => transaction(f).then(res), 500) + test.serial('connection cancelled on disconnect', async (t) => { + const { client, satellite, authState, token } = t.context + client.shutdown() // such that satellite can't connect to Electric and will keep retrying + const { connectionPromise } = await startSatellite( + satellite, + authState, + token + ) + + // We expect the connection to be cancelled + const prom = t.throwsAsync(connectionPromise, { + code: SatelliteErrorCode.CONNECTION_CANCELLED_BY_DISCONNECT, }) - - // Add something to the oplog - await adapter.run({ - sql: `INSERT INTO parent(id, value) VALUES (1,'val1')`, + + // Disconnect Satellite + satellite.clientDisconnect() + + // Await until the connection promise is rejected + await prom }) - // // Perform snapshot with the mutex, to emulate a real scenario - const snapshotPromise = satellite._mutexSnapshot() - // Give some time to start the "slow" snapshot - await sleepAsync(100) - - // Stop the process while the snapshot is being performed - await satellite.stop() - - // Remove/close the database connection - await clean(t) - - // Wait for the snapshot to finish to consider the test successful - await snapshotPromise - - t.pass() -}) - -test("don't snapshot after closing satellite process", async (t) => { - // open and then immediately close - // check that no snapshot is called after close - const { satellite, authState, token } = t.context - const { connectionPromise } = await startSatellite( - satellite, - authState, - token - ) + // check that performing snapshot doesn't throw without resetting the performing snapshot assertions + test('(regression) performSnapshot handles exceptions gracefully', async (t) => { + const { adapter, runMigrations, satellite, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) - await connectionPromise - await satellite.stop() + const error = 'FAKE TRANSACTION' - satellite._performSnapshot = () => { - t.fail('Snapshot was called') - return Promise.resolve(new Date()) - } + const txnFn = adapter.transaction + adapter.transaction = () => { + throw new Error(error) + } - // wait some time to see that mutexSnapshot is not called - await sleepAsync(50) + try { + await satellite._performSnapshot() + } catch (e: any) { + t.is(e.message, error) + adapter.transaction = txnFn + } - t.pass() -}) + await satellite._performSnapshot() + t.pass() + }) +} diff --git a/clients/typescript/test/satellite/sqlite/process.test.ts b/clients/typescript/test/satellite/sqlite/process.test.ts new file mode 100644 index 0000000000..ec73fc687a --- /dev/null +++ b/clients/typescript/test/satellite/sqlite/process.test.ts @@ -0,0 +1,18 @@ +import anyTest, { TestFn } from 'ava' + +import { getMatchingShadowEntries as getSQLiteMatchingShadowEntries } from '../../support/satellite-helpers' + +import { makeContext, cleanAndStopSatellite } from '../common' + +import { sqliteBuilder } from '../../../src/migrators/query-builder' +import { processTests, ContextType } from '../process.test' + +const test = anyTest as TestFn +test.beforeEach(async (t) => { + await makeContext(t) + t.context.builder = sqliteBuilder + t.context.getMatchingShadowEntries = getSQLiteMatchingShadowEntries +}) +test.afterEach.always(cleanAndStopSatellite) + +processTests(test) From b0e64319d96d50a82a9f62ffc89f63341158d99f Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: 
Tue, 27 Feb 2024 08:51:00 +0100 Subject: [PATCH 018/156] Fix bulk insert for PG --- clients/typescript/src/migrators/query-builder/builder.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index ae4118dae3..24705dfee0 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -353,6 +353,7 @@ export abstract class QueryBuilder { const batchMaxSize = (maxParameters - (maxParameters % columnCount)) / columnCount while (processed < recordCount) { + positionalParam = 1 // start counting parameters from 1 again const currentInsertCount = Math.min(recordCount - processed, batchMaxSize) const sql = baseSql + From f5d456a5f34beaa4905df941cfe7882f84f3b986 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 4 Mar 2024 15:18:11 +0100 Subject: [PATCH 019/156] Refactored Satellite unit tests to have PG and SQLite versions reuse a common implementation. --- clients/typescript/test/satellite/common.ts | 24 +-- .../typescript/test/satellite/merge.test.ts | 155 +++++++++++------- .../test/satellite/serialization.test.ts | 136 ++++++++------- 3 files changed, 166 insertions(+), 149 deletions(-) diff --git a/clients/typescript/test/satellite/common.ts b/clients/typescript/test/satellite/common.ts index 3871f13f6c..3876734478 100644 --- a/clients/typescript/test/satellite/common.ts +++ b/clients/typescript/test/satellite/common.ts @@ -28,20 +28,6 @@ import { makePgDatabase } from '../support/node-postgres' import { DatabaseAdapter as PgDatabaseAdapter } from '../../src/drivers/node-postgres/adapter' import { DatabaseAdapter } from '../../src/electric/adapter' -export type Database = { - exec(statement: { sql: string }): Promise -} - -export function wrapDB(db: SqliteDB): Database { - const wrappedDB = { - exec: async ({ sql }: { sql: string }) => { - console.log('EXECCC:\n' + sql) - db.exec(sql) - }, - } - return wrappedDB -} - export const dbDescription = new DbSchema( { child: { @@ -414,7 +400,7 @@ export const cleanAndStopSatellite = async ( } export async function migrateDb( - db: Database, + db: DatabaseAdapter, table: Table, builder: QueryBuilder ) { @@ -422,17 +408,17 @@ export async function migrateDb( const initialMigration = makeInitialMigration(builder) const migration = initialMigration.migrations[0].statements const [createMainSchema, ...restMigration] = migration - await db.exec({ sql: createMainSchema }) + await db.run({ sql: createMainSchema }) const namespace = table.namespace const tableName = table.tableName // Create the table in the database on the given namespace const createTableSQL = `CREATE TABLE "${namespace}"."${tableName}" (id REAL PRIMARY KEY, name TEXT, age INTEGER, bmi REAL, int8 INTEGER, blob BLOB)` - await db.exec({ sql: createTableSQL }) + await db.run({ sql: createTableSQL }) // Apply the initial migration on the database for (const stmt of restMigration) { - await db.exec({ sql: stmt }) + await db.run({ sql: stmt }) } // Generate the table triggers @@ -440,7 +426,7 @@ export async function migrateDb( // Apply the triggers on the database for (const trigger of triggers) { - await db.exec({ sql: trigger.sql }) + await db.run({ sql: trigger.sql }) } } diff --git a/clients/typescript/test/satellite/merge.test.ts b/clients/typescript/test/satellite/merge.test.ts index 7a07090cce..bae70e555a 100644 --- a/clients/typescript/test/satellite/merge.test.ts +++ 
b/clients/typescript/test/satellite/merge.test.ts @@ -15,7 +15,16 @@ import Long from 'long' import { relations, migrateDb, personTable, wrapDB } from './common' import Database from 'better-sqlite3' import { satelliteDefaults } from '../../src/satellite/config' -import { sqliteBuilder } from '../../src/migrators/query-builder' +import { + QueryBuilder, + pgBuilder, + sqliteBuilder, +} from '../../src/migrators/query-builder' +import { DatabaseAdapter as SQLiteDatabaseAdapter } from '../../src/drivers/better-sqlite3' +import { DatabaseAdapter as PgDatabaseAdapter } from '../../src/drivers/node-postgres/adapter' +import { DatabaseAdapter as DatabaseAdapterInterface } from '../../src/electric/adapter' +import { makePgDatabase } from '../support/node-postgres' +import { randomValue } from '../../src/util/random' const qualifiedMergeTable = new QualifiedTablename( 'main', @@ -168,73 +177,99 @@ function _mergeTableTest( }) } -test('merge works on oplog entries', async (t) => { +type MaybePromise = T | Promise +type SetupFn = ( + t: ExecutionContext +) => MaybePromise<[DatabaseAdapterInterface, QueryBuilder]> +const setupSqlite: SetupFn = (t: ExecutionContext) => { const db = new Database(':memory:') - const wrappedDb = wrapDB(db) + t.teardown(() => db.close()) + return [new SQLiteDatabaseAdapter(db), sqliteBuilder] +} - // Migrate the DB with the necessary tables and triggers - await migrateDb(wrappedDb, personTable, sqliteBuilder) +let port = 4800 +const setupPG: SetupFn = async (t: ExecutionContext) => { + const dbName = `merge-test-${randomValue()}` + const { db, stop } = await makePgDatabase(dbName, port++) + t.teardown(async () => await stop()) + return [new PgDatabaseAdapter(db), pgBuilder] +} - // Insert a row in the table - const insertRowSQL = `INSERT INTO ${personTable.tableName} (id, name, age, bmi, int8, blob) VALUES (9e999, 'John Doe', 30, 25.5, 7, x'0001ff')` - db.exec(insertRowSQL) +;( + [ + ['SQLite', setupSqlite], + ['Postgres', setupPG], + ] as const +).forEach(([dialect, setup]) => { + test(`(${dialect}) merge works on oplog entries`, async (t) => { + const [adapter, builder] = await setup(t) - // Fetch the oplog entry for the inserted row - const oplogTable = `"${satelliteDefaults.oplogTable.namespace}"."${satelliteDefaults.oplogTable.tablename}"` - const oplogRows = db.prepare(`SELECT * FROM ${oplogTable}`).all() + // Migrate the DB with the necessary tables and triggers + await migrateDb(adapter, personTable, builder) - t.is(oplogRows.length, 1) + // Insert a row in the table + const insertRowSQL = `INSERT INTO "${personTable.namespace}"."${personTable.tableName}" (id, name, age, bmi, int8, blob) VALUES (54321, 'John Doe', 30, 25.5, 7, x'0001ff')` + await adapter.run({ sql: insertRowSQL }) - const oplogEntry = oplogRows[0] as OplogEntry + // Fetch the oplog entry for the inserted row + const oplogTable = `"${satelliteDefaults.oplogTable.namespace}"."${satelliteDefaults.oplogTable.tablename}"` + const oplogRows = await adapter.query({ + sql: `SELECT * FROM ${oplogTable}`, + }) - // Define a transaction that happened concurrently - // and inserts a row with the same id but different values - const tx: DataTransaction = { - lsn: DEFAULT_LOG_POS, - commit_timestamp: to_commit_timestamp('1970-01-02T03:46:42.000Z'), - changes: [ - { - relation: relations[personTable.tableName as keyof typeof relations], - type: DataChangeType.INSERT, - record: { - // fields must be ordered alphabetically to match the behavior of the triggers - age: 30, - blob: new Uint8Array([0, 1, 255]), - bmi: 
8e888, - id: 9e999, - int8: '224', // Big ints are serialized as strings in the oplog - name: 'John Doe', + t.is(oplogRows.length, 1) + + const oplogEntry = oplogRows[0] as unknown as OplogEntry + + // Define a transaction that happened concurrently + // and inserts a row with the same id but different values + const tx: DataTransaction = { + lsn: DEFAULT_LOG_POS, + commit_timestamp: to_commit_timestamp('1970-01-02T03:46:42.000Z'), + changes: [ + { + relation: relations[personTable.tableName as keyof typeof relations], + type: DataChangeType.INSERT, + record: { + // fields must be ordered alphabetically to match the behavior of the triggers + age: 30, + blob: new Uint8Array([0, 1, 255]), + bmi: 21.3, + id: 54321, + int8: '224', // Big ints are serialized as strings in the oplog + name: 'John Doe', + }, + tags: [], }, - tags: [], - }, - ], - } + ], + } - // Merge the oplog entry with the transaction - const merged = mergeEntries( - 'local', - [oplogEntry], - 'remote', - fromTransaction(tx, relations), - relations - ) - - const pk = primaryKeyToStr({ id: 9e999 }) - - // the incoming transaction wins - const qualifiedTableName = new QualifiedTablename( - personTable.namespace, - personTable.tableName - ).toString() - t.like(merged, { - [qualifiedTableName]: { [pk]: { optype: 'UPSERT' } }, - }) - t.deepEqual(merged[qualifiedTableName][pk].fullRow, { - id: 9e999, - name: 'John Doe', - age: 30, - blob: new Uint8Array([0, 1, 255]), - bmi: Infinity, - int8: 224n, + // Merge the oplog entry with the transaction + const merged = mergeEntries( + 'local', + [oplogEntry], + 'remote', + fromTransaction(tx, relations), + relations + ) + + const pk = primaryKeyToStr({ id: 54321 }) + + // the incoming transaction wins + const qualifiedTableName = new QualifiedTablename( + personTable.namespace, + personTable.tableName + ).toString() + t.like(merged, { + [qualifiedTableName]: { [pk]: { optype: 'UPSERT' } }, + }) + t.deepEqual(merged[qualifiedTableName][pk].fullRow, { + id: 54321, + name: 'John Doe', + age: 30, + blob: new Uint8Array([0, 1, 255]), + bmi: 21.3, + int8: 224n, + }) }) }) diff --git a/clients/typescript/test/satellite/serialization.test.ts b/clients/typescript/test/satellite/serialization.test.ts index 7c9fbf764b..3998debfba 100644 --- a/clients/typescript/test/satellite/serialization.test.ts +++ b/clients/typescript/test/satellite/serialization.test.ts @@ -19,8 +19,6 @@ import { import { makePgDatabase } from '../support/node-postgres' import { randomValue } from '../../src/util/random' -const builder = sqliteBuilder - test('serialize/deserialize row data', async (t) => { const rel: Relation = { id: 1, @@ -273,73 +271,6 @@ test('Null mask uses bits as if they were a list', async (t) => { t.is(mask, '1101000010000000') }) -test('Prioritize PG types in the schema before inferred SQLite types', async (t) => { - const db = new Database(':memory:') - t.teardown(() => db.close()) - - const adapter = new SQLiteDatabaseAdapter(db) - await adapter.run({ - sql: 'CREATE TABLE bools (id INTEGER PRIMARY KEY, b INTEGER)', - }) - - const sqliteInferredRelations = await inferRelationsFromDb( - adapter, - satelliteDefaults, - builder - ) - const boolsInferredRelation = sqliteInferredRelations['bools'] - - // Inferred types only support SQLite types, so the bool column is INTEGER - const boolColumn = boolsInferredRelation.columns[1] - t.is(boolColumn.name, 'b') - t.is(boolColumn.type, 'INTEGER') - - // Db schema holds the correct Postgres types - const boolsDbDescription = new DbSchema( - { - bools: { - fields: 
new Map([ - ['id', PgBasicType.PG_INTEGER], - ['b', PgBasicType.PG_BOOL], - ]), - relations: [], - } as unknown as TableSchema< - any, - any, - any, - any, - any, - any, - any, - any, - any, - HKT - >, - }, - [] - ) - - const satOpRow = serializeRow( - { id: 5, b: 1 }, - boolsInferredRelation, - boolsDbDescription - ) - - // Encoded values ["5", "t"] - t.deepEqual(satOpRow.values, [ - new Uint8Array(['5'.charCodeAt(0)]), - new Uint8Array(['t'.charCodeAt(0)]), - ]) - - const deserializedRow = deserializeRow( - satOpRow, - boolsInferredRelation, - boolsDbDescription - ) - - t.deepEqual(deserializedRow, { id: 5, b: 1 }) -}) - type MaybePromise = T | Promise type SetupFn = ( t: ExecutionContext @@ -347,7 +278,7 @@ type SetupFn = ( const setupSqlite: SetupFn = (t: ExecutionContext) => { const db = new Database(':memory:') t.teardown(() => db.close()) - return [new SQLiteDatabaseAdapter(db), builder] + return [new SQLiteDatabaseAdapter(db), sqliteBuilder] } let port = 4800 @@ -364,6 +295,71 @@ const setupPG: SetupFn = async (t: ExecutionContext) => { ['Postgres', setupPG], ] as const ).forEach(([dialect, setup]) => { + test(`(${dialect}) Prioritize PG types in the schema before inferred SQLite types`, async (t) => { + const [adapter, builder] = await setup(t) + + await adapter.run({ + sql: 'CREATE TABLE bools (id INTEGER PRIMARY KEY, b INTEGER)', + }) + + const sqliteInferredRelations = await inferRelationsFromDb( + adapter, + satelliteDefaults, + builder + ) + const boolsInferredRelation = sqliteInferredRelations['bools'] + + // Inferred types only support SQLite types, so the bool column is INTEGER + const boolColumn = boolsInferredRelation.columns[1] + t.is(boolColumn.name, 'b') + t.is(boolColumn.type, 'INTEGER') + + // Db schema holds the correct Postgres types + const boolsDbDescription = new DbSchema( + { + bools: { + fields: new Map([ + ['id', PgBasicType.PG_INTEGER], + ['b', PgBasicType.PG_BOOL], + ]), + relations: [], + } as unknown as TableSchema< + any, + any, + any, + any, + any, + any, + any, + any, + any, + HKT + >, + }, + [] + ) + + const satOpRow = serializeRow( + { id: 5, b: 1 }, + boolsInferredRelation, + boolsDbDescription + ) + + // Encoded values ["5", "t"] + t.deepEqual(satOpRow.values, [ + new Uint8Array(['5'.charCodeAt(0)]), + new Uint8Array(['t'.charCodeAt(0)]), + ]) + + const deserializedRow = deserializeRow( + satOpRow, + boolsInferredRelation, + boolsDbDescription + ) + + t.deepEqual(deserializedRow, { id: 5, b: 1 }) + }) + test(`(${dialect}) Use incoming Relation types if not found in the schema`, async (t) => { const [adapter, builder] = await setup(t) From a454749fa824e41b721d4d844398d788eb241972 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 5 Mar 2024 11:01:41 +0100 Subject: [PATCH 020/156] Refactorings for database adapter --- .../typescript/test/migrators/postgres/triggers.test.ts | 5 +++-- clients/typescript/test/migrators/sqlite/triggers.test.ts | 7 ++++--- clients/typescript/test/satellite/merge.test.ts | 2 +- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/clients/typescript/test/migrators/postgres/triggers.test.ts b/clients/typescript/test/migrators/postgres/triggers.test.ts index e55209cb4e..be15ab399d 100644 --- a/clients/typescript/test/migrators/postgres/triggers.test.ts +++ b/clients/typescript/test/migrators/postgres/triggers.test.ts @@ -5,7 +5,7 @@ import { satelliteDefaults } from '../../../src/satellite/config' import { migrateDb, personTable } from '../../satellite/common' import { pgBuilder } from 
'../../../src/migrators/query-builder' import { makePgDatabase } from '../../support/node-postgres' -import { Database } from '../../../src/drivers/node-postgres' +import { Database, DatabaseAdapter } from '../../../src/drivers/node-postgres' type Context = { db: Database @@ -24,10 +24,11 @@ let port = 5300 test.beforeEach(async (t) => { const dbName = `triggers-test-${i++}` const { db, stop } = await makePgDatabase(dbName, port++) + const adapter = new DatabaseAdapter(db) t.context = { db, - migrateDb: migrateDb.bind(null, db, personTable, pgBuilder), + migrateDb: migrateDb.bind(null, adapter, personTable, pgBuilder), stopPG: stop, } }) diff --git a/clients/typescript/test/migrators/sqlite/triggers.test.ts b/clients/typescript/test/migrators/sqlite/triggers.test.ts index 0a80c37384..975c9646bf 100644 --- a/clients/typescript/test/migrators/sqlite/triggers.test.ts +++ b/clients/typescript/test/migrators/sqlite/triggers.test.ts @@ -4,8 +4,9 @@ import { Database } from 'better-sqlite3' import testAny, { TestFn } from 'ava' import { generateTableTriggers } from '../../../src/migrators/triggers' import { satelliteDefaults } from '../../../src/satellite/config' -import { migrateDb, personTable, wrapDB } from '../../satellite/common' +import { migrateDb, personTable } from '../../satellite/common' import { sqliteBuilder } from '../../../src/migrators/query-builder' +import { DatabaseAdapter } from '../../../src/drivers/better-sqlite3' type Context = { db: Database; migrateDb: () => Promise } const test = testAny as TestFn @@ -13,11 +14,11 @@ const oplogTable = `"${satelliteDefaults.oplogTable.namespace}"."${satelliteDefa test.beforeEach(async (t) => { const db = new OriginalDatabase(':memory:') - const wrappedDb = wrapDB(db) + const adapter = new DatabaseAdapter(db) t.context = { db, - migrateDb: migrateDb.bind(null, wrappedDb, personTable, sqliteBuilder), + migrateDb: migrateDb.bind(null, adapter, personTable, sqliteBuilder), } }) diff --git a/clients/typescript/test/satellite/merge.test.ts b/clients/typescript/test/satellite/merge.test.ts index bae70e555a..a9ef785769 100644 --- a/clients/typescript/test/satellite/merge.test.ts +++ b/clients/typescript/test/satellite/merge.test.ts @@ -12,7 +12,7 @@ import { QualifiedTablename, } from '../../src/util' import Long from 'long' -import { relations, migrateDb, personTable, wrapDB } from './common' +import { relations, migrateDb, personTable } from './common' import Database from 'better-sqlite3' import { satelliteDefaults } from '../../src/satellite/config' import { From 4c573bf4bcb91665ebd30322c18d7fe7a76e4df0 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 5 Mar 2024 11:02:08 +0100 Subject: [PATCH 021/156] Refactored migrator bundle tests --- .../typescript/test/migrators/bundle.test.ts | 52 +++++++++++++++++++ .../test/migrators/postgres/bundle.test.ts | 52 +++++-------------- .../test/migrators/sqlite/bundle.test.ts | 44 ++++------------ 3 files changed, 74 insertions(+), 74 deletions(-) create mode 100644 clients/typescript/test/migrators/bundle.test.ts diff --git a/clients/typescript/test/migrators/bundle.test.ts b/clients/typescript/test/migrators/bundle.test.ts new file mode 100644 index 0000000000..25b965ff4a --- /dev/null +++ b/clients/typescript/test/migrators/bundle.test.ts @@ -0,0 +1,52 @@ +import { TestFn } from 'ava' + +import { makeStmtMigration } from '../../src/migrators' +import { DatabaseAdapter } from '../../src/electric/adapter' +import { Migration } from '../../src/migrators' +import { BundleMigratorBase as 
BundleMigrator } from '../../src/migrators/bundle' + +export type ContextType = { + dbName: string + adapter: DatabaseAdapter + migrations: Migration[] + BundleMigrator: new ( + adapter: DatabaseAdapter, + migrations?: Migration[] + ) => BundleMigrator + stop: () => Promise +} + +export const bundleTests = (test: TestFn) => { + test('run the bundle migrator', async (t) => { + const { adapter, BundleMigrator, migrations } = t.context as any + + const migrator = new BundleMigrator(adapter, migrations) + t.is(await migrator.up(), 3) + t.is(await migrator.up(), 0) + }) + + test('applyIfNotAlready applies new migrations', async (t) => { + const { adapter, BundleMigrator, migrations } = t.context as any + + const allButLastMigrations = migrations.slice(0, -1) + const lastMigration = makeStmtMigration(migrations[migrations.length - 1]) + + const migrator = new BundleMigrator(adapter, allButLastMigrations) + t.is(await migrator.up(), 2) + + const wasApplied = await migrator.applyIfNotAlready(lastMigration) + t.assert(wasApplied) + }) + + test('applyIfNotAlready ignores already applied migrations', async (t) => { + const { adapter, BundleMigrator, migrations } = t.context as any + + const migrator = new BundleMigrator(adapter, migrations) + t.is(await migrator.up(), 3) + + const wasApplied = await migrator.applyIfNotAlready( + makeStmtMigration(migrations[0]) + ) + t.assert(!wasApplied) + }) +} diff --git a/clients/typescript/test/migrators/postgres/bundle.test.ts b/clients/typescript/test/migrators/postgres/bundle.test.ts index c65484e768..c2ae7c07cd 100644 --- a/clients/typescript/test/migrators/postgres/bundle.test.ts +++ b/clients/typescript/test/migrators/postgres/bundle.test.ts @@ -1,13 +1,16 @@ -import test from 'ava' +import anyTest, { TestFn } from 'ava' import { DatabaseAdapter } from '../../../src/drivers/node-postgres' import { PgBundleMigrator as BundleMigrator } from '../../../src/migrators/bundle' -import { makeStmtMigration } from '../../../src/migrators' import { randomValue } from '../../../src/util/random' -import migrations from '../../support/migrations/pg-migrations.js' import { makePgDatabase } from '../../support/node-postgres' +import { ContextType, bundleTests } from '../bundle.test' + +import migrations from '../../support/migrations/pg-migrations.js' + +const test = anyTest as TestFn let port = 5532 test.beforeEach(async (t) => { @@ -16,46 +19,17 @@ test.beforeEach(async (t) => { const adapter = new DatabaseAdapter(db) t.context = { - adapter, dbName, - stopPG: stop, + adapter, + migrations, + BundleMigrator, + stop, } }) test.afterEach.always(async (t) => { - const { stopPG } = t.context as any - await stopPG() -}) - -test('run the bundle migrator', async (t) => { - const { adapter } = t.context as any - - const migrator = new BundleMigrator(adapter, migrations) - t.is(await migrator.up(), 3) - t.is(await migrator.up(), 0) + const { stop } = t.context as ContextType + await stop() }) -test('applyIfNotAlready applies new migrations', async (t) => { - const { adapter } = t.context as any - - const allButLastMigrations = migrations.slice(0, -1) - const lastMigration = makeStmtMigration(migrations[migrations.length - 1]) - - const migrator = new BundleMigrator(adapter, allButLastMigrations) - t.is(await migrator.up(), 2) - - const wasApplied = await migrator.applyIfNotAlready(lastMigration) - t.assert(wasApplied) -}) - -test('applyIfNotAlready ignores already applied migrations', async (t) => { - const { adapter } = t.context as any - - const migrator = new 
BundleMigrator(adapter, migrations) - t.is(await migrator.up(), 3) - - const wasApplied = await migrator.applyIfNotAlready( - makeStmtMigration(migrations[0]) - ) - t.assert(!wasApplied) -}) +bundleTests(test) diff --git a/clients/typescript/test/migrators/sqlite/bundle.test.ts b/clients/typescript/test/migrators/sqlite/bundle.test.ts index 8408667068..1b13d035a0 100644 --- a/clients/typescript/test/migrators/sqlite/bundle.test.ts +++ b/clients/typescript/test/migrators/sqlite/bundle.test.ts @@ -1,24 +1,29 @@ -import test from 'ava' +import anyTest, { TestFn } from 'ava' import Database from 'better-sqlite3' import { rm as removeFile } from 'node:fs/promises' import { DatabaseAdapter } from '../../../src/drivers/better-sqlite3/adapter' import { SqliteBundleMigrator as BundleMigrator } from '../../../src/migrators/bundle' -import { makeStmtMigration } from '../../../src/migrators' import { randomValue } from '../../../src/util/random' +import { ContextType, bundleTests } from '../bundle.test' import migrations from '../../support/migrations/migrations.js' +const test = anyTest as TestFn + test.beforeEach((t) => { const dbName = `bundle-migrator-${randomValue()}.db` const db = new Database(dbName) const adapter = new DatabaseAdapter(db) t.context = { - adapter, dbName, + adapter, + migrations, + BundleMigrator, + stop: () => Promise.resolve(), } }) @@ -29,35 +34,4 @@ test.afterEach.always(async (t) => { await removeFile(`${dbName}-journal`, { force: true }) }) -test('run the bundle migrator', async (t) => { - const { adapter } = t.context as any - - const migrator = new BundleMigrator(adapter, migrations) - t.is(await migrator.up(), 3) - t.is(await migrator.up(), 0) -}) - -test('applyIfNotAlready applies new migrations', async (t) => { - const { adapter } = t.context as any - - const allButLastMigrations = migrations.slice(0, -1) - const lastMigration = makeStmtMigration(migrations[migrations.length - 1]) - - const migrator = new BundleMigrator(adapter, allButLastMigrations) - t.is(await migrator.up(), 2) - - const wasApplied = await migrator.applyIfNotAlready(lastMigration) - t.assert(wasApplied) -}) - -test('applyIfNotAlready ignores already applied migrations', async (t) => { - const { adapter } = t.context as any - - const migrator = new BundleMigrator(adapter, migrations) - t.is(await migrator.up(), 3) - - const wasApplied = await migrator.applyIfNotAlready( - makeStmtMigration(migrations[0]) - ) - t.assert(!wasApplied) -}) +bundleTests(test) From 369c56a4afe97816c2158681ca69147bf368ef1b Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 5 Mar 2024 12:41:04 +0100 Subject: [PATCH 022/156] Refactor migrator builder tests --- .../typescript/test/migrators/builder.test.ts | 359 ++++++++++++++++++ .../test/migrators/postgres/builder.test.ts | 348 ++--------------- .../test/migrators/sqlite/builder.test.ts | 341 ++--------------- 3 files changed, 408 insertions(+), 640 deletions(-) create mode 100644 clients/typescript/test/migrators/builder.test.ts diff --git a/clients/typescript/test/migrators/builder.test.ts b/clients/typescript/test/migrators/builder.test.ts new file mode 100644 index 0000000000..cd598ef89d --- /dev/null +++ b/clients/typescript/test/migrators/builder.test.ts @@ -0,0 +1,359 @@ +import { TestFn } from 'ava' +import { dedent } from 'ts-dedent' +import { makeMigration, parseMetadata } from '../../src/migrators/builder' +import { loadMigrations } from '../../src/cli/migrations/builder' +import { + SatOpMigrate, + SatOpMigrate_Table, + SatOpMigrate_Type, + 
SatOpMigrate_Stmt, + SatOpMigrate_Column, + SatOpMigrate_PgColumnType, + SatOpMigrate_ForeignKey, +} from '../../src/_generated/protocol/satellite' +import _m0 from 'protobufjs/minimal.js' +import path from 'path' +import { QueryBuilder } from '../../src/migrators/query-builder' + +function encodeSatOpMigrateMsg(request: SatOpMigrate) { + return ( + SatOpMigrate.encode(request, _m0.Writer.create()).finish() as any + ).toString('base64') +} + +export const makeMigrationMetaData = (builder: QueryBuilder) => { + return { + format: 'SatOpMigrate', + ops: [ + encodeSatOpMigrateMsg( + SatOpMigrate.fromPartial({ + version: '20230613112725_814', + stmts: [ + SatOpMigrate_Stmt.fromPartial({ + type: SatOpMigrate_Type.CREATE_TABLE, + sql: `CREATE TABLE "main"."stars" (\n "id" TEXT NOT NULL PRIMARY KEY,\n "avatar_url" TEXT NOT NULL,\n "name" TEXT,\n "starred_at" TEXT NOT NULL,\n "username" TEXT NOT NULL\n)${builder.sqliteOnly( + ' WITHOUT ROWID' + )};\n`, + }), + ], + table: SatOpMigrate_Table.fromPartial({ + name: 'stars', + columns: [ + SatOpMigrate_Column.fromPartial({ + name: 'id', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'text', + array: [], + size: [], + }), + }), + SatOpMigrate_Column.fromPartial({ + name: 'avatar_url', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'text', + array: [], + size: [], + }), + }), + SatOpMigrate_Column.fromPartial({ + name: 'name', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'text', + array: [], + size: [], + }), + }), + SatOpMigrate_Column.fromPartial({ + name: 'starred_at', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'text', + array: [], + size: [], + }), + }), + SatOpMigrate_Column.fromPartial({ + name: 'username', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'text', + array: [], + size: [], + }), + }), + ], + fks: [], + pks: ['id'], + }), + }) + ), + ], + protocol_version: 'Electric.Satellite', + version: '20230613112725_814', + } +} + +/* + How to make adapter for PG: + //const { db, stop } = await makePgDatabase('load-migration-meta-data', 5500) + //const adapter = new DatabaseAdapter(db) + */ + +export type ContextType = { + migrationMetaData: ReturnType + builder: QueryBuilder +} + +export const bundleTests = (test: TestFn) => { + test('parse migration meta data', (t) => { + const { migrationMetaData } = t.context + const metaData = parseMetadata(migrationMetaData) + t.is(metaData.ops[0].table?.name, 'stars') + t.is(metaData.ops[0].table?.columns.length, 5) + }) + + test('generate migration from meta data', (t) => { + const { migrationMetaData, builder } = t.context + const metaData = parseMetadata(migrationMetaData) + const migration = makeMigration(metaData, builder) + t.is(migration.version, migrationMetaData.version) + t.is( + migration.statements[0], + `CREATE TABLE "main"."stars" (\n "id" TEXT NOT NULL PRIMARY KEY,\n "avatar_url" TEXT NOT NULL,\n "name" TEXT,\n "starred_at" TEXT NOT NULL,\n "username" TEXT NOT NULL\n)${builder.sqliteOnly( + ' WITHOUT ROWID' + )};\n` + ) + + if (builder.dialect === 'SQLite') { + t.is( + migration.statements[3], + 'CREATE TRIGGER update_ensure_main_stars_primarykey\n BEFORE UPDATE ON "main"."stars"\nBEGIN\n SELECT\n CASE\n WHEN old."id" != new."id" THEN\n \t\tRAISE (ABORT, \'cannot change the value of column id as it belongs to the primary key\')\n END;\nEND;' + ) + } else { + // Postgres + t.is( + migration.statements[3], + dedent` + CREATE OR 
REPLACE FUNCTION update_ensure_main_stars_primarykey_function() + RETURNS TRIGGER AS $$ + BEGIN + IF OLD."id" IS DISTINCT FROM NEW."id" THEN + RAISE EXCEPTION 'Cannot change the value of column id as it belongs to the primary key'; + END IF; + RETURN NEW; + END; + $$ LANGUAGE plpgsql; + ` + ) + + t.is( + migration.statements[4], + dedent` + CREATE TRIGGER update_ensure_main_stars_primarykey + BEFORE UPDATE ON "main"."stars" + FOR EACH ROW + EXECUTE FUNCTION update_ensure_main_stars_primarykey_function(); + ` + ) + } + }) + + test('make migration for table with FKs', (t) => { + const { builder } = t.context + /* + SatOpMigrate_ForeignKey.fromPartial({ + fkCols: [''] + }) + */ + + const migration = { + format: 'SatOpMigrate', + ops: [ + encodeSatOpMigrateMsg( + SatOpMigrate.fromPartial({ + version: '1', + stmts: [ + SatOpMigrate_Stmt.fromPartial({ + type: 0, + sql: 'CREATE TABLE "main"."tenants" (\n "id" TEXT NOT NULL,\n "name" TEXT NOT NULL,\n CONSTRAINT "tenants_pkey" PRIMARY KEY ("id")\n) WITHOUT ROWID;\n', + }), + ], + table: SatOpMigrate_Table.fromPartial({ + name: 'tenants', + columns: [ + SatOpMigrate_Column.fromPartial({ + name: 'id', + sqliteType: 'TEXT', + pgType: { + $type: 'Electric.Satellite.SatOpMigrate.PgColumnType', + name: 'uuid', + array: [], + size: [], + }, + }), + SatOpMigrate_Column.fromPartial({ + name: 'name', + sqliteType: 'TEXT', + pgType: { + $type: 'Electric.Satellite.SatOpMigrate.PgColumnType', + name: 'text', + array: [], + size: [], + }, + }), + ], + fks: [], + pks: ['id'], + }), + }) + ), + encodeSatOpMigrateMsg( + SatOpMigrate.fromPartial({ + version: '1', + stmts: [ + SatOpMigrate_Stmt.fromPartial({ + type: 0, + sql: 'CREATE TABLE "main"."users" (\n "id" TEXT NOT NULL,\n "name" TEXT NOT NULL,\n "email" TEXT NOT NULL,\n "password_hash" TEXT NOT NULL,\n CONSTRAINT "users_pkey" PRIMARY KEY ("id")\n) WITHOUT ROWID;\n', + }), + ], + table: SatOpMigrate_Table.fromPartial({ + name: 'users', + columns: [ + SatOpMigrate_Column.fromPartial({ + name: 'id', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'uuid', + array: [], + size: [], + }), + }), + SatOpMigrate_Column.fromPartial({ + name: 'name', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'text', + array: [], + size: [], + }), + }), + SatOpMigrate_Column.fromPartial({ + name: 'email', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'text', + array: [], + size: [], + }), + }), + SatOpMigrate_Column.fromPartial({ + name: 'password_hash', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'text', + array: [], + size: [], + }), + }), + ], + fks: [], + pks: ['id'], + }), + }) + ), + encodeSatOpMigrateMsg( + SatOpMigrate.fromPartial({ + version: '1', + stmts: [ + SatOpMigrate_Stmt.fromPartial({ + type: 0, + sql: 'CREATE TABLE "main"."tenant_users" (\n "tenant_id" TEXT NOT NULL,\n "user_id" TEXT NOT NULL,\n CONSTRAINT "tenant_users_tenant_id_fkey" FOREIGN KEY ("tenant_id") REFERENCES "tenants" ("id") ON DELETE CASCADE,\n CONSTRAINT "tenant_users_user_id_fkey" FOREIGN KEY ("user_id") REFERENCES "users" ("id") ON DELETE CASCADE,\n CONSTRAINT "tenant_users_pkey" PRIMARY KEY ("tenant_id", "user_id")\n) WITHOUT ROWID;\n', + }), + ], + table: SatOpMigrate_Table.fromPartial({ + name: 'tenant_users', + columns: [ + SatOpMigrate_Column.fromPartial({ + name: 'tenant_id', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'uuid', + array: [], + size: [], + }), + }), + 
SatOpMigrate_Column.fromPartial({ + name: 'user_id', + sqliteType: 'TEXT', + pgType: SatOpMigrate_PgColumnType.fromPartial({ + name: 'uuid', + array: [], + size: [], + }), + }), + ], + fks: [ + SatOpMigrate_ForeignKey.fromPartial({ + fkCols: ['tenant_id'], + pkTable: 'tenants', + pkCols: ['id'], + }), + SatOpMigrate_ForeignKey.fromPartial({ + fkCols: ['user_id'], + pkTable: 'users', + pkCols: ['id'], + }), + ], + pks: ['tenant_id', 'user_id'], + }), + }) + ), + ], + protocol_version: 'Electric.Satellite', + version: '1', + } + + const metaData = parseMetadata(migration) + makeMigration(metaData, builder) + t.pass() + }) + + test('generate index creation migration from meta data', (t) => { + const { migrationMetaData, builder } = t.context + const metaData = parseMetadata({ + format: 'SatOpMigrate', + ops: [ + encodeSatOpMigrateMsg( + SatOpMigrate.fromPartial({ + version: '20230613112725_814', + stmts: [ + SatOpMigrate_Stmt.create({ + type: SatOpMigrate_Type.CREATE_INDEX, + sql: 'CREATE INDEX idx_stars_username ON stars(username);', + }), + ], + }) + ), + ], + protocol_version: 'Electric.Satellite', + version: '20230613112725_814', + }) + const migration = makeMigration(metaData, builder) + t.is(migration.version, migrationMetaData.version) + t.deepEqual(migration.statements, [ + 'CREATE INDEX idx_stars_username ON stars(username);', + ]) + }) + + const migrationsFolder = path.join('./test/migrators/support/migrations') + + test('read migration meta data', async (t) => { + const { builder } = t.context + const migrations = await loadMigrations(migrationsFolder, builder) + const versions = migrations.map((m) => m.version) + t.deepEqual(versions, ['20230613112725_814', '20230613112735_992']) + }) +} diff --git a/clients/typescript/test/migrators/postgres/builder.test.ts b/clients/typescript/test/migrators/postgres/builder.test.ts index d8a57c045d..8b11088770 100644 --- a/clients/typescript/test/migrators/postgres/builder.test.ts +++ b/clients/typescript/test/migrators/postgres/builder.test.ts @@ -1,337 +1,33 @@ -import test from 'ava' -import { dedent } from 'ts-dedent' +import anyTest, { TestFn } from 'ava' import { makeMigration, parseMetadata } from '../../../src/migrators/builder' -import { loadMigrations } from '../../../src/cli/migrations/builder' import { - SatOpMigrate, - SatOpMigrate_Table, - SatOpMigrate_Type, - SatOpMigrate_Stmt, - SatOpMigrate_Column, - SatOpMigrate_PgColumnType, - SatOpMigrate_ForeignKey, -} from '../../../src/_generated/protocol/satellite' -import _m0 from 'protobufjs/minimal.js' -import path from 'path' -import { pgBuilder } from '../../../src/migrators/query-builder' -import { DatabaseAdapter } from '../../../src/drivers/node-postgres' + ContextType, + bundleTests, + makeMigrationMetaData, +} from '../builder.test' import { makePgDatabase } from '../../support/node-postgres' +import { DatabaseAdapter } from '../../../src/drivers/node-postgres' import { PgBundleMigrator } from '../../../src/migrators' +import { pgBuilder } from '../../../src/migrators/query-builder' -function encodeSatOpMigrateMsg(request: SatOpMigrate) { - return ( - SatOpMigrate.encode(request, _m0.Writer.create()).finish() as any - ).toString('base64') -} - -const migrationMetaData = { - format: 'SatOpMigrate', - ops: [ - encodeSatOpMigrateMsg( - SatOpMigrate.fromPartial({ - version: '20230613112725_814', - stmts: [ - SatOpMigrate_Stmt.fromPartial({ - type: SatOpMigrate_Type.CREATE_TABLE, - sql: 'CREATE TABLE "main"."stars" (\n "id" TEXT NOT NULL PRIMARY KEY,\n "avatar_url" TEXT NOT NULL,\n 
"name" TEXT,\n "starred_at" TEXT NOT NULL,\n "username" TEXT NOT NULL\n);\n', - }), - ], - table: SatOpMigrate_Table.fromPartial({ - name: 'stars', - columns: [ - SatOpMigrate_Column.fromPartial({ - name: 'id', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'text', - array: [], - size: [], - }), - }), - SatOpMigrate_Column.fromPartial({ - name: 'avatar_url', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'text', - array: [], - size: [], - }), - }), - SatOpMigrate_Column.fromPartial({ - name: 'name', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'text', - array: [], - size: [], - }), - }), - SatOpMigrate_Column.fromPartial({ - name: 'starred_at', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'text', - array: [], - size: [], - }), - }), - SatOpMigrate_Column.fromPartial({ - name: 'username', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'text', - array: [], - size: [], - }), - }), - ], - fks: [], - pks: ['id'], - }), - }) - ), - ], - protocol_version: 'Electric.Satellite', - version: '20230613112725_814', -} - -test('parse migration meta data', (t) => { - const metaData = parseMetadata(migrationMetaData) - t.is(metaData.ops[0].table?.name, 'stars') - t.is(metaData.ops[0].table?.columns.length, 5) -}) - -test('generate migration from meta data', (t) => { - const metaData = parseMetadata(migrationMetaData) - const migration = makeMigration(metaData, pgBuilder) - t.is(migration.version, migrationMetaData.version) - t.is( - migration.statements[0], - 'CREATE TABLE "main"."stars" (\n "id" TEXT NOT NULL PRIMARY KEY,\n "avatar_url" TEXT NOT NULL,\n "name" TEXT,\n "starred_at" TEXT NOT NULL,\n "username" TEXT NOT NULL\n);\n' - ) - t.is( - migration.statements[3], - dedent` - CREATE OR REPLACE FUNCTION update_ensure_main_stars_primarykey_function() - RETURNS TRIGGER AS $$ - BEGIN - IF OLD."id" IS DISTINCT FROM NEW."id" THEN - RAISE EXCEPTION 'Cannot change the value of column id as it belongs to the primary key'; - END IF; - RETURN NEW; - END; - $$ LANGUAGE plpgsql; - ` - ) - - t.is( - migration.statements[4], - dedent` - CREATE TRIGGER update_ensure_main_stars_primarykey - BEFORE UPDATE ON "main"."stars" - FOR EACH ROW - EXECUTE FUNCTION update_ensure_main_stars_primarykey_function(); - ` - ) -}) +const test = anyTest as TestFn -test('make migration for table with FKs', (t) => { - /* - SatOpMigrate_ForeignKey.fromPartial({ - fkCols: [''] - }) - */ +test.beforeEach(async (t) => { + const builder = pgBuilder + const migrationMetaData = makeMigrationMetaData(builder) - const migration = { - format: 'SatOpMigrate', - ops: [ - encodeSatOpMigrateMsg( - SatOpMigrate.fromPartial({ - version: '1', - stmts: [ - SatOpMigrate_Stmt.fromPartial({ - type: 0, - sql: 'CREATE TABLE "main"."tenants" (\n "id" TEXT NOT NULL,\n "name" TEXT NOT NULL,\n CONSTRAINT "tenants_pkey" PRIMARY KEY ("id")\n) WITHOUT ROWID;\n', - }), - ], - table: SatOpMigrate_Table.fromPartial({ - name: 'tenants', - columns: [ - SatOpMigrate_Column.fromPartial({ - name: 'id', - sqliteType: 'TEXT', - pgType: { - $type: 'Electric.Satellite.SatOpMigrate.PgColumnType', - name: 'uuid', - array: [], - size: [], - }, - }), - SatOpMigrate_Column.fromPartial({ - name: 'name', - sqliteType: 'TEXT', - pgType: { - $type: 'Electric.Satellite.SatOpMigrate.PgColumnType', - name: 'text', - array: [], - size: [], - }, - }), - ], - fks: [], - pks: ['id'], - }), - }) - ), - encodeSatOpMigrateMsg( - 
SatOpMigrate.fromPartial({ - version: '1', - stmts: [ - SatOpMigrate_Stmt.fromPartial({ - type: 0, - sql: 'CREATE TABLE "main"."users" (\n "id" TEXT NOT NULL,\n "name" TEXT NOT NULL,\n "email" TEXT NOT NULL,\n "password_hash" TEXT NOT NULL,\n CONSTRAINT "users_pkey" PRIMARY KEY ("id")\n) WITHOUT ROWID;\n', - }), - ], - table: SatOpMigrate_Table.fromPartial({ - name: 'users', - columns: [ - SatOpMigrate_Column.fromPartial({ - name: 'id', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'uuid', - array: [], - size: [], - }), - }), - SatOpMigrate_Column.fromPartial({ - name: 'name', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'text', - array: [], - size: [], - }), - }), - SatOpMigrate_Column.fromPartial({ - name: 'email', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'text', - array: [], - size: [], - }), - }), - SatOpMigrate_Column.fromPartial({ - name: 'password_hash', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'text', - array: [], - size: [], - }), - }), - ], - fks: [], - pks: ['id'], - }), - }) - ), - encodeSatOpMigrateMsg( - SatOpMigrate.fromPartial({ - version: '1', - stmts: [ - SatOpMigrate_Stmt.fromPartial({ - type: 0, - sql: 'CREATE TABLE "main"."tenant_users" (\n "tenant_id" TEXT NOT NULL,\n "user_id" TEXT NOT NULL,\n CONSTRAINT "tenant_users_tenant_id_fkey" FOREIGN KEY ("tenant_id") REFERENCES "tenants" ("id") ON DELETE CASCADE,\n CONSTRAINT "tenant_users_user_id_fkey" FOREIGN KEY ("user_id") REFERENCES "users" ("id") ON DELETE CASCADE,\n CONSTRAINT "tenant_users_pkey" PRIMARY KEY ("tenant_id", "user_id")\n) WITHOUT ROWID;\n', - }), - ], - table: SatOpMigrate_Table.fromPartial({ - name: 'tenant_users', - columns: [ - SatOpMigrate_Column.fromPartial({ - name: 'tenant_id', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'uuid', - array: [], - size: [], - }), - }), - SatOpMigrate_Column.fromPartial({ - name: 'user_id', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'uuid', - array: [], - size: [], - }), - }), - ], - fks: [ - SatOpMigrate_ForeignKey.fromPartial({ - fkCols: ['tenant_id'], - pkTable: 'tenants', - pkCols: ['id'], - }), - SatOpMigrate_ForeignKey.fromPartial({ - fkCols: ['user_id'], - pkTable: 'users', - pkCols: ['id'], - }), - ], - pks: ['tenant_id', 'user_id'], - }), - }) - ), - ], - protocol_version: 'Electric.Satellite', - version: '1', + t.context = { + migrationMetaData, + builder, } - - //const migrateMetaData = 
JSON.parse(`{"format":"SatOpMigrate","ops":["GjcKB3RlbmFudHMSEgoCaWQSBFRFWFQaBgoEdXVpZBIUCgRuYW1lEgRURVhUGgYKBHRleHQiAmlkCgExEooBEocBQ1JFQVRFIFRBQkxFICJ0ZW5hbnRzIiAoCiAgImlkIiBURVhUIE5PVCBOVUxMLAogICJuYW1lIiBURVhUIE5PVCBOVUxMLAogIENPTlNUUkFJTlQgInRlbmFudHNfcGtleSIgUFJJTUFSWSBLRVkgKCJpZCIpCikgV0lUSE9VVCBST1dJRDsK","GmsKBXVzZXJzEhIKAmlkEgRURVhUGgYKBHV1aWQSFAoEbmFtZRIEVEVYVBoGCgR0ZXh0EhUKBWVtYWlsEgRURVhUGgYKBHRleHQSHQoNcGFzc3dvcmRfaGFzaBIEVEVYVBoGCgR0ZXh0IgJpZAoBMRLAARK9AUNSRUFURSBUQUJMRSAidXNlcnMiICgKICAiaWQiIFRFWFQgTk9UIE5VTEwsCiAgIm5hbWUiIFRFWFQgTk9UIE5VTEwsCiAgImVtYWlsIiBURVhUIE5PVCBOVUxMLAogICJwYXNzd29yZF9oYXNoIiBURVhUIE5PVCBOVUxMLAogIENPTlNUUkFJTlQgInVzZXJzX3BrZXkiIFBSSU1BUlkgS0VZICgiaWQiKQopIFdJVEhPVVQgUk9XSUQ7Cg==","GoYBCgx0ZW5hbnRfdXNlcnMSGQoJdGVuYW50X2lkEgRURVhUGgYKBHV1aWQSFwoHdXNlcl9pZBIEVEVYVBoGCgR1dWlkGhgKCXRlbmFudF9pZBIHdGVuYW50cxoCaWQaFAoHdXNlcl9pZBIFdXNlcnMaAmlkIgl0ZW5hbnRfaWQiB3VzZXJfaWQKATESkgMSjwNDUkVBVEUgVEFCTEUgInRlbmFudF91c2VycyIgKAogICJ0ZW5hbnRfaWQiIFRFWFQgTk9UIE5VTEwsCiAgInVzZXJfaWQiIFRFWFQgTk9UIE5VTEwsCiAgQ09OU1RSQUlOVCAidGVuYW50X3VzZXJzX3RlbmFudF9pZF9ma2V5IiBGT1JFSUdOIEtFWSAoInRlbmFudF9pZCIpIFJFRkVSRU5DRVMgInRlbmFudHMiICgiaWQiKSBPTiBERUxFVEUgQ0FTQ0FERSwKICBDT05TVFJBSU5UICJ0ZW5hbnRfdXNlcnNfdXNlcl9pZF9ma2V5IiBGT1JFSUdOIEtFWSAoInVzZXJfaWQiKSBSRUZFUkVOQ0VTICJ1c2VycyIgKCJpZCIpIE9OIERFTEVURSBDQVNDQURFLAogIENPTlNUUkFJTlQgInRlbmFudF91c2Vyc19wa2V5IiBQUklNQVJZIEtFWSAoInRlbmFudF9pZCIsICJ1c2VyX2lkIikKKSBXSVRIT1VUIFJPV0lEOwo="],"protocol_version":"Electric.Satellite","version":"1"}`) - const metaData = parseMetadata(migration) - makeMigration(metaData, pgBuilder) - t.pass() -}) - -test('generate index creation migration from meta data', (t) => { - const metaData = parseMetadata({ - format: 'SatOpMigrate', - ops: [ - encodeSatOpMigrateMsg( - SatOpMigrate.fromPartial({ - version: '20230613112725_814', - stmts: [ - SatOpMigrate_Stmt.create({ - type: SatOpMigrate_Type.CREATE_INDEX, - sql: 'CREATE INDEX idx_stars_username ON stars(username);', - }), - ], - }) - ), - ], - protocol_version: 'Electric.Satellite', - version: '20230613112725_814', - }) - const migration = makeMigration(metaData, pgBuilder) - t.is(migration.version, migrationMetaData.version) - t.deepEqual(migration.statements, [ - 'CREATE INDEX idx_stars_username ON stars(username);', - ]) }) -const migrationsFolder = path.join('./test/migrators/support/migrations') - -test('read migration meta data', async (t) => { - const migrations = await loadMigrations(migrationsFolder, pgBuilder) - const versions = migrations.map((m) => m.version) - t.deepEqual(versions, ['20230613112725_814', '20230613112735_992']) -}) +bundleTests(test) test('load migration from meta data', async (t) => { + const { migrationMetaData, builder } = t.context + const migration = makeMigration(parseMetadata(migrationMetaData), builder) const { db, stop } = await makePgDatabase('load-migration-meta-data', 5500) - const migration = makeMigration(parseMetadata(migrationMetaData), pgBuilder) const adapter = new DatabaseAdapter(db) const migrator = new PgBundleMigrator(adapter, [migration]) @@ -341,9 +37,9 @@ test('load migration from meta data', async (t) => { // Check that the DB is initialized with the stars table const tables = await adapter.query({ sql: ` - SELECT table_name - FROM information_schema.tables - WHERE table_schema = 'main' AND table_name = 'stars';`, + SELECT table_name + FROM information_schema.tables + WHERE table_schema = 'main' AND table_name = 'stars';`, }) const starIdx = tables.findIndex((tbl) => tbl.table_name === 'stars') @@ 
-352,8 +48,8 @@ test('load migration from meta data', async (t) => { const columns = await adapter .query({ sql: `SELECT column_name - FROM information_schema.columns - WHERE table_name = 'stars';`, + FROM information_schema.columns + WHERE table_name = 'stars';`, }) .then((columns) => columns.map((column) => column.column_name)) diff --git a/clients/typescript/test/migrators/sqlite/builder.test.ts b/clients/typescript/test/migrators/sqlite/builder.test.ts index 33710525f4..031f5448e3 100644 --- a/clients/typescript/test/migrators/sqlite/builder.test.ts +++ b/clients/typescript/test/migrators/sqlite/builder.test.ts @@ -1,337 +1,50 @@ -import test from 'ava' +import anyTest, { TestFn } from 'ava' import { makeMigration, parseMetadata } from '../../../src/migrators/builder' -import { loadMigrations } from '../../../src/cli/migrations/builder' -import { - SatOpMigrate, - SatOpMigrate_Table, - SatOpMigrate_Type, - SatOpMigrate_Stmt, - SatOpMigrate_Column, - SatOpMigrate_PgColumnType, - SatOpMigrate_ForeignKey, -} from '../../../src/_generated/protocol/satellite' -import _m0 from 'protobufjs/minimal.js' import Database from 'better-sqlite3' -import { electrify } from '../../../src/drivers/better-sqlite3' -import path from 'path' -import { DbSchema } from '../../../src/client/model' -import { MockSocket } from '../../../src/sockets/mock' +import { DatabaseAdapter } from '../../../src/drivers/better-sqlite3' import { sqliteBuilder } from '../../../src/migrators/query-builder' +import { + ContextType, + bundleTests, + makeMigrationMetaData, +} from '../builder.test' +import { SqliteBundleMigrator } from '../../../src/migrators' -function encodeSatOpMigrateMsg(request: SatOpMigrate) { - return ( - SatOpMigrate.encode(request, _m0.Writer.create()).finish() as any - ).toString('base64') -} - -const migrationMetaData = { - format: 'SatOpMigrate', - ops: [ - encodeSatOpMigrateMsg( - SatOpMigrate.fromPartial({ - version: '20230613112725_814', - stmts: [ - SatOpMigrate_Stmt.fromPartial({ - type: SatOpMigrate_Type.CREATE_TABLE, - sql: 'CREATE TABLE "stars" (\n "id" TEXT NOT NULL,\n "avatar_url" TEXT NOT NULL,\n "name" TEXT,\n "starred_at" TEXT NOT NULL,\n "username" TEXT NOT NULL,\n CONSTRAINT "stars_pkey" PRIMARY KEY ("id")\n) WITHOUT ROWID;\n', - }), - ], - table: SatOpMigrate_Table.fromPartial({ - name: 'stars', - columns: [ - SatOpMigrate_Column.fromPartial({ - name: 'id', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'text', - array: [], - size: [], - }), - }), - SatOpMigrate_Column.fromPartial({ - name: 'avatar_url', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'text', - array: [], - size: [], - }), - }), - SatOpMigrate_Column.fromPartial({ - name: 'name', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'text', - array: [], - size: [], - }), - }), - SatOpMigrate_Column.fromPartial({ - name: 'starred_at', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'text', - array: [], - size: [], - }), - }), - SatOpMigrate_Column.fromPartial({ - name: 'username', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'text', - array: [], - size: [], - }), - }), - ], - fks: [], - pks: ['id'], - }), - }) - ), - ], - protocol_version: 'Electric.Satellite', - version: '20230613112725_814', -} - -test('parse migration meta data', (t) => { - const metaData = parseMetadata(migrationMetaData) - t.is(metaData.ops[0].table?.name, 'stars') - 
t.is(metaData.ops[0].table?.columns.length, 5) -}) - -test('generate migration from meta data', (t) => { - const metaData = parseMetadata(migrationMetaData) - const migration = makeMigration(metaData, sqliteBuilder) - t.is(migration.version, migrationMetaData.version) - t.is( - migration.statements[0], - 'CREATE TABLE "stars" (\n "id" TEXT NOT NULL,\n "avatar_url" TEXT NOT NULL,\n "name" TEXT,\n "starred_at" TEXT NOT NULL,\n "username" TEXT NOT NULL,\n CONSTRAINT "stars_pkey" PRIMARY KEY ("id")\n) WITHOUT ROWID;\n' - ) - t.is( - migration.statements[3], - 'CREATE TRIGGER update_ensure_main_stars_primarykey\n BEFORE UPDATE ON "main"."stars"\nBEGIN\n SELECT\n CASE\n WHEN old."id" != new."id" THEN\n \t\tRAISE (ABORT, \'cannot change the value of column id as it belongs to the primary key\')\n END;\nEND;' - ) -}) +const test = anyTest as TestFn -test('make migration for table with FKs', (t) => { - /* - SatOpMigrate_ForeignKey.fromPartial({ - fkCols: [''] - }) - */ +test.beforeEach(async (t) => { + const builder = sqliteBuilder + const migrationMetaData = makeMigrationMetaData(builder) - const migration = { - format: 'SatOpMigrate', - ops: [ - encodeSatOpMigrateMsg( - SatOpMigrate.fromPartial({ - version: '1', - stmts: [ - SatOpMigrate_Stmt.fromPartial({ - type: 0, - sql: 'CREATE TABLE "tenants" (\n "id" TEXT NOT NULL,\n "name" TEXT NOT NULL,\n CONSTRAINT "tenants_pkey" PRIMARY KEY ("id")\n) WITHOUT ROWID;\n', - }), - ], - table: SatOpMigrate_Table.fromPartial({ - name: 'tenants', - columns: [ - SatOpMigrate_Column.fromPartial({ - name: 'id', - sqliteType: 'TEXT', - pgType: { - $type: 'Electric.Satellite.SatOpMigrate.PgColumnType', - name: 'uuid', - array: [], - size: [], - }, - }), - SatOpMigrate_Column.fromPartial({ - name: 'name', - sqliteType: 'TEXT', - pgType: { - $type: 'Electric.Satellite.SatOpMigrate.PgColumnType', - name: 'text', - array: [], - size: [], - }, - }), - ], - fks: [], - pks: ['id'], - }), - }) - ), - encodeSatOpMigrateMsg( - SatOpMigrate.fromPartial({ - version: '1', - stmts: [ - SatOpMigrate_Stmt.fromPartial({ - type: 0, - sql: 'CREATE TABLE "users" (\n "id" TEXT NOT NULL,\n "name" TEXT NOT NULL,\n "email" TEXT NOT NULL,\n "password_hash" TEXT NOT NULL,\n CONSTRAINT "users_pkey" PRIMARY KEY ("id")\n) WITHOUT ROWID;\n', - }), - ], - table: SatOpMigrate_Table.fromPartial({ - name: 'users', - columns: [ - SatOpMigrate_Column.fromPartial({ - name: 'id', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'uuid', - array: [], - size: [], - }), - }), - SatOpMigrate_Column.fromPartial({ - name: 'name', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'text', - array: [], - size: [], - }), - }), - SatOpMigrate_Column.fromPartial({ - name: 'email', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'text', - array: [], - size: [], - }), - }), - SatOpMigrate_Column.fromPartial({ - name: 'password_hash', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'text', - array: [], - size: [], - }), - }), - ], - fks: [], - pks: ['id'], - }), - }) - ), - encodeSatOpMigrateMsg( - SatOpMigrate.fromPartial({ - version: '1', - stmts: [ - SatOpMigrate_Stmt.fromPartial({ - type: 0, - sql: 'CREATE TABLE "tenant_users" (\n "tenant_id" TEXT NOT NULL,\n "user_id" TEXT NOT NULL,\n CONSTRAINT "tenant_users_tenant_id_fkey" FOREIGN KEY ("tenant_id") REFERENCES "tenants" ("id") ON DELETE CASCADE,\n CONSTRAINT "tenant_users_user_id_fkey" FOREIGN KEY ("user_id") REFERENCES "users" ("id") ON 
DELETE CASCADE,\n CONSTRAINT "tenant_users_pkey" PRIMARY KEY ("tenant_id", "user_id")\n) WITHOUT ROWID;\n', - }), - ], - table: SatOpMigrate_Table.fromPartial({ - name: 'tenant_users', - columns: [ - SatOpMigrate_Column.fromPartial({ - name: 'tenant_id', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'uuid', - array: [], - size: [], - }), - }), - SatOpMigrate_Column.fromPartial({ - name: 'user_id', - sqliteType: 'TEXT', - pgType: SatOpMigrate_PgColumnType.fromPartial({ - name: 'uuid', - array: [], - size: [], - }), - }), - ], - fks: [ - SatOpMigrate_ForeignKey.fromPartial({ - fkCols: ['tenant_id'], - pkTable: 'tenants', - pkCols: ['id'], - }), - SatOpMigrate_ForeignKey.fromPartial({ - fkCols: ['user_id'], - pkTable: 'users', - pkCols: ['id'], - }), - ], - pks: ['tenant_id', 'user_id'], - }), - }) - ), - ], - protocol_version: 'Electric.Satellite', - version: '1', + t.context = { + migrationMetaData, + builder, } - - //const migrateMetaData = JSON.parse(`{"format":"SatOpMigrate","ops":["GjcKB3RlbmFudHMSEgoCaWQSBFRFWFQaBgoEdXVpZBIUCgRuYW1lEgRURVhUGgYKBHRleHQiAmlkCgExEooBEocBQ1JFQVRFIFRBQkxFICJ0ZW5hbnRzIiAoCiAgImlkIiBURVhUIE5PVCBOVUxMLAogICJuYW1lIiBURVhUIE5PVCBOVUxMLAogIENPTlNUUkFJTlQgInRlbmFudHNfcGtleSIgUFJJTUFSWSBLRVkgKCJpZCIpCikgV0lUSE9VVCBST1dJRDsK","GmsKBXVzZXJzEhIKAmlkEgRURVhUGgYKBHV1aWQSFAoEbmFtZRIEVEVYVBoGCgR0ZXh0EhUKBWVtYWlsEgRURVhUGgYKBHRleHQSHQoNcGFzc3dvcmRfaGFzaBIEVEVYVBoGCgR0ZXh0IgJpZAoBMRLAARK9AUNSRUFURSBUQUJMRSAidXNlcnMiICgKICAiaWQiIFRFWFQgTk9UIE5VTEwsCiAgIm5hbWUiIFRFWFQgTk9UIE5VTEwsCiAgImVtYWlsIiBURVhUIE5PVCBOVUxMLAogICJwYXNzd29yZF9oYXNoIiBURVhUIE5PVCBOVUxMLAogIENPTlNUUkFJTlQgInVzZXJzX3BrZXkiIFBSSU1BUlkgS0VZICgiaWQiKQopIFdJVEhPVVQgUk9XSUQ7Cg==","GoYBCgx0ZW5hbnRfdXNlcnMSGQoJdGVuYW50X2lkEgRURVhUGgYKBHV1aWQSFwoHdXNlcl9pZBIEVEVYVBoGCgR1dWlkGhgKCXRlbmFudF9pZBIHdGVuYW50cxoCaWQaFAoHdXNlcl9pZBIFdXNlcnMaAmlkIgl0ZW5hbnRfaWQiB3VzZXJfaWQKATESkgMSjwNDUkVBVEUgVEFCTEUgInRlbmFudF91c2VycyIgKAogICJ0ZW5hbnRfaWQiIFRFWFQgTk9UIE5VTEwsCiAgInVzZXJfaWQiIFRFWFQgTk9UIE5VTEwsCiAgQ09OU1RSQUlOVCAidGVuYW50X3VzZXJzX3RlbmFudF9pZF9ma2V5IiBGT1JFSUdOIEtFWSAoInRlbmFudF9pZCIpIFJFRkVSRU5DRVMgInRlbmFudHMiICgiaWQiKSBPTiBERUxFVEUgQ0FTQ0FERSwKICBDT05TVFJBSU5UICJ0ZW5hbnRfdXNlcnNfdXNlcl9pZF9ma2V5IiBGT1JFSUdOIEtFWSAoInVzZXJfaWQiKSBSRUZFUkVOQ0VTICJ1c2VycyIgKCJpZCIpIE9OIERFTEVURSBDQVNDQURFLAogIENPTlNUUkFJTlQgInRlbmFudF91c2Vyc19wa2V5IiBQUklNQVJZIEtFWSAoInRlbmFudF9pZCIsICJ1c2VyX2lkIikKKSBXSVRIT1VUIFJPV0lEOwo="],"protocol_version":"Electric.Satellite","version":"1"}`) - const metaData = parseMetadata(migration) - makeMigration(metaData, sqliteBuilder) - t.pass() }) -test('generate index creation migration from meta data', (t) => { - const metaData = parseMetadata({ - format: 'SatOpMigrate', - ops: [ - encodeSatOpMigrateMsg( - SatOpMigrate.fromPartial({ - version: '20230613112725_814', - stmts: [ - SatOpMigrate_Stmt.create({ - type: SatOpMigrate_Type.CREATE_INDEX, - sql: 'CREATE INDEX idx_stars_username ON stars(username);', - }), - ], - }) - ), - ], - protocol_version: 'Electric.Satellite', - version: '20230613112725_814', - }) - const migration = makeMigration(metaData, sqliteBuilder) - t.is(migration.version, migrationMetaData.version) - t.deepEqual(migration.statements, [ - 'CREATE INDEX idx_stars_username ON stars(username);', - ]) -}) - -const migrationsFolder = path.join('./test/migrators/support/migrations') - -test('read migration meta data', async (t) => { - const migrations = await loadMigrations(migrationsFolder, sqliteBuilder) - const versions = migrations.map((m) => m.version) - 
t.deepEqual(versions, ['20230613112725_814', '20230613112735_992']) -}) +bundleTests(test) test('load migration from meta data', async (t) => { + const { migrationMetaData, builder } = t.context + const migration = makeMigration(parseMetadata(migrationMetaData), builder) + const db = new Database(':memory:') - const migration = makeMigration( - parseMetadata(migrationMetaData), - sqliteBuilder - ) - const electric = await electrify( - db, - new DbSchema({}, [migration]), - {}, - { socketFactory: MockSocket } - ) + const adapter = new DatabaseAdapter(db) + const migrator = new SqliteBundleMigrator(adapter, [migration]) + + // Apply the migration + await migrator.up() // Check that the DB is initialized with the stars table - const tables = await electric.db.rawQuery({ + const tables = await adapter.query({ sql: `SELECT name FROM sqlite_master WHERE type='table' AND name='stars';`, }) const starIdx = tables.findIndex((tbl) => tbl.name === 'stars') t.assert(starIdx >= 0) // must exist - const columns = await electric.db - .rawQuery({ + const columns = await adapter + .query({ sql: `PRAGMA table_info(stars);`, }) .then((columns) => columns.map((column) => column.name)) From 33ae045bbfa67c5e8cf8f914e8373ce269af5767 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 5 Mar 2024 13:05:17 +0100 Subject: [PATCH 023/156] Remove obsolete comments --- .../src/drivers/better-sqlite3/adapter.ts | 5 ----- .../src/drivers/node-postgres/database.ts | 1 - .../src/drivers/tauri-postgres/database.ts | 14 +------------- 3 files changed, 1 insertion(+), 19 deletions(-) diff --git a/clients/typescript/src/drivers/better-sqlite3/adapter.ts b/clients/typescript/src/drivers/better-sqlite3/adapter.ts index eb888dc9b0..78d79b2680 100644 --- a/clients/typescript/src/drivers/better-sqlite3/adapter.ts +++ b/clients/typescript/src/drivers/better-sqlite3/adapter.ts @@ -26,7 +26,6 @@ export class DatabaseAdapter } async runInTransaction(...statements: DbStatement[]): Promise { - console.log(`runInTransaction: ${JSON.stringify(statements)}`) const txn = this.db.transaction((stmts: DbStatement[]) => { let rowsAffected = 0 for (const stmt of stmts) { @@ -52,7 +51,6 @@ export class DatabaseAdapter // Promise interface, but impl not actually async async run({ sql, args }: DbStatement): Promise { - console.log(`RUN: ${sql} - ${JSON.stringify(args)}`) const prep = this.db.prepare(sql) const res = prep.run(...wrapBindParams(args)) return { @@ -62,7 +60,6 @@ export class DatabaseAdapter // This `query` function does not enforce that the query is read-only async query({ sql, args }: DbStatement): Promise { - console.log(`QUERY: ${sql} - ${JSON.stringify(args)}`) const stmt = this.db.prepare(sql) return stmt.all(...wrapBindParams(args)) } @@ -86,7 +83,6 @@ class WrappedTx implements Tx { successCallback?: (tx: WrappedTx, res: RunResult) => void, errorCallback?: (error: any) => void ): void { - console.log(`wrapped tx run: ${sql} - ${JSON.stringify(args)}`) try { const prep = this.db.prepare(sql) const res = prep.run(...wrapBindParams(args)) @@ -103,7 +99,6 @@ class WrappedTx implements Tx { successCallback: (tx: WrappedTx, res: Row[]) => void, errorCallback?: (error: any) => void ): void { - console.log(`wrapped tx query: ${sql} - ${JSON.stringify(args)}`) try { const stmt = this.db.prepare(sql) const rows = stmt.all(...wrapBindParams(args)) diff --git a/clients/typescript/src/drivers/node-postgres/database.ts b/clients/typescript/src/drivers/node-postgres/database.ts index 6c981854a4..d9fb618d3d 100644 --- 
a/clients/typescript/src/drivers/node-postgres/database.ts +++ b/clients/typescript/src/drivers/node-postgres/database.ts @@ -32,7 +32,6 @@ export class ElectricDatabase implements Database { ) {} async exec(statement: Statement): Promise { - console.log(`EXEC: ${statement.sql} - ${JSON.stringify(statement.args)}`) const { rows, rowCount } = await this.db.query( statement.sql, statement.args diff --git a/clients/typescript/src/drivers/tauri-postgres/database.ts b/clients/typescript/src/drivers/tauri-postgres/database.ts index df412330f2..fb3afe7df8 100644 --- a/clients/typescript/src/drivers/tauri-postgres/database.ts +++ b/clients/typescript/src/drivers/tauri-postgres/database.ts @@ -30,22 +30,10 @@ export class ElectricDatabase implements Database { // Create a Database instance using the static `init` method instead. private constructor(public name: string, private invoke: Function) {} - /* - async tauri_init(name: string) { - this.invoke("tauri_init", { name }); - } - */ - private tauriExec(statement: Statement): Promise { return this.invoke('tauri_exec_command', { sql: statement.sql, - values: statement.args ?? [], // TODO: have to modify the Rust code to expect just the values instead of bind params - /* - bind_params: { - keys: [], - values: statement.args ?? [], - } - */ + values: statement.args ?? [], }) } From fa09c20cc9ad8ce1bbd1e7b46e92c222be57be56 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 15 Feb 2024 10:12:47 +0100 Subject: [PATCH 024/156] Port the Satellite process to Postgres and all unit tests. --- clients/typescript/test/satellite/common.ts | 14 ++++++++++++++ .../test/satellite/process.migration.test.ts | 1 - 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/clients/typescript/test/satellite/common.ts b/clients/typescript/test/satellite/common.ts index 3876734478..cc6dfd8af5 100644 --- a/clients/typescript/test/satellite/common.ts +++ b/clients/typescript/test/satellite/common.ts @@ -28,6 +28,20 @@ import { makePgDatabase } from '../support/node-postgres' import { DatabaseAdapter as PgDatabaseAdapter } from '../../src/drivers/node-postgres/adapter' import { DatabaseAdapter } from '../../src/electric/adapter' +export type Database = { + exec(statement: { sql: string }): Promise +} + +export function wrapDB(db: SqliteDB): Database { + const wrappedDB = { + exec: async ({ sql }: { sql: string }) => { + console.log('EXECCC:\n' + sql) + db.exec(sql) + }, + } + return wrappedDB +} + export const dbDescription = new DbSchema( { child: { diff --git a/clients/typescript/test/satellite/process.migration.test.ts b/clients/typescript/test/satellite/process.migration.test.ts index 8127906311..c280a79d07 100644 --- a/clients/typescript/test/satellite/process.migration.test.ts +++ b/clients/typescript/test/satellite/process.migration.test.ts @@ -5,7 +5,6 @@ import { SatOpMigrate_Type, SatRelation_RelationType, } from '../../src/_generated/protocol/satellite' - import { generateTag } from '../../src/satellite/oplog' import { From f7b59a1e2aedce469f1cc10f8adab992ed8e4914 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 15 Feb 2024 13:54:15 +0100 Subject: [PATCH 025/156] Modified CLI and generator to bundle migrations for both SQLite and Postgres. 
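The CLI now fetches the migrations from Electric twice, once per dialect
(/api/migrations?dialect=sqlite and /api/migrations?dialect=postgresql), and
builds the results into `migrations.ts` and `pg-migrations.ts` next to the
generated client. A rough sketch of the client module the generator is expected
to emit after this change (illustrative only; the table schema contents and the
exact import list from 'electric-sql/client/model' are elided here):

    import migrations from './migrations' // bundled SQLite migrations
    import pgMigrations from './pg-migrations' // bundled Postgres migrations
    import { DbSchema } from 'electric-sql/client/model'

    // table descriptions generated from the introspected Prisma schema
    const tableSchemas = {}

    // DbSchema now carries both bundles: the SQLite drivers keep using
    // `schema.migrations`, while the node-postgres and tauri-postgres
    // drivers pick up `schema.pgMigrations` (see the driver changes below).
    export const schema = new DbSchema(tableSchemas, migrations, pgMigrations)
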
--- .../typescript/src/cli/migrations/migrate.ts | 67 +++++++++++++------ clients/typescript/src/client/model/schema.ts | 11 ++- .../src/drivers/node-postgres/index.ts | 2 +- .../src/drivers/tauri-postgres/index.ts | 2 +- .../typescript/test/satellite/client.test.ts | 3 + clients/typescript/test/satellite/common.ts | 1 + .../test/satellite/serialization.test.ts | 7 +- .../writeTableSchemas.ts | 4 +- .../writeSingleFileImportStatements.ts | 1 + 9 files changed, 73 insertions(+), 25 deletions(-) diff --git a/clients/typescript/src/cli/migrations/migrate.ts b/clients/typescript/src/cli/migrations/migrate.ts index fe13e416eb..53296f0138 100644 --- a/clients/typescript/src/cli/migrations/migrate.ts +++ b/clients/typescript/src/cli/migrations/migrate.ts @@ -16,7 +16,7 @@ import { getConfig, type Config } from '../config' import { start } from '../docker-commands/command-start' import { stop } from '../docker-commands/command-stop' import { withConfig } from '../configure/command-with-config' -import { sqliteBuilder } from '../../migrators/query-builder' +import { pgBuilder, sqliteBuilder } from '../../migrators/query-builder' // Rather than run `npx prisma` we resolve the path to the prisma binary so that // we can be sure we are using the same version of Prisma that is a dependency of @@ -33,6 +33,8 @@ const generatorPath = path.join( ) const appRoot = path.resolve() // path where the user ran `npx electric migrate` +const sqliteMigrationsFileName = 'migrations.ts' +const pgMigrationsFileName = 'pg-migrations.ts' export const defaultPollingInterval = 1000 // in ms @@ -175,7 +177,7 @@ async function watchMigrations(opts: GeneratorOptions) { async function getLatestMigration( opts: Omit ): Promise { - const migrationsFile = migrationsFilePath(opts) + const migrationsFile = migrationsFilePath(opts, sqliteMigrationsFileName) // Read the migrations file contents and parse it // need to strip the `export default` before parsing. @@ -216,6 +218,33 @@ async function getLatestMigration( } } +async function bundleMigrationsFor( + dialect: 'sqlite' | 'postgresql', + opts: Omit, + tmpFolder: string +) { + const config = opts.config + const folder = dialect === 'sqlite' ? 'migrations' : 'pg-migrations' + const migrationsPath = path.join(tmpFolder, folder) + await fs.mkdir(migrationsPath) + const migrationEndpoint = + config.SERVICE + `/api/migrations?dialect=${dialect}` + + const migrationsFolder = path.resolve(migrationsPath) + const migrationsFileName = + dialect === 'sqlite' ? sqliteMigrationsFileName : pgMigrationsFileName + const migrationsFile = migrationsFilePath(opts, migrationsFileName) + + // Fetch the migrations from Electric endpoint and write them into `tmpFolder` + await fetchMigrations(migrationEndpoint, migrationsFolder, tmpFolder) + + // Build the migrations + const builder = dialect === 'sqlite' ? sqliteBuilder : pgBuilder + return async () => { + await buildMigrations(migrationsFolder, migrationsFile, builder) + } +} + /** * This function migrates the application. * To this end, it fetches the migrations from Electric, @@ -231,10 +260,6 @@ async function getLatestMigration( * @param configFolder Absolute path to the configuration folder. 
*/ async function _generate(opts: Omit) { - // TODO: introduce an option for the generator which indicates - // whether the app runs on Sqlite or PG - // and then here use the right query builder - const builder = sqliteBuilder const config = opts.config // Create a unique temporary folder in which to save // intermediate files without risking collisions @@ -242,15 +267,16 @@ async function _generate(opts: Omit) { let generationFailed = false try { - const migrationsPath = path.join(tmpFolder, 'migrations') - await fs.mkdir(migrationsPath) - const migrationEndpoint = config.SERVICE + '/api/migrations?dialect=sqlite' - - const migrationsFolder = path.resolve(migrationsPath) - const migrationsFile = migrationsFilePath(opts) - - // Fetch the migrations from Electric endpoint and write them into `tmpFolder` - await fetchMigrations(migrationEndpoint, migrationsFolder, tmpFolder) + const buildSqliteMigrations = await bundleMigrationsFor( + 'sqlite', + opts, + tmpFolder + ) + const buildPgMigrations = await bundleMigrationsFor( + 'postgresql', + opts, + tmpFolder + ) const prismaSchema = await createIntrospectionSchema(tmpFolder, opts) @@ -263,9 +289,9 @@ async function _generate(opts: Omit) { const relativePath = path.relative(appRoot, config.CLIENT_PATH) console.log(`Successfully generated Electric client at: ./${relativePath}`) - // Build the migrations console.log('Building migrations...') - await buildMigrations(migrationsFolder, migrationsFile, builder) + await buildSqliteMigrations() + await buildPgMigrations() console.log('Successfully built migrations') if ( @@ -687,9 +713,12 @@ async function fetchMigrations( return gotNewMigrations } -function migrationsFilePath(opts: Omit) { +function migrationsFilePath( + opts: Omit, + filename: string +) { const outFolder = path.resolve(opts.config.CLIENT_PATH) - return path.join(outFolder, 'migrations.ts') + return path.join(outFolder, filename) } function capitaliseFirstLetter(word: string): string { diff --git a/clients/typescript/src/client/model/schema.ts b/clients/typescript/src/client/model/schema.ts index 5672e35904..390730236e 100644 --- a/clients/typescript/src/client/model/schema.ts +++ b/clients/typescript/src/client/model/schema.ts @@ -120,7 +120,16 @@ export class DbSchema { Record> > - constructor(public tables: T, public migrations: Migration[]) { + /** + * @param tables Description of the database tables + * @param migrations Bundled SQLite migrations + * @param pgMigrations Bundled Postgres migrations + */ + constructor( + public tables: T, + public migrations: Migration[], + public pgMigrations: Migration[] + ) { this.extendedTables = this.extend(tables) this.incomingRelationsIndex = this.indexIncomingRelations() } diff --git a/clients/typescript/src/drivers/node-postgres/index.ts b/clients/typescript/src/drivers/node-postgres/index.ts index 98b39bd978..8870d00f40 100644 --- a/clients/typescript/src/drivers/node-postgres/index.ts +++ b/clients/typescript/src/drivers/node-postgres/index.ts @@ -24,7 +24,7 @@ export const electrify = async >( const dbName = db.name const adapter = opts?.adapter || new DatabaseAdapter(db) const migrator = - opts?.migrator || new PgBundleMigrator(adapter, dbDescription.migrations) + opts?.migrator || new PgBundleMigrator(adapter, dbDescription.pgMigrations) const socketFactory = opts?.socketFactory || WebSocketWeb const prepare = async (_connection: DatabaseAdapterI) => {} diff --git a/clients/typescript/src/drivers/tauri-postgres/index.ts b/clients/typescript/src/drivers/tauri-postgres/index.ts index 
afd6ab908c..830c347939 100644 --- a/clients/typescript/src/drivers/tauri-postgres/index.ts +++ b/clients/typescript/src/drivers/tauri-postgres/index.ts @@ -23,7 +23,7 @@ export const electrify = async >( const dbName = db.name const adapter = opts?.adapter || new DatabaseAdapter(db) const migrator = - opts?.migrator || new PgBundleMigrator(adapter, dbDescription.migrations) + opts?.migrator || new PgBundleMigrator(adapter, dbDescription.pgMigrations) const socketFactory = opts?.socketFactory || WebSocketWeb const prepare = async (_connection: DatabaseAdapterI) => {} diff --git a/clients/typescript/test/satellite/client.test.ts b/clients/typescript/test/satellite/client.test.ts index d737594d43..d64640916a 100644 --- a/clients/typescript/test/satellite/client.test.ts +++ b/clients/typescript/test/satellite/client.test.ts @@ -291,6 +291,7 @@ test.serial('receive transaction over multiple messages', async (t) => { HKT >, }, + [], [] ) @@ -703,6 +704,7 @@ test.serial('default and null test', async (t) => { table: tbl, Items: tbl, }, + [], [] ) @@ -1042,6 +1044,7 @@ test.serial('subscription correct protocol sequence with data', async (t) => { table: tbl, [tablename]: tbl, }, + [], [] ) diff --git a/clients/typescript/test/satellite/common.ts b/clients/typescript/test/satellite/common.ts index cc6dfd8af5..2048d3618c 100644 --- a/clients/typescript/test/satellite/common.ts +++ b/clients/typescript/test/satellite/common.ts @@ -67,6 +67,7 @@ export const dbDescription = new DbSchema( string, TableSchema >, + [], [] ) diff --git a/clients/typescript/test/satellite/serialization.test.ts b/clients/typescript/test/satellite/serialization.test.ts index 3998debfba..1a48370634 100644 --- a/clients/typescript/test/satellite/serialization.test.ts +++ b/clients/typescript/test/satellite/serialization.test.ts @@ -86,6 +86,7 @@ test('serialize/deserialize row data', async (t) => { HKT >, }, + [], [] ) @@ -250,6 +251,7 @@ test('Null mask uses bits as if they were a list', async (t) => { HKT >, }, + [], [] ) @@ -336,7 +338,8 @@ const setupPG: SetupFn = async (t: ExecutionContext) => { HKT >, }, - [] + [], + [] ) const satOpRow = serializeRow( @@ -372,7 +375,7 @@ const setupPG: SetupFn = async (t: ExecutionContext) => { t.is(Object.keys(inferredRelations).length, 0) // Empty Db schema - const testDbDescription = new DbSchema({}, []) + const testDbDescription = new DbSchema({}, [], []) const newTableRelation: Relation = { id: 1, diff --git a/generator/src/functions/tableDescriptionWriters/writeTableSchemas.ts b/generator/src/functions/tableDescriptionWriters/writeTableSchemas.ts index 7f62c177c1..0d3e8de4fc 100644 --- a/generator/src/functions/tableDescriptionWriters/writeTableSchemas.ts +++ b/generator/src/functions/tableDescriptionWriters/writeTableSchemas.ts @@ -85,7 +85,9 @@ export function writeTableSchemas( .blankLine() writer - .writeLine('export const schema = new DbSchema(tableSchemas, migrations)') + .writeLine( + 'export const schema = new DbSchema(tableSchemas, migrations, pgMigrations)' + ) .writeLine('export type Electric = ElectricClient') .conditionalWriteLine( dmmf.schema.hasJsonTypes, diff --git a/generator/src/functions/writeSingleFileImportStatements.ts b/generator/src/functions/writeSingleFileImportStatements.ts index 18f03cb167..410187d79a 100644 --- a/generator/src/functions/writeSingleFileImportStatements.ts +++ b/generator/src/functions/writeSingleFileImportStatements.ts @@ -33,4 +33,5 @@ export const writeSingleFileImportStatements: WriteStatements = ( writeImport(`{ ${imports.join(', 
')} }`, 'electric-sql/client/model') writeImport(`migrations`, './migrations') + writeImport(`pgMigrations`, './pg-migrations') } From aa1c411561cf3351b3f11c9f140c0a1e4788eb1b Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 5 Mar 2024 13:54:25 +0100 Subject: [PATCH 026/156] Removed obsolete code --- clients/typescript/src/satellite/process.ts | 7 ------- clients/typescript/test/satellite/common.ts | 14 -------------- .../test/satellite/serialization.test.ts | 2 +- 3 files changed, 1 insertion(+), 22 deletions(-) diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index c7a7de7196..1499a479fe 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -535,8 +535,6 @@ export class SatelliteProcess implements Satellite { ...Array.from(groupedChanges.values()).map((chg) => chg.table), ] - console.log(`Apply subs data: ${JSON.stringify(qualifiedTableNames)}`) - // Disable trigger for all affected tables stmts.push(...this._disableTriggers(qualifiedTableNames)) @@ -1289,8 +1287,6 @@ export class SatelliteProcess implements Satellite { const lsn = transaction.lsn let firstDMLChunk = true - // switches off on transaction commit/abort - //stmts.push({ sql: this.builder.deferForeignKeys }) // update lsn. stmts.push(this.updateLsnStmt(lsn)) stmts.push(this._resetSeenAdditionalDataStmt()) @@ -1359,7 +1355,6 @@ export class SatelliteProcess implements Satellite { const createdQualifiedTables = Array.from(createdTables).map( QualifiedTablename.parse ) - console.log(`createdTablenames IN TRANSACTION: ${createdTables}`) stmts.push(...this._disableTriggers(createdQualifiedTables)) newTables = new Set([...newTables, ...createdTables]) } @@ -1381,10 +1376,8 @@ export class SatelliteProcess implements Satellite { // Now run the DML and DDL statements in-order in a transaction const tablenames = Array.from(tablenamesSet) - console.log(`tablenames IN TRANSACTION: ${tablenames}`) const qualifiedTables = tablenames.map(QualifiedTablename.parse) const notNewTableNames = tablenames.filter((t) => !newTables.has(t)) - console.log(`notNewTablenames IN TRANSACTION: ${notNewTableNames}`) const notNewQualifiedTables = notNewTableNames.map(QualifiedTablename.parse) const allStatements = this._disableTriggers(notNewQualifiedTables) diff --git a/clients/typescript/test/satellite/common.ts b/clients/typescript/test/satellite/common.ts index 2048d3618c..9552ce126c 100644 --- a/clients/typescript/test/satellite/common.ts +++ b/clients/typescript/test/satellite/common.ts @@ -28,20 +28,6 @@ import { makePgDatabase } from '../support/node-postgres' import { DatabaseAdapter as PgDatabaseAdapter } from '../../src/drivers/node-postgres/adapter' import { DatabaseAdapter } from '../../src/electric/adapter' -export type Database = { - exec(statement: { sql: string }): Promise -} - -export function wrapDB(db: SqliteDB): Database { - const wrappedDB = { - exec: async ({ sql }: { sql: string }) => { - console.log('EXECCC:\n' + sql) - db.exec(sql) - }, - } - return wrappedDB -} - export const dbDescription = new DbSchema( { child: { diff --git a/clients/typescript/test/satellite/serialization.test.ts b/clients/typescript/test/satellite/serialization.test.ts index 1a48370634..7865c94d0b 100644 --- a/clients/typescript/test/satellite/serialization.test.ts +++ b/clients/typescript/test/satellite/serialization.test.ts @@ -339,7 +339,7 @@ const setupPG: SetupFn = async (t: ExecutionContext) => { >, }, [], - [] + [] ) const satOpRow = 
serializeRow( From ecd0ea2e3b51a8a5a8e846d0139c2de5e445f48f Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 5 Mar 2024 17:04:40 +0100 Subject: [PATCH 027/156] Fixes after rebase --- .../src/migrators/query-builder/builder.ts | 7 +- clients/typescript/src/satellite/process.ts | 27 +- clients/typescript/test/satellite/common.ts | 27 +- .../typescript/test/satellite/process.test.ts | 278 +++++++++--------- 4 files changed, 160 insertions(+), 179 deletions(-) diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index 24705dfee0..f47075aff5 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -327,13 +327,15 @@ export abstract class QueryBuilder { * @param columns columns that describe records * @param records records to be inserted * @param maxParameters max parameters this SQLite can accept - determines batching factor + * @param suffixSql optional SQL string to append to each insert statement * @returns array of statements ready to be executed by the adapter */ prepareInsertBatchedStatements( baseSql: string, columns: string[], records: Record[], - maxParameters: number + maxParameters: number, + suffixSql: string = '', ): Statement[] { const stmts: Statement[] = [] const columnCount = columns.length @@ -357,7 +359,8 @@ export abstract class QueryBuilder { const currentInsertCount = Math.min(recordCount - processed, batchMaxSize) const sql = baseSql + - Array.from({ length: currentInsertCount }, makeInsertPattern).join(',') + Array.from({ length: currentInsertCount }, makeInsertPattern).join(',') + + ' ' + suffixSql const args = records .slice(processed, processed + currentInsertCount) .flatMap((record) => columns.map((col) => record[col] as SqlValue)) diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index 1499a479fe..e73bc01013 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -490,7 +490,7 @@ export class SatelliteProcess implements Satellite { const groupedChanges = new Map< string, { - columns: string[] + relation: Relation records: InitialDataChange['record'][] table: QualifiedTablename } @@ -506,11 +506,10 @@ export class SatelliteProcess implements Satellite { const tableName = new QualifiedTablename('main', op.relation.table) const tableNameString = tableName.toString() if (groupedChanges.has(tableNameString)) { - const changeGroup = groupedChanges.get(tableNameString)! 
- changeGroup.dataChanges.push(op) + groupedChanges.get(tableName.toString())?.records.push(op.record) } else { groupedChanges.set(tableName.toString(), { - columns: op.relation.columns.map((x) => x.name), + relation: op.relation, records: [op.record], table: tableName, }) @@ -539,20 +538,23 @@ export class SatelliteProcess implements Satellite { stmts.push(...this._disableTriggers(qualifiedTableNames)) // For each table, do a batched insert - for (const [_table, { relation, dataChanges, table }] of groupedChanges) { - const records = dataChanges.map((change) => change.record) + for (const [_table, { relation, records, table }] of groupedChanges) { const columnNames = relation.columns.map((col) => col.name) const qualifiedTableName = `"${table.namespace}"."${table.tablename}"` - const sqlBase = `INSERT OR IGNORE INTO ${qualifiedTableName} (${columnNames.join( + const orIgnore = this.builder.sqliteOnly('OR IGNORE') + const onConflictDoNothing = this.builder.pgOnly('ON CONFLICT DO NOTHING') + const sqlBase = `INSERT ${orIgnore} INTO ${qualifiedTableName} (${columnNames.join( ', ' )}) VALUES ` + // Must be an insert or ignore into stmts.push( ...this.builder.prepareInsertBatchedStatements( sqlBase, columnNames, records as Record[], - this.maxSqlParameters + this.maxSqlParameters, + onConflictDoNothing ) ) } @@ -586,18 +588,19 @@ export class SatelliteProcess implements Satellite { // because nobody uses them and we don't have the machinery to to a // `RETURNING` clause in the middle of `runInTransaction`. const notificationChanges: Change[] = [] - groupedChanges.forEach(({ dataChanges, tableName, relation }) => { + + groupedChanges.forEach(({ records, table, relation }) => { const primaryKeyColNames = relation.columns .filter((col) => col.primaryKey) .map((col) => col.name) notificationChanges.push({ - qualifiedTablename: tableName, + qualifiedTablename: table, rowids: [], - recordChanges: dataChanges.map((change) => { + recordChanges: records.map((change) => { return { primaryKey: Object.fromEntries( primaryKeyColNames.map((col_name) => { - return [col_name, change.record[col_name]] + return [col_name, change[col_name]] }) ), type: 'INITIAL', diff --git a/clients/typescript/test/satellite/common.ts b/clients/typescript/test/satellite/common.ts index 9552ce126c..23abbc1076 100644 --- a/clients/typescript/test/satellite/common.ts +++ b/clients/typescript/test/satellite/common.ts @@ -16,7 +16,7 @@ import { buildInitialMigration as makeInitialMigration } from '../../src/migrato import sqliteMigrations from '../support/migrations/migrations.js' import pgMigrations from '../support/migrations/pg-migrations.js' import { ExecutionContext } from 'ava' -import { AuthState } from '../../src/auth' +import { AuthState, insecureAuthToken } from '../../src/auth' import { DbSchema, TableSchema } from '../../src/client/model/schema' import { PgBasicType } from '../../src/client/conversions/types' import { HKT } from '../../src/client/util/hkt' @@ -320,31 +320,6 @@ export const makePgContext = async ( t.context.stop = stop } -export const makeContext = async ( - t: ExecutionContext, - options: Opts = opts -) => { - await mkdir('.tmp', { recursive: true }) - const dbName = `.tmp/test-${randomValue()}.db` - const db = new SqliteDatabase(dbName) - const adapter = new SqliteDatabaseAdapter(db) - const migrator = new SqliteBundleMigrator(adapter, sqliteMigrations) - makeContextInternal(t, dbName, adapter, migrator, options) -} - -export const makePgContext = async ( - t: ExecutionContext, - port: number, - 
options: Opts = opts -) => { - const dbName = `test-${randomValue()}` - const { db, stop } = await makePgDatabase(dbName, port) - const adapter = new PgDatabaseAdapter(db) - const migrator = new PgBundleMigrator(adapter, pgMigrations) - makeContextInternal(t, dbName, adapter, migrator, options) - t.context.stop = stop -} - export const mockElectricClient = async ( db: SqliteDB, registry: Registry | GlobalRegistry, diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index bfd510b813..4eee7a92d1 100644 --- a/clients/typescript/test/satellite/process.test.ts +++ b/clients/typescript/test/satellite/process.test.ts @@ -130,14 +130,14 @@ export const processTests = (test: TestFn) => { const { satellite, authState, token } = t.context const { connectionPromise } = await startSatellite( - satellite, - authState, - token - ) + satellite, + authState, + token + ) const clientId1 = satellite._authState!.clientId t.truthy(clientId1) - await connectionPromise + await connectionPromise await satellite.stop() @@ -148,55 +148,55 @@ export const processTests = (test: TestFn) => { t.assert(clientId1 === clientId2) }) -test('can use user_id in JWT', async (t) => { - const { satellite, authState } = t.context + test('can use user_id in JWT', async (t) => { + const { satellite, authState } = t.context - await t.notThrowsAsync(async () => { - await startSatellite( - satellite, - authState, - insecureAuthToken({ user_id: 'test-userA' }) - ) + await t.notThrowsAsync(async () => { + await startSatellite( + satellite, + authState, + insecureAuthToken({ user_id: 'test-userA' }) + ) + }) }) -}) -test('can use sub in JWT', async (t) => { - const { satellite, authState } = t.context + test('can use sub in JWT', async (t) => { + const { satellite, authState } = t.context - await t.notThrowsAsync(async () => { - await startSatellite( - satellite, - authState, - insecureAuthToken({ sub: 'test-userB' }) - ) + await t.notThrowsAsync(async () => { + await startSatellite( + satellite, + authState, + insecureAuthToken({ sub: 'test-userB' }) + ) + }) }) -}) -test('require user_id or sub in JWT', async (t) => { - const { satellite, authState } = t.context + test('require user_id or sub in JWT', async (t) => { + const { satellite, authState } = t.context - const error = await t.throwsAsync(async () => { - await startSatellite( - satellite, - authState, - insecureAuthToken({ custom_user_claim: 'test-userC' }) - ) + const error = await t.throwsAsync(async () => { + await startSatellite( + satellite, + authState, + insecureAuthToken({ custom_user_claim: 'test-userC' }) + ) + }) + t.is(error?.message, 'Token does not contain a sub or user_id claim') }) - t.is(error?.message, 'Token does not contain a sub or user_id claim') -}) -test('cannot update user id', async (t) => { - const { satellite, authState, token } = t.context + test('cannot update user id', async (t) => { + const { satellite, authState, token } = t.context - await startSatellite(satellite, authState, token) - const error = t.throws(() => { - satellite.setToken(insecureAuthToken({ sub: 'test-user2' })) + await startSatellite(satellite, authState, token) + const error = t.throws(() => { + satellite.setToken(insecureAuthToken({ sub: 'test-user2' })) + }) + t.is( + error?.message, + "Can't change user ID when reconnecting. Previously connected with user ID 'test-user' but trying to reconnect with user ID 'test-user2'" + ) }) - t.is( - error?.message, - "Can't change user ID when reconnecting. 
Previously connected with user ID 'test-user' but trying to reconnect with user ID 'test-user2'" - ) -}) test('cannot UPDATE primary key', async (t) => { const { adapter, runMigrations } = t.context @@ -238,10 +238,10 @@ test('cannot update user id', async (t) => { const expectedChange = { qualifiedTablename: new QualifiedTablename('main', 'parent'), rowids: [1, 2], - recordChanges: [ - { primaryKey: { id: 1 }, type: 'INSERT' }, - { primaryKey: { id: 2 }, type: 'INSERT' }, - ], + recordChanges: [ + { primaryKey: { id: 1 }, type: 'INSERT' }, + { primaryKey: { id: 2 }, type: 'INSERT' }, + ], } t.deepEqual(changes, [expectedChange]) @@ -294,7 +294,7 @@ test('cannot update user id', async (t) => { test('starting and stopping the process works', async (t) => { const { adapter, notifier, runMigrations, satellite, authState, token } = - t.context + t.context await runMigrations() await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) @@ -1387,11 +1387,11 @@ test('cannot update user id', async (t) => { const { runMigrations, satellite, adapter, authState, token } = t.context await runMigrations() const { connectionPromise } = await startSatellite( - satellite, - authState, - token - ) - await connectionPromise + satellite, + authState, + token + ) + await connectionPromise adapter.run({ sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, @@ -1422,46 +1422,46 @@ test('cannot update user id', async (t) => { t.deepEqual(lsn2, numberToBytes(2)) }) -test('notifies about JWT expiration', async (t) => { - const { - satellite, - authState, - runMigrations, - client, - notifier, - dbName, - token, - } = t.context - await runMigrations() - await startSatellite(satellite, authState, token) - - // give some time for Satellite to start - // (needed because connecting and starting replication are async) - await sleepAsync(100) - - // we're expecting 2 assertions - t.plan(4) + test('notifies about JWT expiration', async (t) => { + const { + satellite, + authState, + runMigrations, + client, + notifier, + dbName, + token, + } = t.context + await runMigrations() + await startSatellite(satellite, authState, token) - notifier.subscribeToConnectivityStateChanges( - (notification: ConnectivityStateChangeNotification) => { - t.is(notification.dbName, dbName) - t.is(notification.connectivityState.status, 'disconnected') - t.is( - notification.connectivityState.reason?.code, - SatelliteErrorCode.AUTH_EXPIRED - ) - } - ) + // give some time for Satellite to start + // (needed because connecting and starting replication are async) + await sleepAsync(100) + + // we're expecting 2 assertions + t.plan(4) + + notifier.subscribeToConnectivityStateChanges( + (notification: ConnectivityStateChangeNotification) => { + t.is(notification.dbName, dbName) + t.is(notification.connectivityState.status, 'disconnected') + t.is( + notification.connectivityState.reason?.code, + SatelliteErrorCode.AUTH_EXPIRED + ) + } + ) - // mock JWT expiration - client.emitSocketClosedError(SatelliteErrorCode.AUTH_EXPIRED) + // mock JWT expiration + client.emitSocketClosedError(SatelliteErrorCode.AUTH_EXPIRED) - // give the notifier some time to fire - await sleepAsync(100) + // give the notifier some time to fire + await sleepAsync(100) - // check that the client is disconnected - t.false(client.isConnected()) -}) + // check that the client is disconnected + t.false(client.isConnected()) + }) test('garbage collection is triggered when transaction from the same origin is replicated', async (t) => { const { 
satellite } = t.context @@ -1516,10 +1516,10 @@ test('garbage collection is triggered when transaction from the same origin is r // TODO: test clear subscriptions }) -test('throw other replication errors', async (t) => { - t.plan(2) - const { satellite, runMigrations, authState, token } = t.context - await runMigrations() + test('throw other replication errors', async (t) => { + t.plan(2) + const { satellite, runMigrations, authState, token } = t.context + await runMigrations() const base64lsn = base64.fromBytes(numberToBytes(MOCK_INTERNAL_ERROR)) await satellite._setMeta('lsn', base64lsn) @@ -1563,12 +1563,12 @@ test('throw other replication errors', async (t) => { t.is(notifier.notifications[1].changes.length, 1) t.deepEqual(notifier.notifications[1].changes[0], { qualifiedTablename: qualified, - recordChanges: [ - { - primaryKey: { id: 1 }, - type: 'INITIAL', - }, - ], + recordChanges: [ + { + primaryKey: { id: 1 }, + type: 'INITIAL', + }, + ], rowids: [], }) @@ -1596,9 +1596,9 @@ test('throw other replication errors', async (t) => { } }) -test('(regression) shape subscription succeeds even if subscription data is delivered before the SatSubsReq RPC call receives its SatSubsResp answer', async (t) => { - const { client, satellite, runMigrations, authState, token } = t.context - await runMigrations() + test('(regression) shape subscription succeeds even if subscription data is delivered before the SatSubsReq RPC call receives its SatSubsResp answer', async (t) => { + const { client, satellite, runMigrations, authState, token } = t.context + await runMigrations() const tablename = 'parent' @@ -1627,9 +1627,9 @@ test('(regression) shape subscription succeeds even if subscription data is deli t.pass() }) -test('multiple subscriptions for the same shape are deduplicated', async (t) => { - const { client, satellite, runMigrations, authState, token } = t.context - await runMigrations() + test('multiple subscriptions for the same shape are deduplicated', async (t) => { + const { client, satellite, runMigrations, authState, token } = t.context + await runMigrations() const tablename = 'parent' @@ -1666,10 +1666,10 @@ test('multiple subscriptions for the same shape are deduplicated', async (t) => t.is(satellite.subscriptions.getFulfilledSubscriptions().length, 1) }) -test('applied shape data will be acted upon correctly', async (t) => { - const { client, satellite, adapter, runMigrations, authState, token } = - t.context - await runMigrations() + test('applied shape data will be acted upon correctly', async (t) => { + const { client, satellite, adapter, runMigrations, authState, token } = + t.context + await runMigrations() const namespace = 'main' const tablename = 'parent' @@ -1833,10 +1833,10 @@ test('a subscription that failed to apply because of FK constraint triggers GC', } }) -test('a second successful subscription', async (t) => { - const { client, satellite, adapter, runMigrations, authState, token } = - t.context - await runMigrations() + test('a second successful subscription', async (t) => { + const { client, satellite, adapter, runMigrations, authState, token } = + t.context + await runMigrations() const namespace = 'main' const tablename = 'child' @@ -1880,10 +1880,10 @@ test('a second successful subscription', async (t) => { } }) -test('a single subscribe with multiple tables with FKs', async (t) => { - const { client, satellite, adapter, runMigrations, authState, token } = - t.context - await runMigrations() + test('a single subscribe with multiple tables with FKs', async (t) => { 
+ const { client, satellite, adapter, runMigrations, authState, token } = + t.context + await runMigrations() // relations must be present at subscription delivery client.setRelations(relations) @@ -1931,10 +1931,12 @@ test('a single subscribe with multiple tables with FKs', async (t) => { return prom }) -test.serial('a shape delivery that triggers garbage collection', async (t) => { - const { client, satellite, adapter, runMigrations, authState, token } = - t.context - await runMigrations() + test.serial( + 'a shape delivery that triggers garbage collection', + async (t) => { + const { client, satellite, adapter, runMigrations, authState, token } = + t.context + await runMigrations() const namespace = 'main' const tablename = 'parent' @@ -1994,10 +1996,10 @@ test.serial('a shape delivery that triggers garbage collection', async (t) => { } }) -test('a subscription request failure does not clear the manager state', async (t) => { - const { client, satellite, adapter, runMigrations, authState, token } = - t.context - await runMigrations() + test('a subscription request failure does not clear the manager state', async (t) => { + const { client, satellite, adapter, runMigrations, authState, token } = + t.context + await runMigrations() // relations must be present at subscription delivery const namespace = 'main' @@ -2126,14 +2128,14 @@ test('a subscription request failure does not clear the manager state', async (t test('snapshots: generated oplog entries have the correct tags', async (t) => { const { - client, - satellite, - adapter, - tableInfo, + client, + satellite, + adapter, + tableInfo, runMigrations, - authState, - token, - } = t.context + authState, + token, + } = t.context await runMigrations() const namespace = 'main' @@ -2255,10 +2257,9 @@ test('a subscription request failure does not clear the manager state', async (t satellite['_connectRetryHandler'] = retry await Promise.all( - [ - satellite.connectWithBackoff(), - satellite['initializing']?.waitOn(), - ].map((p) => p?.catch(() => t.pass())) + [satellite.connectWithBackoff(), satellite['initializing']?.waitOn()].map( + (p) => p?.catch(() => t.pass()) + ) ) }) @@ -2270,15 +2271,14 @@ test('a subscription request failure does not clear the manager state', async (t authState, token ) - // We expect the connection to be cancelled const prom = t.throwsAsync(connectionPromise, { code: SatelliteErrorCode.CONNECTION_CANCELLED_BY_DISCONNECT, }) - + // Disconnect Satellite satellite.clientDisconnect() - + // Await until the connection promise is rejected await prom }) From fca065c57b0c4aaa8f5621a3eefc4a9ecf1fab72 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 5 Mar 2024 17:05:09 +0100 Subject: [PATCH 028/156] Update minimal expo-sqlite version needed for expo-sqlite/next --- clients/typescript/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients/typescript/package.json b/clients/typescript/package.json index 750ab9f3f1..d96e32b7bd 100644 --- a/clients/typescript/package.json +++ b/clients/typescript/package.json @@ -238,7 +238,7 @@ "concurrently": "^8.2.2", "embedded-postgres": "16.1.1-beta.9", "eslint": "^8.22.0", - "expo-sqlite": "^13.0.0", + "expo-sqlite": "^13.1.0", "glob": "^10.3.10", "global-jsdom": "24.0.0", "husky": "^8.0.3", From a23910e6b53eab1fd7b38f28447b9963e1d57caf Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 12 Mar 2024 09:25:10 +0100 Subject: [PATCH 029/156] Fixes after rebase --- clients/typescript/src/migrators/triggers.ts | 1 - 
.../typescript/test/satellite/process.test.ts | 32 +++++++++---------- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/clients/typescript/src/migrators/triggers.ts b/clients/typescript/src/migrators/triggers.ts index 69c3a11b79..c2add6a9fc 100644 --- a/clients/typescript/src/migrators/triggers.ts +++ b/clients/typescript/src/migrators/triggers.ts @@ -1,5 +1,4 @@ import { Statement } from '../util' -import { dedent } from 'ts-dedent' import { QueryBuilder } from './query-builder' export type ForeignKey = { diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index 4eee7a92d1..bf2d7d1477 100644 --- a/clients/typescript/test/satellite/process.test.ts +++ b/clients/typescript/test/satellite/process.test.ts @@ -1463,12 +1463,12 @@ export const processTests = (test: TestFn) => { t.false(client.isConnected()) }) -test('garbage collection is triggered when transaction from the same origin is replicated', async (t) => { - const { satellite } = t.context - const { runMigrations, adapter, authState, token } = t.context - await runMigrations() - const conn = await startSatellite(satellite, authState, token) - await conn.connectionPromise + test('garbage collection is triggered when transaction from the same origin is replicated', async (t) => { + const { satellite } = t.context + const { runMigrations, adapter, authState, token } = t.context + await runMigrations() + const conn = await startSatellite(satellite, authState, token) + await conn.connectionPromise adapter.run({ sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1);`, @@ -2036,28 +2036,28 @@ test('a subscription that failed to apply because of FK constraint triggers GC', } catch (error: any) { t.is(error.code, SatelliteErrorCode.TABLE_NOT_FOUND) } -}) - + }) + test("snapshot while not fully connected doesn't throw", async (t) => { const { adapter, runMigrations, satellite, client, authState, token } = t.context client.setStartReplicationDelayMs(100) - + await runMigrations() - + // Add log entry while offline - await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('1'),('2')` }) - + await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) + const conn = await startSatellite(satellite, authState, token) - + // Performing a snapshot while the replication connection has not been stablished // should not throw await satellite._performSnapshot() - + await conn.connectionPromise - + await satellite._performSnapshot() - + t.pass() }) From 3f349361bb9b82b1a809ee27eec8d4decef535ac Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 12 Mar 2024 13:55:47 +0100 Subject: [PATCH 030/156] Modify DAL to transform JS input to PG values and the other way around. 
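Value conversion in the DAL now goes through a small Converter interface:
`encode` maps JS values to what the database driver stores, `decode` maps
stored values back to JS, and both dispatch on the column's Postgres type.
The new InputTransformer wraps a Converter and applies it to the `data` and
`where` clauses of queries, looking up each column's PG type in the table's
`Fields`. A minimal sketch of the contract, assuming the snippet sits next to
the conversions module; the converter shown is hypothetical and only
illustrates the shape (the real implementations live in sqlite.ts and the new
postgres.ts):

    import { Converter } from './converter'
    import { InputTransformer } from './input'
    import { PgType } from './types'

    // Hypothetical converter for illustration: stores JS Dates as ISO strings
    // and passes every other value through unchanged.
    const demoConverter: Converter = {
      // JS -> database value
      encode(v: any, _pgType: PgType): any {
        return v instanceof Date ? v.toISOString() : v
      },
      // database value -> JS; a real converter revives dates, bigints, etc.
      decode(v: any, _pgType: PgType): any {
        return v
      },
    }

    // The transformer encodes the values in `data`/`where` inputs before they
    // reach the driver, and the driver-facing code decodes rows on the way out.
    const transformer = new InputTransformer(demoConverter)
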
--- .../src/client/conversions/converter.ts | 28 + .../src/client/conversions/datatypes/date.ts | 3 +- .../src/client/conversions/input.ts | 572 +++--- .../src/client/conversions/postgres.ts | 61 + .../src/client/conversions/sqlite.ts | 22 +- .../src/client/execution/executor.ts | 8 +- .../client/execution/nonTransactionalDB.ts | 16 +- .../src/client/execution/transactionalDB.ts | 36 +- .../typescript/src/client/model/builder.ts | 44 +- clients/typescript/src/client/model/client.ts | 13 +- clients/typescript/src/client/model/table.ts | 52 +- .../src/drivers/node-postgres/database.ts | 44 +- .../src/drivers/tauri-postgres/database.ts | 9 - clients/typescript/src/electric/index.ts | 4 +- .../src/migrators/query-builder/builder.ts | 3 +- .../test/client/model/builder.test.ts | 3 +- .../test/client/model/datatype.pg.test.ts | 55 + .../test/client/model/datatype.sqlite.test.ts | 49 + .../test/client/model/datatype.test.ts | 1550 +++++++++-------- .../test/client/model/shapes.test.ts | 3 +- .../typescript/test/frameworks/react.test.tsx | 3 +- .../typescript/test/frameworks/vuejs.test.ts | 3 +- .../test/migrators/postgres/schema.test.ts | 5 +- 23 files changed, 1443 insertions(+), 1143 deletions(-) create mode 100644 clients/typescript/src/client/conversions/converter.ts create mode 100644 clients/typescript/src/client/conversions/postgres.ts create mode 100644 clients/typescript/test/client/model/datatype.pg.test.ts create mode 100644 clients/typescript/test/client/model/datatype.sqlite.test.ts diff --git a/clients/typescript/src/client/conversions/converter.ts b/clients/typescript/src/client/conversions/converter.ts new file mode 100644 index 0000000000..72da5a0493 --- /dev/null +++ b/clients/typescript/src/client/conversions/converter.ts @@ -0,0 +1,28 @@ +import { PgType } from './types' + +export interface Converter { + /** + * Encodes the provided value for storing in the database. + * @param v The value to encode. + * @param pgType The Postgres type of the column in which to store the value. + */ + encode(v: any, pgType: PgType): any + /** + * Decodes the provided value from the database. + * @param v The value to decode. + * @param pgType The Postgres type of the column from which to decode the value. + */ + decode(v: any, pgType: PgType): any +} + +/** + * Checks whether the provided value is a user-provided data object, e.g. a timestamp. + * This is important because `input.ts` needs to distinguish between data objects and filter objects. + * Data objects need to be converted to a SQLite storeable value, whereas filter objects need to be treated specially + * as we have to transform the values of the filter's fields (cf. `transformFieldsAllowingFilters` in `input.ts`). + * @param v The value to check + * @returns True if it is a data object, false otherwise. 
+ */ +export function isDataObject(v: unknown): boolean { + return v instanceof Date || typeof v === 'bigint' +} diff --git a/clients/typescript/src/client/conversions/datatypes/date.ts b/clients/typescript/src/client/conversions/datatypes/date.ts index 2225cbb16f..d0f4559943 100644 --- a/clients/typescript/src/client/conversions/datatypes/date.ts +++ b/clients/typescript/src/client/conversions/datatypes/date.ts @@ -37,8 +37,9 @@ export function deserialiseDate(v: string, pgType: PgDateType): Date { switch (pgType) { case PgDateType.PG_TIMESTAMP: case PgDateType.PG_TIMESTAMPTZ: - case PgDateType.PG_DATE: return parse(v) + case PgDateType.PG_DATE: + return parse(`${v} 00:00:00.000`) case PgDateType.PG_TIME: // interpret as local time diff --git a/clients/typescript/src/client/conversions/input.ts b/clients/typescript/src/client/conversions/input.ts index 71f738ceff..fdab24c0d8 100644 --- a/clients/typescript/src/client/conversions/input.ts +++ b/clients/typescript/src/client/conversions/input.ts @@ -1,14 +1,14 @@ import mapValues from 'lodash.mapvalues' import { FieldName, Fields } from '../model/schema' -import { fromSqlite, toSqlite, isDataObject } from './sqlite' +import { Converter, isDataObject } from './converter' import { InvalidArgumentError } from '../validation/errors/invalidArgumentError' import { mapObject } from '../util/functions' import { PgType } from './types' import { isObject } from '../../util' export enum Transformation { - Js2Sqlite, - Sqlite2Js, + Encode, // encode values from JS to SQLite/Postgres + Decode, // decode values from SQLite/Postgres to JS } type UpdateInput = { data: object; where: object } @@ -21,161 +21,322 @@ type WhereInput = { where?: object } type Swap = Omit & Pick -/** - * Takes the data input of a `create` operation and - * converts the JS values to their corresponding SQLite values. - * e.g. JS `Date` objects are converted into strings. - * @param i The validated input of the `create` operation. - * @param fields The table's fields. - * @returns The transformed input. - */ -export function transformCreate( - i: T, - fields: Fields -): Swap { - return { - ...i, - data: transformFields(i.data, fields), +export class InputTransformer { + constructor(public converter: Converter) {} + + /** + * Takes the data input of a `create` operation and + * converts the JS values to their corresponding SQLite/PG values. + * e.g. JS `Date` objects are converted into strings. + * @param i The validated input of the `create` operation. + * @param fields The table's fields. + * @returns The transformed input. + */ + transformCreate( + i: T, + fields: Fields + ): Swap { + return { + ...i, + data: transformFields(i.data, fields, this.converter), + } } -} -/** - * Takes the data input of a `createMany` operation and - * converts the JS values to their corresponding SQLite values. - * e.g. JS `Date` objects are converted into strings. - * @param i The validated input of the `createMany` operation. - * @param fields The table's fields. - * @returns The transformed input. - */ -export function transformCreateMany( - i: T, - fields: Fields -): Swap { - return { - ...i, - data: i.data.map((o) => transformFields(o, fields)), + /** + * Takes the data input of a `createMany` operation and + * converts the JS values to their corresponding SQLite/PG values. + * e.g. JS `Date` objects are converted into strings. + * @param i The validated input of the `createMany` operation. + * @param fields The table's fields. + * @returns The transformed input. 
+ */ + transformCreateMany( + i: T, + fields: Fields + ): Swap { + return { + ...i, + data: i.data.map((o) => transformFields(o, fields, this.converter)), + } } -} -/** - * Takes the data input of an `update` operation and - * converts the JS values to their corresponding SQLite values. - * e.g. JS `Date` objects are converted into strings. - * @param i The validated input of the `update` operation. - * @param fields The table's fields. - * @returns The transformed input. - */ -export function transformUpdate( - i: T, - fields: Fields -): Swap { - return { - ...i, - data: transformFields(i.data, fields), - where: transformWhere(i.where, fields), + /** + * Takes the data input of an `update` operation and + * converts the JS values to their corresponding SQLite/PG values. + * e.g. JS `Date` objects are converted into strings. + * @param i The validated input of the `update` operation. + * @param fields The table's fields. + * @returns The transformed input. + */ + transformUpdate( + i: T, + fields: Fields + ): Swap { + return { + ...i, + data: transformFields(i.data, fields, this.converter), + where: this.transformWhere(i.where, fields), + } } -} -/** - * Takes the data input of an `updateMany` operation and - * converts the JS values to their corresponding SQLite values. - * @param i The validated input of the `updateMany` operation. - * @param fields The table's fields. - * @returns The transformed input. - */ -export function transformUpdateMany( - i: T, - fields: Fields -): UpdateManyInput { - const whereObj = transformWhereInput(i, fields) - return { - ...whereObj, - data: transformFields(i.data, fields), + /** + * Takes the data input of an `updateMany` operation and + * converts the JS values to their corresponding SQLite/PG values. + * @param i The validated input of the `updateMany` operation. + * @param fields The table's fields. + * @returns The transformed input. + */ + transformUpdateMany( + i: T, + fields: Fields + ): UpdateManyInput { + const whereObj = this.transformWhereInput(i, fields) + return { + ...whereObj, + data: transformFields(i.data, fields, this.converter), + } } -} -/** - * Takes the data input of a `delete` operation and - * converts the JS values to their corresponding SQLite values. - */ -export const transformDelete = transformWhereUniqueInput + /** + * Takes the data input of a `delete` operation and + * converts the JS values to their corresponding SQLite/PG values. + */ + transformDelete = this.transformWhereUniqueInput + + /** + * Takes the data input of a `deleteMany` operation and + * converts the JS values to their corresponding SQLite/PG values. + * @param i The validated input of the `deleteMany` operation. + * @param fields The table's fields. + * @returns The transformed input. + */ + transformDeleteMany = this.transformWhereInput + + /** + * Takes the data input of a `findUnique` operation and + * converts the JS values to their corresponding SQLite/PG values. + */ + transformFindUnique = this.transformWhereUniqueInput + + /** + * Takes the data input of a `findFirst` or `findMany` operation and + * converts the JS values to their corresponding SQLite/PG values. + */ + transformFindNonUnique = this.transformWhereInput + + /** + * Takes the data input of an operation containing a required `where` clause and + * converts the JS values of the `where` clause to their corresponding SQLite/PG values. + * @param i The validated input of the `where` clause. + * @param fields The table's fields. + * @returns The transformed input. 
+ */ + transformWhereUniqueInput( + i: T, + fields: Fields + ): Swap { + return { + ...i, + where: this.transformWhere(i.where, fields), + } + } -/** - * Takes the data input of a `deleteMany` operation and - * converts the JS values to their corresponding SQLite values. - * @param i The validated input of the `deleteMany` operation. - * @param fields The table's fields. - * @returns The transformed input. - */ -export const transformDeleteMany = transformWhereInput + /** + * Takes the data input of an operation containing an optional `where` clause and + * converts the JS values of the `where` clause to their corresponding SQLite/PG values. + * @param i The validated input of the `where` clause. + * @param fields The table's fields. + * @returns The transformed input. + */ + transformWhereInput( + i: T, + fields: Fields + ): Swap { + const whereObj = i.where + ? { where: this.transformWhere(i.where, fields) } + : {} + return { + ...i, + ...whereObj, + } + } -/** - * Takes the data input of a `findUnique` operation and - * converts the JS values to their corresponding SQLite values. - */ -export const transformFindUnique = transformWhereUniqueInput + transformWhere(o: object, fields: Fields): object { + const transformedFields = this.transformWhereFields(o, fields) + const transformedBooleanConnectors = this.transformBooleanConnectors( + o, + fields + ) + return { + ...o, + ...transformedFields, + ...transformedBooleanConnectors, + } + } -/** - * Takes the data input of a `findFirst` or `findMany` operation and - * converts the JS values to their corresponding SQLite values. - */ -export const transformFindNonUnique = transformWhereInput + transformBooleanConnectors( + o: { + AND?: object | object[] + OR?: object | object[] + NOT?: object | object[] + }, + fields: Fields + ): object { + // Within a `where` object, boolean connectors AND/OR/NOT will contain + // a nested `where` object or an array of nested `where` objects + // if it is a single `where` object we wrap it in an array + // and we map `transformWhere` to recursively handle all nested objects + const makeArray = (v: any) => (Array.isArray(v) ? v : [v]) + const andObj = o.AND + ? { AND: makeArray(o.AND).map((x) => this.transformWhere(x, fields)) } + : {} + const orObj = o.OR + ? { OR: makeArray(o.OR).map((x) => this.transformWhere(x, fields)) } + : {} + const notObj = o.NOT + ? { NOT: makeArray(o.NOT).map((x) => this.transformWhere(x, fields)) } + : {} + + // we use spread syntax such that the filter is not included if it is undefined + // we cannot set it to undefined because then it appears in `hasOwnProperty` + // and the query builder will try to write `undefined` to the database. + return { + ...andObj, + ...orObj, + ...notObj, + } + } -/** - * Takes the data input of an operation containing a required `where` clause and - * converts the JS values of the `where` clause to their corresponding SQLite values. - * @param i The validated input of the `where` clause. - * @param fields The table's fields. - * @returns The transformed input. - */ -function transformWhereUniqueInput( - i: T, - fields: Fields -): Swap { - return { - ...i, - where: transformWhere(i.where, fields), + /** + * Iterates over the properties of a `where` object + * in order to transform the values to SQLite/PG compatible values + * based on additional type information about the fields. + * @param o The `where` object to transform. + * @param fields Type information about the fields. + * @returns A `where` object with the values converted to SQLite/PG. 
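Editor's note: a hedged sketch of how `transformWhere` handles the AND/OR/NOT connectors described above, wrapping a single nested object into an array and recursing so that the leaf values are encoded; it reuses the `transformer` and `fields` from the previous sketch, and the encoded strings shown are assumptions.

// Illustrative sketch only -- encoded output is approximate.
const where = {
  OR: { timestamp: { gt: new Date('2023-08-07 18:28:35.421') } }, // a single object, not an array
  NOT: [{ date: new Date('2023-08-07') }],
}
const encodedWhere = transformer.transformWhere(where, fields)
// OR is wrapped in an array and every nested where object is transformed recursively,
// so the Date leaves come out encoded, roughly:
// { OR: [{ timestamp: { gt: '2023-08-07 18:28:35.421' } }], NOT: [{ date: '2023-08-07' }] }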
+ */ + transformWhereFields(o: object, fields: Fields): object { + // only transform fields that are part of this table and not related fields + // as those will be transformed later when the query on the related field is processed. + const objWithoutRelatedFields = keepTableFieldsOnly(o, fields) + const transformedObj = mapObject( + objWithoutRelatedFields, + (field, value) => { + // each field can be the value itself or an object containing filters like `lt`, `gt`, etc. + return this.transformFieldsAllowingFilters(field, value, fields) + } + ) + + return { + ...o, + ...transformedObj, + } } -} -/** - * Takes the data input of an operation containing an optional `where` clause and - * converts the JS values of the `where` clause to their corresponding SQLite values. - * @param i The validated input of the `where` clause. - * @param fields The table's fields. - * @returns The transformed input. - */ -function transformWhereInput( - i: T, - fields: Fields -): Swap { - const whereObj = i.where ? { where: transformWhere(i.where, fields) } : {} - return { - ...i, - ...whereObj, + /** + * Transforms a value that may contain filters. + * e.g. `where` clauses of a query allow to pass a value directly or an object containing filters. + * If it is an object of filters, we need to transform the values that are nested in those filters. + * @param field The name of the field we are transforming. + * @param value The value for that field. + * @param fields Type information about the fields of this table. + * @returns The transformed value. + */ + transformFieldsAllowingFilters( + field: FieldName, + value: any, + fields: Fields + ): any { + const pgType = fields.get(field) + + if (!pgType) throw new InvalidArgumentError(`Unknown field ${field}`) + + if (isFilterObject(value)) { + // transform the values that are nested in those filters + return this.transformFilterObject(field, value, pgType, fields) + } + + return this.converter.encode(value, pgType) + } + + /** + * Transforms an object containing filters + * @example For example: + * ``` + * { + * lt: Date('2023-09-12'), + * notIn: [ Date('2023-09-09'), Date('2023-09-01') ], + * not: { + * lt: Date('2022-09-01') + * } + * } + * ``` + * @param field The name of the field we are transforming. + * @param o The object containing the filters. + * @param pgType Type of this field. + * @param fields Type information about the fields of this table. + * @returns A transformed filter object. 
+ */ + transformFilterObject( + field: FieldName, + o: any, + pgType: PgType, + fields: Fields + ) { + const simpleFilters = new Set(['equals', 'lt', 'lte', 'gt', 'gte']) // filters whose value is an optional value of type `pgType` + const arrayFilters = new Set(['in', 'notIn']) // filters whose value is an optional array of values of type `pgType` + + // Handle the simple filters + const simpleFilterObj = filterKeys(o, simpleFilters) + const transformedSimpleFilterObj = mapValues(simpleFilterObj, (v: any) => + this.converter.encode(v, pgType) + ) + + // Handle the array filters + const arrayFilterObj = filterKeys(o, arrayFilters) + const transformedArrayFilterObj = mapValues(arrayFilterObj, (arr) => + arr.map((v: any) => this.converter.encode(v, pgType)) + ) + + // Handle `not` filter + // `not` is a special one as it accepts a value or a nested object of filters + // hence it is just like the properties of a `where` object which accept values or filters + const notFilterObj = filterKeys(o, new Set(['not'])) + const transformedNotFilterObj = mapValues(notFilterObj, (v) => { + // each field can be the value itself or an object containing filters like `lt`, `gt`, etc. + return this.transformFieldsAllowingFilters(field, v, fields) + }) + + return { + ...o, + ...transformedSimpleFilterObj, + ...transformedArrayFilterObj, + ...transformedNotFilterObj, + } } } /** * Iterates over the properties of the object `o` - * in order to transform their values to SQLite compatible values + * in order to transform their values to SQLite/PG compatible values * based on additional type information about the fields. * @param o The object to transform. * @param fields Type information about the fields. * @param transformation Which transformation to execute. - * @returns An object with the values converted to SQLite. + * @returns An object with the values converted to SQLite/PG. */ export function transformFields( o: object, fields: Fields, - transformation: Transformation = Transformation.Js2Sqlite + converter: Converter, + transformation: Transformation = Transformation.Encode ): object { // only transform fields that are part of this table and not related fields // as those will be transformed later when the query on the related field is processed. const fieldsAndValues = Object.entries(keepTableFieldsOnly(o, fields)) const fieldsAndTransformedValues = fieldsAndValues.map((entry) => { const [field, value] = entry - return transformField(field, value, o, fields, transformation) + return transformField(field, value, o, fields, converter, transformation) }) return { ...o, @@ -184,7 +345,7 @@ export function transformFields( } /** - * Transforms the provided value into a SQLite compatible value + * Transforms the provided value into a SQLite/PG compatible value * based on the type of this field. * @param field The name of the field. * @param value The value of the field. @@ -193,12 +354,13 @@ export function transformFields( * @param transformation Which transformation to execute. * @returns The transformed field. */ -function transformField( +export function transformField( field: FieldName, value: any, o: object, fields: Fields, - transformation: Transformation = Transformation.Js2Sqlite + converter: Converter, + transformation: Transformation = Transformation.Encode ): any { const pgType = fields.get(field) @@ -208,164 +370,20 @@ function transformField( ) const transformedValue = - transformation === Transformation.Js2Sqlite - ? 
toSqlite(value, pgType) - : fromSqlite(value, pgType) + transformation === Transformation.Encode + ? converter.encode(value, pgType) + : converter.decode(value, pgType) return [field, transformedValue] } -function transformWhere(o: object, fields: Fields): object { - const transformedFields = transformWhereFields(o, fields) - const transformedBooleanConnectors = transformBooleanConnectors(o, fields) - return { - ...o, - ...transformedFields, - ...transformedBooleanConnectors, - } -} - -function transformBooleanConnectors( - o: { - AND?: object | object[] - OR?: object | object[] - NOT?: object | object[] - }, - fields: Fields -): object { - // Within a `where` object, boolean connectors AND/OR/NOT will contain - // a nested `where` object or an array of nested `where` objects - // if it is a single `where` object we wrap it in an array - // and we map `transformWhere` to recursively handle all nested objects - const makeArray = (v: any) => (Array.isArray(v) ? v : [v]) - const andObj = o.AND - ? { AND: makeArray(o.AND).map((x) => transformWhere(x, fields)) } - : {} - const orObj = o.OR - ? { OR: makeArray(o.OR).map((x) => transformWhere(x, fields)) } - : {} - const notObj = o.NOT - ? { NOT: makeArray(o.NOT).map((x) => transformWhere(x, fields)) } - : {} - - // we use spread syntax such that the filter is not included if it is undefined - // we cannot set it to undefined because then it appears in `hasOwnProperty` - // and the query builder will try to write `undefined` to the database. - return { - ...andObj, - ...orObj, - ...notObj, - } -} - -/** - * Iterates over the properties of a `where` object - * in order to transform the values to SQLite compatible values - * based on additional type information about the fields. - * @param o The `where` object to transform. - * @param fields Type information about the fields. - * @returns A `where` object with the values converted to SQLite. - */ -function transformWhereFields(o: object, fields: Fields): object { - // only transform fields that are part of this table and not related fields - // as those will be transformed later when the query on the related field is processed. - const objWithoutRelatedFields = keepTableFieldsOnly(o, fields) - const transformedObj = mapObject(objWithoutRelatedFields, (field, value) => { - // each field can be the value itself or an object containing filters like `lt`, `gt`, etc. - return transformFieldsAllowingFilters(field, value, fields) - }) - - return { - ...o, - ...transformedObj, - } -} - -/** - * Transforms a value that may contain filters. - * e.g. `where` clauses of a query allow to pass a value directly or an object containing filters. - * If it is an object of filters, we need to transform the values that are nested in those filters. - * @param field The name of the field we are transforming. - * @param value The value for that field. - * @param fields Type information about the fields of this table. - * @returns The transformed value. 
- */ -function transformFieldsAllowingFilters( - field: FieldName, - value: any, - fields: Fields -): any { - const pgType = fields.get(field) - - if (!pgType) throw new InvalidArgumentError(`Unknown field ${field}`) - - if (isFilterObject(value)) { - // transform the values that are nested in those filters - return transformFilterObject(field, value, pgType, fields) - } - - return toSqlite(value, pgType) -} - function isFilterObject(value: any): boolean { // if it is an object it can only be a data object or a filter object return isObject(value) && !isDataObject(value) } -/** - * Transforms an object containing filters - * @example For example: - * ``` - * { - * lt: Date('2023-09-12'), - * notIn: [ Date('2023-09-09'), Date('2023-09-01') ], - * not: { - * lt: Date('2022-09-01') - * } - * } - * ``` - * @param field The name of the field we are transforming. - * @param o The object containing the filters. - * @param pgType Type of this field. - * @param fields Type information about the fields of this table. - * @returns A transformed filter object. - */ -function transformFilterObject( - field: FieldName, - o: any, - pgType: PgType, - fields: Fields -) { - const simpleFilters = new Set(['equals', 'lt', 'lte', 'gt', 'gte']) // filters whose value is an optional value of type `pgType` - const arrayFilters = new Set(['in', 'notIn']) // filters whose value is an optional array of values of type `pgType` - - // Handle the simple filters - const simpleFilterObj = filterKeys(o, simpleFilters) - const transformedSimpleFilterObj = mapValues(simpleFilterObj, (v: any) => - toSqlite(v, pgType) - ) - - // Handle the array filters - const arrayFilterObj = filterKeys(o, arrayFilters) - const transformedArrayFilterObj = mapValues(arrayFilterObj, (arr) => - arr.map((v: any) => toSqlite(v, pgType)) - ) - - // Handle `not` filter - // `not` is a special one as it accepts a value or a nested object of filters - // hence it is just like the properties of a `where` object which accept values or filters - const notFilterObj = filterKeys(o, new Set(['not'])) - const transformedNotFilterObj = mapValues(notFilterObj, (v) => { - // each field can be the value itself or an object containing filters like `lt`, `gt`, etc. - return transformFieldsAllowingFilters(field, v, fields) - }) - - return { - ...o, - ...transformedSimpleFilterObj, - ...transformedArrayFilterObj, - ...transformedNotFilterObj, - } +function isObject(v: any): boolean { + return typeof v === 'object' && !Array.isArray(v) && v !== null } /** diff --git a/clients/typescript/src/client/conversions/postgres.ts b/clients/typescript/src/client/conversions/postgres.ts new file mode 100644 index 0000000000..69bf400d3f --- /dev/null +++ b/clients/typescript/src/client/conversions/postgres.ts @@ -0,0 +1,61 @@ +import { InvalidArgumentError } from '../validation/errors/invalidArgumentError' +import { Converter } from './converter' +import { deserialiseDate, serialiseDate } from './datatypes/date' +import { deserialiseJSON, serialiseJSON } from './datatypes/json' +import { PgBasicType, PgDateType, PgType } from './types' + +/** + * This module takes care of converting TypeScript values to a Postgres storeable value and back. + * These conversions are needed when the developer uses the DAL such that we can convert those JS values to Postgres values + * and such that values that are read from the Postgres DB can be converted into JS values. + * Currently, no conversions are needed for the data types we support. 
+ */ + +function toPostgres(v: any, pgType: PgType): any { + if (v === null) { + // don't transform null values + return v + } + + if (pgType === PgDateType.PG_TIME || pgType === PgDateType.PG_TIMETZ) { + if (!(v instanceof Date)) + throw new InvalidArgumentError( + `Unexpected value ${v}. Expected a Date object.` + ) + + return serialiseDate(v, pgType as PgDateType) + } + + if (pgType === PgBasicType.PG_JSON || pgType === PgBasicType.PG_JSONB) { + return serialiseJSON(v) + } + + return v +} + +function fromPostgres(v: any, pgType: PgType): any { + if (v === null) { + // don't transform null values + return v + } + + if (pgType === PgDateType.PG_TIME || pgType === PgDateType.PG_TIMETZ) { + // it's a serialised date + return deserialiseDate(v, pgType as PgDateType) + } + + if (pgType === PgBasicType.PG_JSON || pgType === PgBasicType.PG_JSONB) { + return deserialiseJSON(v) + } + + if (pgType === PgBasicType.PG_INT8) { + return BigInt(v) // needed because the node-pg driver returns bigints as strings + } + + return v +} + +export const postgresConverter: Converter = { + encode: toPostgres, + decode: fromPostgres, +} diff --git a/clients/typescript/src/client/conversions/sqlite.ts b/clients/typescript/src/client/conversions/sqlite.ts index 70f0538814..9a95386720 100644 --- a/clients/typescript/src/client/conversions/sqlite.ts +++ b/clients/typescript/src/client/conversions/sqlite.ts @@ -1,4 +1,5 @@ import { InvalidArgumentError } from '../validation/errors/invalidArgumentError' +import { Converter } from './converter' import { deserialiseBoolean, serialiseBoolean } from './datatypes/boolean' import { deserialiseBlob, serialiseBlob } from './datatypes/blob' import { deserialiseDate, serialiseDate } from './datatypes/date' @@ -13,7 +14,7 @@ import { PgBasicType, PgDateType, PgType } from './types' * When reading from the SQLite database, the string can be parsed back into a `Date` object. */ -export function toSqlite(v: any, pgType: PgType): any { +function toSqlite(v: any, pgType: PgType): any { if (v === null) { // don't transform null values return v @@ -48,7 +49,7 @@ export function toSqlite(v: any, pgType: PgType): any { } } -export function fromSqlite(v: any, pgType: PgType): any { +function fromSqlite(v: any, pgType: PgType): any { if (v === null) { // don't transform null values return v @@ -90,18 +91,11 @@ export function fromSqlite(v: any, pgType: PgType): any { } } -/** - * Checks whether the provided value is a user-provided data object, e.g. a timestamp. - * This is important because `input.ts` needs to distinguish between data objects and filter objects. - * Data objects need to be converted to a SQLite storeable value, whereas filter objects need to be treated specially - * as we have to transform the values of the filter's fields (cf. `transformFieldsAllowingFilters` in `input.ts`). - * @param v The value to check - * @returns True if it is a data object, false otherwise. 
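Editor's note: the patch now ships two `Converter` implementations with the same `encode`/`decode` shape; a small round-trip sketch through `postgresConverter`, based only on the conversions shown above (node-pg returns INT8 values as strings, JSON(B) is serialised on write and parsed on read); the exact string forms are assumptions.

// Illustrative sketch only.
import { postgresConverter } from './postgres'
import { PgBasicType } from './types'

// node-pg hands int8 values back as strings; decode turns them into BigInt
const big = postgresConverter.decode('9007199254740993', PgBasicType.PG_INT8) // 9007199254740993n

// JSON values are serialised on write and parsed back on read
const stored = postgresConverter.encode({ a: 1 }, PgBasicType.PG_JSONB) // e.g. '{"a":1}'
const parsed = postgresConverter.decode(stored, PgBasicType.PG_JSONB)   // { a: 1 }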
- */ -export function isDataObject(v: unknown): boolean { - return v instanceof Date -} - function isPgDateType(pgType: PgType): boolean { return (Object.values(PgDateType) as Array).includes(pgType) } + +export const sqliteConverter: Converter = { + encode: toSqlite, + decode: fromSqlite, +} diff --git a/clients/typescript/src/client/execution/executor.ts b/clients/typescript/src/client/execution/executor.ts index 0a02c79757..065c5c83e8 100644 --- a/clients/typescript/src/client/execution/executor.ts +++ b/clients/typescript/src/client/execution/executor.ts @@ -5,12 +5,14 @@ import { TransactionalDB } from './transactionalDB' import { NonTransactionalDB } from './nonTransactionalDB' import { Notifier } from '../../notifiers' import { Fields } from '../model/schema' +import { Converter } from '../conversions/converter' export class Executor { constructor( private _adapter: DatabaseAdapter, private _notifier: Notifier, - private _fields: Fields + private _fields: Fields, + private _converter: Converter ) {} async runInTransaction( @@ -47,7 +49,7 @@ export class Executor { // and thus the promise will always be resolved with the value that was passed to `setResult` which is of type `A` return (await this._adapter.transaction((tx, setResult) => f( - new TransactionalDB(tx, this._fields), + new TransactionalDB(tx, this._fields, this._converter), (res) => { if (notify) { this._notifier.potentiallyChanged() // inform the notifier that the data may have changed @@ -73,7 +75,7 @@ export class Executor { ): Promise { return new Promise((resolve, reject) => { f( - new NonTransactionalDB(this._adapter, this._fields), + new NonTransactionalDB(this._adapter, this._fields, this._converter), (res) => { if (notify) { this._notifier.potentiallyChanged() // inform the notifier that the data may have changed diff --git a/clients/typescript/src/client/execution/nonTransactionalDB.ts b/clients/typescript/src/client/execution/nonTransactionalDB.ts index 4ecae0e3b5..79c9c0c8d9 100644 --- a/clients/typescript/src/client/execution/nonTransactionalDB.ts +++ b/clients/typescript/src/client/execution/nonTransactionalDB.ts @@ -5,9 +5,14 @@ import * as z from 'zod' import { Row, Statement } from '../../util' import { Transformation, transformFields } from '../conversions/input' import { Fields } from '../model/schema' +import { Converter } from '../conversions/converter' export class NonTransactionalDB implements DB { - constructor(private _adapter: DatabaseAdapter, private _fields: Fields) {} + constructor( + private _adapter: DatabaseAdapter, + private _fields: Fields, + private _converter: Converter + ) {} withTableSchema(fields: Fields) { return new NonTransactionalDB(this._adapter, fields) @@ -18,7 +23,7 @@ export class NonTransactionalDB implements DB { successCallback?: (db: DB, res: RunResult) => void, errorCallback?: (error: any) => void ) { - const { text, values } = statement.toParam({ numberedParameters: false }) + const { text, values } = statement.toParam() //{ numberedParameters: false }) this._adapter .run({ sql: text, args: values }) .then((res) => { @@ -45,19 +50,20 @@ export class NonTransactionalDB implements DB { successCallback: (db: DB, res: Z[]) => void, errorCallback?: (error: any) => void ) { - const { text, values } = statement.toParam({ numberedParameters: false }) + const { text, values } = statement.toParam() //{ numberedParameters: false }) this._adapter .query({ sql: text, args: values }) .then((rows) => { try { const objects = rows.map((row) => { - // convert SQLite values back to JS 
values + // convert SQLite/PG values back to JS values // and then parse the transformed object // with the Zod schema to validate it const transformedRow = transformFields( row, this._fields, - Transformation.Sqlite2Js + this._converter, + Transformation.Decode ) return schema.parse(transformedRow) }) diff --git a/clients/typescript/src/client/execution/transactionalDB.ts b/clients/typescript/src/client/execution/transactionalDB.ts index c9ed6d9552..fc4b2a0a56 100644 --- a/clients/typescript/src/client/execution/transactionalDB.ts +++ b/clients/typescript/src/client/execution/transactionalDB.ts @@ -5,24 +5,33 @@ import * as z from 'zod' import { Row, Statement } from '../../util' import { Fields } from '../model/schema' import { Transformation, transformFields } from '../conversions/input' +import { Converter } from '../conversions/converter' export class TransactionalDB implements DB { - constructor(private _tx: Transaction, private _fields: Fields) {} - + constructor( + private _tx: Transaction, + private _fields: Fields, + private _converter: Converter + ) { } + withTableSchema(fields: Fields) { - return new TransactionalDB(this._tx, fields) + return new TransactionalDB(this._tx, fields, this._converter) } + run( statement: QueryBuilder, successCallback?: (db: DB, res: RunResult) => void, errorCallback?: (error: any) => void ): void { - const { text, values } = statement.toParam({ numberedParameters: false }) + const { text, values } = statement.toParam() this._tx.run( { sql: text, args: values }, (tx, res) => { if (typeof successCallback !== 'undefined') - successCallback(new TransactionalDB(tx, this._fields), res) + successCallback( + new TransactionalDB(tx, this._fields, this._converter), + res + ) }, errorCallback ) @@ -34,23 +43,27 @@ export class TransactionalDB implements DB { successCallback: (db: DB, res: Z[]) => void, errorCallback?: (error: any) => void ): void { - const { text, values } = statement.toParam({ numberedParameters: false }) + const { text, values } = statement.toParam() this._tx.query( { sql: text, args: values }, (tx, rows) => { if (typeof successCallback !== 'undefined') { const objects = rows.map((row) => { - // convert SQLite values back to JS values + // convert SQLite/PG values back to JS values // and then parse the transformed object // with the Zod schema to validate it const transformedRow = transformFields( row, this._fields, - Transformation.Sqlite2Js + this._converter, + Transformation.Decode ) return schema.parse(transformedRow) }) - successCallback(new TransactionalDB(tx, this._fields), objects) + successCallback( + new TransactionalDB(tx, this._fields, this._converter), + objects + ) } }, errorCallback @@ -66,7 +79,10 @@ export class TransactionalDB implements DB { sql, (tx, rows) => { if (typeof successCallback !== 'undefined') { - successCallback(new TransactionalDB(tx, this._fields), rows) + successCallback( + new TransactionalDB(tx, this._fields, this._converter), + rows + ) } }, errorCallback diff --git a/clients/typescript/src/client/model/builder.ts b/clients/typescript/src/client/model/builder.ts index 045b10e0e5..e966fda907 100644 --- a/clients/typescript/src/client/model/builder.ts +++ b/clients/typescript/src/client/model/builder.ts @@ -17,6 +17,7 @@ import { ExtendedTableSchema } from './schema' import { PgBasicType } from '../conversions/types' import { HKT } from '../util/hkt' import { isObject } from '../../util' +import { Dialect } from '../../migrators/query-builder/builder' const squelPostgres = squel.useFlavour('postgres') 
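Editor's note: the executor and DB wrappers above now thread the `Converter` through so that rows read from SQLite/Postgres are decoded back into JS values before Zod validation; a minimal sketch of that decode step, with the raw row and field map assumed for illustration.

// Illustrative sketch only -- the driver row and `fields` are assumptions.
import { transformFields, Transformation } from './input'
import { sqliteConverter } from './sqlite'

const row = { id: 1, timestamp: '2023-08-07 18:28:35.421' } // as returned by the driver
const decoded = transformFields(row, fields, sqliteConverter, Transformation.Decode)
// decoded.timestamp is a JS Date again, ready to be validated with schema.parse(decoded)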
squelPostgres.registerValueHandler('bigint', function (bigint) { @@ -44,8 +45,22 @@ export class Builder { any, any, HKT - > - ) {} + >, + public dialect: Dialect + ) { + if (dialect === 'Postgres') { + squelPostgres.cls.DefaultQueryBuilderOptions.nameQuoteCharacter = '"' + squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteTableNames = true + squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteFieldNames = true + squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteAliasNames = true + // need to register it, otherwise squel complains that the Date type is not registered + // as Squel does not support it out-of-the-box but our Postgres drivers do support it. + squelPostgres.registerValueHandler(Date, (date) => date) + } else { + // Don't use numbered parameters if dialect is SQLite + squelPostgres.cls.DefaultQueryBuilderOptions.numberedParameters = false + } + } create(i: CreateInput): QueryBuilder { // Make a SQL query out of the data @@ -232,7 +247,7 @@ export class Builder { */ private castBigIntToText(field: string) { const pgType = this._tableDescription.fields.get(field) - if (pgType === PgBasicType.PG_INT8) { + if (pgType === PgBasicType.PG_INT8 && this.dialect === 'SQLite') { return `cast(${field} as TEXT) AS ${field}` } return field @@ -285,8 +300,25 @@ export class Builder { // because not all adapters deal well with BigInts // the DAL will convert the string into a BigInt in the `fromSqlite` function from `../conversions/sqlite.ts`. const pgType = this._tableDescription.fields.get(field) - if (pgType === PgBasicType.PG_INT8) { - return query.returning(`cast(${field} as TEXT) AS ${field}`) + if (pgType === PgBasicType.PG_INT8 && this.dialect === 'SQLite') { + //squelPostgres.function('cast(?)', `"${field}" as TEXT`) + // FIXME: squel adds quotes around the entire cast... + // tried to override squel's internal _formatFieldName to special case this field but it still quoted it + const f = `cast("${field}" as TEXT) AS "${field}"` + const res = query.returning(f) //, field) + /* + const returningBlock = query.blocks[query.blocks.length - 1] + const originalFormatter = returningBlock._formatFieldName.bind(returningBlock) + returningBlock._formatFieldName = (field, opts) => { + console.log(`formatting field name: ${field}`) + if (field === f) { + console.log(`returning field: ${field}`) + return field + } + else return originalFormatter(field, opts) + } + */ + return res } return query.returning(field) }, query) @@ -329,7 +361,7 @@ export function makeFilter( prefixFieldsWith ), ] - } else if (isObject(fieldValue)) { + } else if (isObject(fieldValue) && !(fieldValue instanceof Date)) { // an object containing filters is provided // e.g. 
users.findMany({ where: { id: { in: [1, 2, 3] } } }) const fs = { diff --git a/clients/typescript/src/client/model/client.ts b/clients/typescript/src/client/model/client.ts index 2ebf4296ab..e092393369 100644 --- a/clients/typescript/src/client/model/client.ts +++ b/clients/typescript/src/client/model/client.ts @@ -8,6 +8,10 @@ import { DatabaseAdapter } from '../../electric/adapter' import { GlobalRegistry, Registry, Satellite } from '../../satellite' import { ShapeManager } from './shapes' import { ReplicationTransformManager } from './transforms' +import { Dialect } from '../../migrators/query-builder/builder' +import { InputTransformer } from '../conversions/input' +import { sqliteConverter } from '../conversions/sqlite' +import { postgresConverter } from '../conversions/postgres' export type ClientTables> = { [Tbl in keyof DB['tables']]: DB['tables'][Tbl] extends TableSchema< @@ -132,13 +136,16 @@ export class ElectricClient< adapter: DatabaseAdapter, notifier: Notifier, satellite: Satellite, - registry: Registry | GlobalRegistry + registry: Registry | GlobalRegistry, + dialect: Dialect ): ElectricClient { const tables = dbDescription.extendedTables const shapeManager = new ShapeManager(satellite) const replicationTransformManager = new ReplicationTransformManager( satellite ) + const converter = dialect === 'SQLite' ? sqliteConverter : postgresConverter + const inputTransformer = new InputTransformer(converter) const createTable = (tableName: string) => { return new Table( @@ -147,7 +154,9 @@ export class ElectricClient< notifier, shapeManager, replicationTransformManager, - dbDescription + dbDescription, + inputTransformer, + dialect ) } diff --git a/clients/typescript/src/client/model/table.ts b/clients/typescript/src/client/model/table.ts index b801029233..6d36abc3f0 100644 --- a/clients/typescript/src/client/model/table.ts +++ b/clients/typescript/src/client/model/table.ts @@ -41,21 +41,13 @@ import { import { NarrowInclude } from '../input/inputNarrowing' import { IShapeManager } from './shapes' import { ShapeSubscription } from '../../satellite' -import { - transformCreate, - transformCreateMany, - transformDelete, - transformDeleteMany, - transformFindNonUnique, - transformFindUnique, - transformUpdate, - transformUpdateMany, -} from '../conversions/input' import { Rel, Shape } from '../../satellite/shapes/types' import { IReplicationTransformManager, transformTableRecord, } from './transforms' +import { InputTransformer } from '../conversions/input' +import { Dialect } from '../../migrators/query-builder/builder' type AnyTable = Table @@ -116,7 +108,9 @@ export class Table< private _notifier: Notifier, private _shapeManager: IShapeManager, private _replicationTransformManager: IReplicationTransformManager, - private _dbDescription: DbSchema + private _dbDescription: DbSchema, + private _transformer: InputTransformer, + public dialect: Dialect ) { this._fields = this._dbDescription.getFields(tableName) const fieldNames = this._dbDescription.getFieldNames(tableName) @@ -124,10 +118,16 @@ export class Table< this._builder = new Builder( tableName, fieldNames, - _shapeManager, - tableDescription + this._shapeManager, + tableDescription, + this.dialect + ) + this._executor = new Executor( + adapter, + _notifier, + this._fields, + this._transformer.converter ) - this._executor = new Executor(adapter, _notifier, this._fields) this._qualifiedTableName = new QualifiedTablename('main', tableName) this._tables = new Map() this._schema = tableDescription.modelSchema @@ -432,7 +432,7 @@ 
export class Table< continuation: (record: Kind & Record) => void, onError: (err: any) => void ) { - const validatedInput = transformCreate( + const validatedInput = this._transformer.transformCreate( validate(i, this.createSchema), this._fields ) @@ -588,7 +588,7 @@ export class Table< continuation: (res: BatchPayload) => void, onError: (err: any) => void ) { - const data = transformCreateMany( + const data = this._transformer.transformCreateMany( validate(i, this.createManySchema), this._fields ) @@ -608,7 +608,7 @@ export class Table< continuation: (res: Kind | null) => void, onError: (err: any) => void ) { - const data = transformFindUnique( + const data = this._transformer.transformFindUnique( validate(i, this.findUniqueSchema), this._fields ) @@ -647,7 +647,7 @@ export class Table< continuation: (res: Kind | null) => void, onError: (err: any) => void ) { - const data = transformFindNonUnique( + const data = this._transformer.transformFindNonUnique( validate(i ?? {}, this.findSchema), this._fields ) @@ -885,7 +885,7 @@ export class Table< continuation: (res: Kind[]) => void, onError: (err: any) => void ) { - const data = transformFindNonUnique( + const data = this._transformer.transformFindNonUnique( validate(i ?? {}, this.findSchema), this._fields ) @@ -1311,7 +1311,10 @@ export class Table< continuation: (res: Kind) => void, onError: (err: any) => void ) { - const data = transformUpdate(validate(i, this.updateSchema), this._fields) + const data = this._transformer.transformUpdate( + validate(i, this.updateSchema), + this._fields + ) // Find the record and make sure it is unique this._findUnique( @@ -1473,7 +1476,7 @@ export class Table< continuation: (res: BatchPayload) => void, onError: (err: any) => void ) { - const data = transformUpdateMany( + const data = this._transformer.transformUpdateMany( validate(i, this.updateManySchema), this._fields ) @@ -1548,7 +1551,10 @@ export class Table< continuation: (res: Kind) => void, onError: (err: any) => void ) { - const data = transformDelete(validate(i, this.deleteSchema), this._fields) + const data = this._transformer.transformDelete( + validate(i, this.deleteSchema), + this._fields + ) // Check that the record exists this._findUniqueWithoutAutoSelect( data as any, @@ -1573,7 +1579,7 @@ export class Table< continuation: (res: BatchPayload) => void, onError: (err: any) => void ) { - const data = transformDeleteMany( + const data = this._transformer.transformDeleteMany( validate(i ?? 
{}, this.deleteManySchema), this._fields ) diff --git a/clients/typescript/src/drivers/node-postgres/database.ts b/clients/typescript/src/drivers/node-postgres/database.ts index d9fb618d3d..d71c8e61cd 100644 --- a/clients/typescript/src/drivers/node-postgres/database.ts +++ b/clients/typescript/src/drivers/node-postgres/database.ts @@ -1,16 +1,20 @@ -// TODO: fix the below -// was probably added because the driver does not support passing a BigInt -// and expects it to be passed as a string instead -/* -(BigInt.prototype as any).toJSON = function () { - return this.toString(); -}; -*/ - +import pg from 'pg' import type { Client } from 'pg' import EmbeddedPostgres from 'embedded-postgres' import { Row, Statement } from '../../util' +// Modify how 'pg' parses JSON values +// simply return it as a string +// our conversions will correctly parse it +/* +const parseJSON = (value: string) => { + return value +} +pg.types.setTypeParser(pg.types.builtins.JSON, parseJSON) +pg.types.setTypeParser(pg.types.builtins.JSONB, parseJSON) +*/ +const originalGetTypeParser = pg.types.getTypeParser + export type QueryResult = { rows: Row[] rowsModified: number @@ -32,10 +36,24 @@ export class ElectricDatabase implements Database { ) {} async exec(statement: Statement): Promise { - const { rows, rowCount } = await this.db.query( - statement.sql, - statement.args - ) + const { rows, rowCount } = await this.db.query({ + text: statement.sql, + values: statement.args, + types: { + // Modify the parser to not parse JSON values + // Instead, return them as strings + // our conversions will correctly parse them + getTypeParser: ((oid: number) => { + if ( + oid === pg.types.builtins.JSON || + oid === pg.types.builtins.JSONB + ) { + return (val) => val + } + return originalGetTypeParser(oid) + }) as typeof pg.types.getTypeParser, + }, + }) return { rows, rowsModified: rowCount ?? 
0, diff --git a/clients/typescript/src/drivers/tauri-postgres/database.ts b/clients/typescript/src/drivers/tauri-postgres/database.ts index fb3afe7df8..6472e30ab2 100644 --- a/clients/typescript/src/drivers/tauri-postgres/database.ts +++ b/clients/typescript/src/drivers/tauri-postgres/database.ts @@ -1,12 +1,3 @@ -// TODO: fix the below -// was probably added because the driver does not support passing a BigInt -// and expects it to be passed as a string instead -/* -(BigInt.prototype as any).toJSON = function () { - return this.toString(); -}; -*/ - import { Row, Statement } from '../../util' export type QueryResult = { diff --git a/clients/typescript/src/electric/index.ts b/clients/typescript/src/electric/index.ts index 3274a6d7f4..57e7da1567 100644 --- a/clients/typescript/src/electric/index.ts +++ b/clients/typescript/src/electric/index.ts @@ -75,13 +75,15 @@ export const electrify = async >( configWithDefaults ) + const dialect = migrator.electricQueryBuilder.dialect const electric = ElectricClient.create( dbName, dbDescription, adapter, notifier, satellite, - registry + registry, + dialect ) if (satellite.connectivityState !== undefined) { diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index f47075aff5..86c0dcf9ea 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -1,8 +1,9 @@ import { ForeignKey } from '../triggers' import { QualifiedTablename, SqlValue, Statement } from '../../util' +export type Dialect = 'SQLite' | 'Postgres' export abstract class QueryBuilder { - abstract readonly dialect: 'SQLite' | 'Postgres' + abstract readonly dialect: Dialect abstract readonly paramSign: '?' 
| '$' /** diff --git a/clients/typescript/test/client/model/builder.test.ts b/clients/typescript/test/client/model/builder.test.ts index fd8609a6d0..815b311548 100644 --- a/clients/typescript/test/client/model/builder.test.ts +++ b/clients/typescript/test/client/model/builder.test.ts @@ -11,7 +11,8 @@ const tbl = new Builder( 'Post', ['id', 'title', 'contents', 'nbr'], shapeManager, - postTableDescription + postTableDescription, + 'SQLite' ) // Sync all shapes such that we don't get warnings on every query diff --git a/clients/typescript/test/client/model/datatype.pg.test.ts b/clients/typescript/test/client/model/datatype.pg.test.ts new file mode 100644 index 0000000000..02cac7cb70 --- /dev/null +++ b/clients/typescript/test/client/model/datatype.pg.test.ts @@ -0,0 +1,55 @@ +import anyTest, { TestFn } from 'ava' + +import { MockRegistry } from '../../../src/satellite/mock' + +import { electrify } from '../../../src/drivers/node-postgres' +import { + _NOT_UNIQUE_, + _RECORD_NOT_FOUND_, +} from '../../../src/client/validation/errors/messages' +import { schema } from '../generated' +import { ContextType, datatypeTests } from './datatype.test' +import { makePgDatabase } from '../../support/node-postgres' +import { randomValue } from '../../../src/util' + +// Run all tests in this file serially +// because there are a lot of tests +// and it would lead to PG running out of shared memory +const test = anyTest.serial as TestFn< + ContextType & { + stop: () => Promise + } +> + +let port = 9000 +test.beforeEach(async (t) => { + port++ + const dbName = `test-datatypes-${port}-${randomValue()}` + const { db, stop } = await makePgDatabase(dbName, port) + const electric = await electrify( + db, + schema, + {}, + { registry: new MockRegistry() } + ) + + const tbl = electric.db.DataTypes + + // Sync all shapes such that we don't get warnings on every query + await tbl.sync() + + await db.exec({ + sql: `CREATE TABLE "DataTypes"("id" INT4 PRIMARY KEY, "date" DATE, "time" TIME, "timetz" TIMETZ, "timestamp" TIMESTAMP, "timestamptz" TIMESTAMPTZ, "bool" BOOL, "uuid" UUID, "int2" INT2, "int4" INT4, "int8" INT8, "float4" FLOAT4, "float8" FLOAT8, "json" JSONB, "bytea" BYTEA, "relatedId" INT4);`, + }) + + t.context = { + tbl, + stop, + } +}) + +test.afterEach.always(async (t) => { + await t.context.stop() +}) + +datatypeTests(test as unknown as TestFn) diff --git a/clients/typescript/test/client/model/datatype.sqlite.test.ts b/clients/typescript/test/client/model/datatype.sqlite.test.ts new file mode 100644 index 0000000000..ad12513b04 --- /dev/null +++ b/clients/typescript/test/client/model/datatype.sqlite.test.ts @@ -0,0 +1,49 @@ +import anyTest, { TestFn } from 'ava' +import Database from 'better-sqlite3' +import type { Database as BetterSqlite3Database } from 'better-sqlite3' + +import { MockRegistry } from '../../../src/satellite/mock' + +import { electrify } from '../../../src/drivers/better-sqlite3' +import { + _NOT_UNIQUE_, + _RECORD_NOT_FOUND_, +} from '../../../src/client/validation/errors/messages' +import { schema } from '../generated' +import { ContextType, datatypeTests } from './datatype.test' + +const test = anyTest as TestFn< + ContextType & { + db: BetterSqlite3Database + } +> + +test.beforeEach(async (t) => { + const db = new Database(':memory:') + const electric = await electrify( + db, + schema, + {}, + { registry: new MockRegistry() } + ) + + const tbl = electric.db.DataTypes + + // Sync all shapes such that we don't get warnings on every query + await tbl.sync() + + db.exec( + "CREATE TABLE 
DataTypes('id' int PRIMARY KEY, 'date' varchar, 'time' varchar, 'timetz' varchar, 'timestamp' varchar, 'timestamptz' varchar, 'bool' int, 'uuid' varchar, 'int2' int2, 'int4' int4, 'int8' int8, 'float4' real, 'float8' real, 'json' varchar, 'bytea' blob, 'relatedId' int);" + ) + + t.context = { + db, + tbl, + } +}) + +test.afterEach.always((t) => { + t.context.db.close() +}) + +datatypeTests(test as unknown as TestFn) diff --git a/clients/typescript/test/client/model/datatype.test.ts b/clients/typescript/test/client/model/datatype.test.ts index bfc9bf20de..61c5f1ffd1 100644 --- a/clients/typescript/test/client/model/datatype.test.ts +++ b/clients/typescript/test/client/model/datatype.test.ts @@ -1,38 +1,15 @@ -import test from 'ava' -import Database from 'better-sqlite3' - -import { MockRegistry } from '../../../src/satellite/mock' - -import { electrify } from '../../../src/drivers/better-sqlite3' +import { TestFn } from 'ava' import { _NOT_UNIQUE_, _RECORD_NOT_FOUND_, } from '../../../src/client/validation/errors/messages' -import { schema, JsonNull } from '../generated' +import { JsonNull, Electric } from '../generated' import { ZodError } from 'zod' -const db = new Database(':memory:') -const electric = await electrify( - db, - schema, - {}, - { registry: new MockRegistry() } -) - -const tbl = electric.db.DataTypes - -// Sync all shapes such that we don't get warnings on every query -await tbl.sync() - -function setupDB() { - db.exec('DROP TABLE IF EXISTS DataTypes') - db.exec( - "CREATE TABLE DataTypes('id' int PRIMARY KEY, 'date' varchar, 'time' varchar, 'timetz' varchar, 'timestamp' varchar, 'timestamptz' varchar, 'bool' int, 'uuid' varchar, 'int2' int2, 'int4' int4, 'int8' int8, 'float4' real, 'float8' real, 'json' varchar, 'bytea' blob, 'relatedId' int);" - ) +export type ContextType = { + tbl: Electric['db']['DataTypes'] } -test.beforeEach(setupDB) - /* * The tests below check that advanced data types * can be written into the DB, thereby, testing that @@ -40,224 +17,262 @@ test.beforeEach(setupDB) * and then be converted back to JS objects on reads. 
*/ -test.serial('support date type', async (t) => { - const date = '2023-08-07' - const d = new Date(`${date} 23:28:35.421`) - const res = await tbl.create({ - data: { - id: 1, - date: d, - }, - }) - - t.deepEqual(res.date, new Date(date)) +export const datatypeTests = (test: TestFn) => { + test('support date type', async (t) => { + const { tbl } = t.context + const date = '2023-08-07' + const d = new Date(`${date} 23:28:35.421`) + const res = await tbl.create({ + data: { + id: 1, + date: d, + }, + }) - const fetchRes = await tbl.findUnique({ - where: { - id: 1, - }, - }) + const expectedDate = new Date(`${date} 00:00:00.000`) + t.deepEqual(res.date, expectedDate) - t.deepEqual(fetchRes?.date, new Date(date)) -}) + const fetchRes = await tbl.findUnique({ + where: { + id: 1, + }, + }) -test.serial('support date type passed as string', async (t) => { - const date = '2023-08-07' - const res = await tbl.create({ - data: { - id: 1, - date: date, - }, + t.deepEqual(fetchRes?.date, expectedDate) }) - t.deepEqual(res.date, new Date(date)) + test('support date type passed as string', async (t) => { + const { tbl } = t.context + const date = '2023-08-07' + const res = await tbl.create({ + data: { + id: 1, + date: date, + }, + }) - const fetchRes = await tbl.findUnique({ - where: { - id: 1, - }, - }) + const expectedDate = new Date(`${date} 00:00:00.000`) + t.deepEqual(res.date, expectedDate) - t.deepEqual(fetchRes?.date, new Date(date)) -}) + const fetchRes = await tbl.findUnique({ + where: { + id: 1, + }, + }) -test.serial('support time type', async (t) => { - const date = new Date('2023-08-07 18:28:35.421') - const res = await tbl.create({ - data: { - id: 1, - time: date, - }, + t.deepEqual(fetchRes?.date, expectedDate) }) - t.deepEqual(res.time, new Date('1970-01-01 18:28:35.421')) + test('support time type', async (t) => { + const { tbl } = t.context + const date = new Date('2023-08-07 18:28:35.421') + const res = await tbl.create({ + data: { + id: 1, + time: date, + }, + }) - const fetchRes = await tbl.findUnique({ - where: { - id: 1, - }, - }) + t.deepEqual(res.time, new Date('1970-01-01 18:28:35.421')) - t.deepEqual(fetchRes?.time, new Date('1970-01-01 18:28:35.421')) -}) - -test.serial('support timetz type', async (t) => { - // Check that we store the time without taking into account timezones - // such that upon reading we get the same time even if we are in a different time zone - // test with 2 different time zones such that they cannot both coincide with the machine's timezone. - const date1 = new Date('2023-08-07 18:28:35.421+02') - const date2 = new Date('2023-08-07 18:28:35.421+03') - const res1 = await tbl.create({ - data: { - id: 1, - timetz: date1, - }, - }) + const fetchRes = await tbl.findUnique({ + where: { + id: 1, + }, + }) - const res2 = await tbl.create({ - data: { - id: 2, - timetz: date2, - }, + t.deepEqual(fetchRes?.time, new Date('1970-01-01 18:28:35.421')) }) - t.deepEqual(res1.timetz, new Date('1970-01-01 18:28:35.421+02')) - t.deepEqual(res2.timetz, new Date('1970-01-01 18:28:35.421+03')) + test('support timetz type', async (t) => { + const { tbl } = t.context + // Check that we store the time without taking into account timezones + // such that upon reading we get the same time even if we are in a different time zone + // test with 2 different time zones such that they cannot both coincide with the machine's timezone. 
+ const date1 = new Date('2023-08-07 18:28:35.421+02') + const date2 = new Date('2023-08-07 18:28:35.421+03') + const res1 = await tbl.create({ + data: { + id: 1, + timetz: date1, + }, + }) - const fetchRes1 = await tbl.findUnique({ - where: { - id: 1, - }, - }) + const res2 = await tbl.create({ + data: { + id: 2, + timetz: date2, + }, + }) - const fetchRes2 = await tbl.findUnique({ - where: { - id: 2, - }, - }) + t.deepEqual(res1.timetz, new Date('1970-01-01 18:28:35.421+02')) + t.deepEqual(res2.timetz, new Date('1970-01-01 18:28:35.421+03')) - t.deepEqual(fetchRes1?.timetz, new Date('1970-01-01 18:28:35.421+02')) - t.deepEqual(fetchRes2?.timetz, new Date('1970-01-01 18:28:35.421+03')) -}) + const fetchRes1 = await tbl.findUnique({ + where: { + id: 1, + }, + }) -test.serial('support timestamp type', async (t) => { - const date = new Date('2023-08-07 18:28:35.421') + const fetchRes2 = await tbl.findUnique({ + where: { + id: 2, + }, + }) - const res = await tbl.create({ - data: { - id: 1, - timestamp: date, - }, + t.deepEqual(fetchRes1?.timetz, new Date('1970-01-01 18:28:35.421+02')) + t.deepEqual(fetchRes2?.timetz, new Date('1970-01-01 18:28:35.421+03')) }) - t.deepEqual(res.timestamp, new Date('2023-08-07 18:28:35.421')) + test('support timestamp type', async (t) => { + const { tbl } = t.context + const date = new Date('2023-08-07 18:28:35.421') - const fetchRes = await tbl.findUnique({ - where: { - id: 1, - }, - }) + const res = await tbl.create({ + data: { + id: 1, + timestamp: date, + }, + }) - t.deepEqual(fetchRes?.timestamp, new Date('2023-08-07 18:28:35.421')) -}) + t.deepEqual(res.timestamp, new Date('2023-08-07 18:28:35.421')) -test.serial('support timestamp type - input date with offset', async (t) => { - const date = new Date('2023-08-07 18:28:35.421+05') + const fetchRes = await tbl.findUnique({ + where: { + id: 1, + }, + }) - const res = await tbl.create({ - data: { - id: 1, - timestamp: date, - }, + t.deepEqual(fetchRes?.timestamp, new Date('2023-08-07 18:28:35.421')) }) - t.deepEqual(res.timestamp, date) + test('support timestamp type - input date with offset', async (t) => { + const { tbl } = t.context + const date = new Date('2023-08-07 18:28:35.421+05') - const fetchRes = await tbl.findUnique({ - where: { - id: 1, - }, - }) + const res = await tbl.create({ + data: { + id: 1, + timestamp: date, + }, + }) - t.deepEqual(fetchRes?.timestamp, date) -}) + t.deepEqual(res.timestamp, date) -test.serial('support timestamptz type', async (t) => { - // Check that we store the timestamp without taking into account timezones - // such that upon reading we get the same timestamp even if we are in a different time zone - // test with 2 different time zones such that they cannot both coincide with the machine's timezone. - const date1 = new Date('2023-08-07 18:28:35.421+02') - const date2 = new Date('2023-08-07 18:28:35.421+03') + const fetchRes = await tbl.findUnique({ + where: { + id: 1, + }, + }) - const res1 = await tbl.create({ - data: { - id: 1, - timestamptz: date1, - }, + t.deepEqual(fetchRes?.timestamp, date) }) - const res2 = await tbl.create({ - data: { - id: 2, - timestamptz: date2, - }, - }) + test('support timestamptz type', async (t) => { + const { tbl } = t.context + // Check that we store the timestamp without taking into account timezones + // such that upon reading we get the same timestamp even if we are in a different time zone + // test with 2 different time zones such that they cannot both coincide with the machine's timezone. 
+ const date1 = new Date('2023-08-07 18:28:35.421+02') + const date2 = new Date('2023-08-07 18:28:35.421+03') - t.deepEqual(res1.timestamptz, date1) - t.deepEqual(res2.timestamptz, date2) + const res1 = await tbl.create({ + data: { + id: 1, + timestamptz: date1, + }, + }) - const fetchRes1 = await tbl.findUnique({ - where: { - id: 1, - }, - }) + const res2 = await tbl.create({ + data: { + id: 2, + timestamptz: date2, + }, + }) - const fetchRes2 = await tbl.findUnique({ - where: { - id: 2, - }, - }) + t.deepEqual(res1.timestamptz, date1) + t.deepEqual(res2.timestamptz, date2) + + const fetchRes1 = await tbl.findUnique({ + where: { + id: 1, + }, + }) - t.deepEqual(fetchRes1?.timestamptz, date1) - t.deepEqual(fetchRes2?.timestamptz, date2) -}) + const fetchRes2 = await tbl.findUnique({ + where: { + id: 2, + }, + }) -test.serial('support null value for timestamptz type', async (t) => { - const expectedRes = { - id: 1, - timestamptz: null, - } + t.deepEqual(fetchRes1?.timestamptz, date1) + t.deepEqual(fetchRes2?.timestamptz, date2) + }) - const res = await tbl.create({ - data: { + test('support null value for timestamptz type', async (t) => { + const { tbl } = t.context + const expectedRes = { id: 1, timestamptz: null, - }, - select: { - id: true, - timestamptz: true, - }, - }) + } - t.deepEqual(res, expectedRes) + const res = await tbl.create({ + data: { + id: 1, + timestamptz: null, + }, + select: { + id: true, + timestamptz: true, + }, + }) - const fetchRes = await tbl.findUnique({ - where: { - id: 1, - }, - select: { - id: true, - timestamptz: true, - }, + t.deepEqual(res, expectedRes) + + const fetchRes = await tbl.findUnique({ + where: { + id: 1, + }, + select: { + id: true, + timestamptz: true, + }, + }) + + t.deepEqual(fetchRes, expectedRes) }) - t.deepEqual(fetchRes, expectedRes) -}) + test('support boolean type', async (t) => { + const { tbl } = t.context + // Check that we can store booleans + const res = await tbl.createMany({ + data: [ + { + id: 1, + bool: true, + }, + { + id: 2, + bool: false, + }, + ], + }) + + t.deepEqual(res, { + count: 2, + }) + + const rows = await tbl.findMany({ + select: { + id: true, + bool: true, + }, + orderBy: { + id: 'asc', + }, + }) -test.serial('support boolean type', async (t) => { - // Check that we can store booleans - const res = await tbl.createMany({ - data: [ + t.deepEqual(rows, [ { id: 1, bool: true, @@ -266,567 +281,556 @@ test.serial('support boolean type', async (t) => { id: 2, bool: false, }, - ], - }) - - t.deepEqual(res, { - count: 2, - }) + ]) - const rows = await tbl.findMany({ - select: { - id: true, - bool: true, - }, - orderBy: { - id: 'asc', - }, + // Check that it rejects invalid values + await t.throwsAsync( + tbl.create({ + data: { + id: 3, + // @ts-ignore + bool: 'true', + }, + }), + { + instanceOf: ZodError, + message: /Expected boolean, received string/, + } + ) }) - t.deepEqual(rows, [ - { + test('support null value for boolean type', async (t) => { + const { tbl } = t.context + const expectedRes = { id: 1, - bool: true, - }, - { - id: 2, - bool: false, - }, - ]) - - // Check that it rejects invalid values - await t.throwsAsync( - tbl.create({ - data: { - id: 3, - // @ts-ignore - bool: 'true', - }, - }), - { - instanceOf: ZodError, - message: /Expected boolean, received string/, + bool: null, } - ) -}) -test.serial('support null value for boolean type', async (t) => { - const expectedRes = { - id: 1, - bool: null, - } + const res = await tbl.create({ + data: { + id: 1, + bool: null, + }, + select: { + id: true, + bool: 
true, + }, + }) - const res = await tbl.create({ - data: { - id: 1, - bool: null, - }, - select: { - id: true, - bool: true, - }, - }) + t.deepEqual(res, expectedRes) - t.deepEqual(res, expectedRes) + const fetchRes = await tbl.findUnique({ + where: { + id: 1, + }, + select: { + id: true, + bool: true, + }, + }) - const fetchRes = await tbl.findUnique({ - where: { - id: 1, - }, - select: { - id: true, - bool: true, - }, + t.deepEqual(fetchRes, expectedRes) }) - t.deepEqual(fetchRes, expectedRes) -}) + test('support uuid type', async (t) => { + const { tbl } = t.context + const uuid = 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11' + const res = await tbl.create({ + data: { + id: 1, + uuid: uuid, + }, + }) -test.serial('support uuid type', async (t) => { - const uuid = 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11' - const res = await tbl.create({ - data: { - id: 1, - uuid: uuid, - }, - }) + t.assert(res.id === 1 && res.uuid === uuid) - t.assert(res.id === 1 && res.uuid === uuid) + const fetchRes = await tbl.findUnique({ + where: { + id: 1, + }, + }) - const fetchRes = await tbl.findUnique({ - where: { - id: 1, - }, + t.is(fetchRes?.uuid, uuid) + + // Check that it rejects invalid uuids + await t.throwsAsync( + tbl.create({ + data: { + id: 2, + // the UUID below has 1 character too much in the last group + uuid: 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a111', + }, + }), + { + instanceOf: ZodError, + message: /Invalid uuid/, + } + ) }) - t.is(fetchRes?.uuid, uuid) + test('support null value for uuid type', async (t) => { + const { tbl } = t.context + const expectedRes = { + id: 1, + uuid: null, + } - // Check that it rejects invalid uuids - await t.throwsAsync( - tbl.create({ + const res = await tbl.create({ data: { - id: 2, - // the UUID below has 1 character too much in the last group - uuid: 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a111', + id: 1, + uuid: null, }, - }), - { - instanceOf: ZodError, - message: /Invalid uuid/, - } - ) -}) + select: { + id: true, + uuid: true, + }, + }) -test.serial('support null value for uuid type', async (t) => { - const expectedRes = { - id: 1, - uuid: null, - } + t.deepEqual(res, expectedRes) - const res = await tbl.create({ - data: { - id: 1, - uuid: null, - }, - select: { - id: true, - uuid: true, - }, + const fetchRes = await tbl.findUnique({ + where: { + id: 1, + }, + select: { + id: true, + uuid: true, + }, + }) + + t.deepEqual(fetchRes, expectedRes) }) - t.deepEqual(res, expectedRes) + test('support int2 type', async (t) => { + const { tbl } = t.context - const fetchRes = await tbl.findUnique({ - where: { - id: 1, - }, - select: { - id: true, - uuid: true, - }, + const validInt1 = 32767 + const invalidInt1 = 32768 + + const validInt2 = -32768 + const invalidInt2 = -32769 + + const res = await tbl.createMany({ + data: [ + { + id: 1, + int2: validInt1, + }, + { + id: 2, + int2: validInt2, + }, + ], + }) + + t.deepEqual(res, { + count: 2, + }) + + // Check that it rejects invalid integers + const invalidInts = [invalidInt1, invalidInt2] + let id = 3 + for (const invalidInt of invalidInts) { + await t.throwsAsync( + tbl.create({ + data: { + id: id++, + int2: invalidInt, + }, + }), + { + instanceOf: ZodError, + message: + /(Number must be less than or equal to 32767)|(Number must be greater than or equal to -32768)/, + } + ) + } }) - t.deepEqual(fetchRes, expectedRes) -}) + test('support null values for int2 type', async (t) => { + const { tbl } = t.context + const expectedRes = { + id: 1, + int2: null, + } -test.serial('support int2 type', async (t) => { - const validInt1 = 32767 - 
const invalidInt1 = 32768 + const res = await tbl.create({ + data: { + id: 1, + int2: null, + }, + select: { + id: true, + int2: true, + }, + }) - const validInt2 = -32768 - const invalidInt2 = -32769 + t.deepEqual(res, expectedRes) - const res = await tbl.createMany({ - data: [ - { + const fetchRes = await tbl.findUnique({ + where: { id: 1, - int2: validInt1, }, - { - id: 2, - int2: validInt2, + select: { + id: true, + int2: true, }, - ], - }) + }) - t.deepEqual(res, { - count: 2, + t.deepEqual(fetchRes, expectedRes) }) - // Check that it rejects invalid integers - const invalidInts = [invalidInt1, invalidInt2] - let id = 3 - for (const invalidInt of invalidInts) { - await t.throwsAsync( - tbl.create({ - data: { - id: id++, - int2: invalidInt, - }, - }), - { - instanceOf: ZodError, - message: - /(Number must be less than or equal to 32767)|(Number must be greater than or equal to -32768)/, - } - ) - } -}) + test('support int4 type', async (t) => { + const { tbl } = t.context -test.serial('support null values for int2 type', async (t) => { - const expectedRes = { - id: 1, - int2: null, - } + const validInt1 = 2147483647 + const invalidInt1 = 2147483648 - const res = await tbl.create({ - data: { - id: 1, - int2: null, - }, - select: { - id: true, - int2: true, - }, - }) + const validInt2 = -2147483648 + const invalidInt2 = -2147483649 - t.deepEqual(res, expectedRes) + const res = await tbl.createMany({ + data: [ + { + id: 1, + int4: validInt1, + }, + { + id: 2, + int4: validInt2, + }, + ], + }) + + t.deepEqual(res, { + count: 2, + }) + + // Check that it rejects invalid integers + const invalidInts = [invalidInt1, invalidInt2] + let id = 3 + for (const invalidInt of invalidInts) { + await t.throwsAsync( + tbl.create({ + data: { + id: id++, + int4: invalidInt, + }, + }), + { + instanceOf: ZodError, + message: + /(Number must be less than or equal to 2147483647)|(Number must be greater than or equal to -2147483648)/, + } + ) + } + }) - const fetchRes = await tbl.findUnique({ - where: { + test('support null values for int4 type', async (t) => { + const { tbl } = t.context + const expectedRes = { id: 1, - }, - select: { - id: true, - int2: true, - }, - }) + int4: null, + } + + const res = await tbl.create({ + data: { + id: 1, + int4: null, + }, + select: { + id: true, + int4: true, + }, + }) - t.deepEqual(fetchRes, expectedRes) -}) + t.deepEqual(res, expectedRes) -test.serial('support int4 type', async (t) => { - const validInt1 = 2147483647 - const invalidInt1 = 2147483648 + const fetchRes = await tbl.findUnique({ + where: { + id: 1, + }, + select: { + id: true, + int4: true, + }, + }) - const validInt2 = -2147483648 - const invalidInt2 = -2147483649 + t.deepEqual(fetchRes, expectedRes) + }) - const res = await tbl.createMany({ - data: [ + test('support float4 type', async (t) => { + const { tbl } = t.context + const validFloat1 = 1.402823e36 + const validFloat2 = -1.402823e36 + const floats = [ { id: 1, - int4: validInt1, + float4: validFloat1, }, { id: 2, - int4: validInt2, + float4: validFloat2, }, - ], - }) - - t.deepEqual(res, { - count: 2, - }) - - // Check that it rejects invalid integers - const invalidInts = [invalidInt1, invalidInt2] - let id = 3 - for (const invalidInt of invalidInts) { - await t.throwsAsync( - tbl.create({ - data: { - id: id++, - int4: invalidInt, - }, - }), { - instanceOf: ZodError, - message: - /(Number must be less than or equal to 2147483647)|(Number must be greater than or equal to -2147483648)/, - } - ) - } -}) + id: 3, + float4: +Infinity, + }, + { + id: 4, 
+ float4: -Infinity, + }, + { + id: 5, + float4: NaN, + }, + ] -test.serial('support null values for int4 type', async (t) => { - const expectedRes = { - id: 1, - int4: null, - } + const res = await tbl.createMany({ + data: floats, + }) - const res = await tbl.create({ - data: { - id: 1, - int4: null, - }, - select: { - id: true, - int4: true, - }, - }) + t.deepEqual(res, { + count: 5, + }) - t.deepEqual(res, expectedRes) + // Check that we can read the floats back + const fetchRes = await tbl.findMany({ + select: { + id: true, + float4: true, + }, + orderBy: { + id: 'asc', + }, + }) - const fetchRes = await tbl.findUnique({ - where: { - id: 1, - }, - select: { - id: true, - int4: true, - }, + t.deepEqual( + fetchRes.map((o) => ({ ...o, float4: Math.fround(o.float4!) })), + floats.map((o) => ({ ...o, float4: Math.fround(o.float4) })) + ) }) - t.deepEqual(fetchRes, expectedRes) -}) + test('converts numbers outside float4 range', async (t) => { + const { tbl } = t.context + const tooPositive = 2 ** 150 + const tooNegative = -(2 ** 150) + const tooSmallPositive = 2 ** -150 + const tooSmallNegative = -(2 ** -150) + const floats = [ + { + id: 1, + float4: tooPositive, + }, + { + id: 2, + float4: tooNegative, + }, + { + id: 3, + float4: tooSmallPositive, + }, + { + id: 4, + float4: tooSmallNegative, + }, + ] -test.serial('support float4 type', async (t) => { - const validFloat1 = 1.402823e36 - const validFloat2 = -1.402823e36 - const floats = [ - { - id: 1, - float4: validFloat1, - }, - { - id: 2, - float4: validFloat2, - }, - { - id: 3, - float4: +Infinity, - }, - { - id: 4, - float4: -Infinity, - }, - { - id: 5, - float4: NaN, - }, - ] - - const res = await tbl.createMany({ - data: floats, - }) + const res = await tbl.createMany({ + data: floats, + }) - t.deepEqual(res, { - count: 5, - }) + t.deepEqual(res, { + count: 4, + }) - // Check that we can read the floats back - const fetchRes = await tbl.findMany({ - select: { - id: true, - float4: true, - }, - orderBy: { - id: 'asc', - }, - }) + // Check that we can read the floats back + const fetchRes = await tbl.findMany({ + select: { + id: true, + float4: true, + }, + orderBy: { + id: 'asc', + }, + }) - t.deepEqual( - fetchRes, - floats.map((o) => ({ ...o, float4: Math.fround(o.float4) })) - ) -}) - -test.serial('converts numbers outside float4 range', async (t) => { - const tooPositive = 2 ** 150 - const tooNegative = -(2 ** 150) - const tooSmallPositive = 2 ** -150 - const tooSmallNegative = -(2 ** -150) - const floats = [ - { - id: 1, - float4: tooPositive, - }, - { - id: 2, - float4: tooNegative, - }, - { - id: 3, - float4: tooSmallPositive, - }, - { - id: 4, - float4: tooSmallNegative, - }, - ] - - const res = await tbl.createMany({ - data: floats, + t.deepEqual(fetchRes, [ + { + id: 1, + float4: Infinity, + }, + { + id: 2, + float4: -Infinity, + }, + { + id: 3, + float4: 0, + }, + { + id: 4, + float4: 0, + }, + ]) }) - t.deepEqual(res, { - count: 4, - }) + test('support float8 type', async (t) => { + const { tbl } = t.context + const validFloat1 = 1.7976931348623157e308 + const validFloat2 = -1.7976931348623157e308 + const floats = [ + { + id: 1, + float8: validFloat1, + }, + { + id: 2, + float8: validFloat2, + }, + { + id: 3, + float8: +Infinity, + }, + { + id: 4, + float8: -Infinity, + }, + { + id: 5, + float8: NaN, + }, + ] - // Check that we can read the floats back - const fetchRes = await tbl.findMany({ - select: { - id: true, - float4: true, - }, - orderBy: { - id: 'asc', - }, - }) + const res = await tbl.createMany({ + data: 
floats, + }) - t.deepEqual(fetchRes, [ - { - id: 1, - float4: Infinity, - }, - { - id: 2, - float4: -Infinity, - }, - { - id: 3, - float4: 0, - }, - { - id: 4, - float4: 0, - }, - ]) -}) -test.serial('support float8 type', async (t) => { - const validFloat1 = 1.7976931348623157e308 - const validFloat2 = -1.7976931348623157e308 - const floats = [ - { - id: 1, - float8: validFloat1, - }, - { - id: 2, - float8: validFloat2, - }, - { - id: 3, - float8: +Infinity, - }, - { - id: 4, - float8: -Infinity, - }, - { - id: 5, - float8: NaN, - }, - ] - - const res = await tbl.createMany({ - data: floats, - }) + t.deepEqual(res, { + count: 5, + }) - t.deepEqual(res, { - count: 5, - }) + // Check that we can read the floats back + const fetchRes = await tbl.findMany({ + select: { + id: true, + float8: true, + }, + orderBy: { + id: 'asc', + }, + }) - // Check that we can read the floats back - const fetchRes = await tbl.findMany({ - select: { - id: true, - float8: true, - }, - orderBy: { - id: 'asc', - }, + t.deepEqual(fetchRes, floats) }) - t.deepEqual(fetchRes, floats) -}) - -test.serial('support null values for float8 type', async (t) => { - const expectedRes = { - id: 1, - float8: null, - } - - const res = await tbl.create({ - data: { + test('support null values for float8 type', async (t) => { + const { tbl } = t.context + const expectedRes = { id: 1, float8: null, - }, - select: { - id: true, - float8: true, - }, - }) + } - t.deepEqual(res, expectedRes) + const res = await tbl.create({ + data: { + id: 1, + float8: null, + }, + select: { + id: true, + float8: true, + }, + }) - const fetchRes = await tbl.findUnique({ - where: { - id: 1, - }, - select: { - id: true, - float8: true, - }, - }) + t.deepEqual(res, expectedRes) - t.deepEqual(fetchRes, expectedRes) -}) + const fetchRes = await tbl.findUnique({ + where: { + id: 1, + }, + select: { + id: true, + float8: true, + }, + }) -test.serial('support BigInt type', async (t) => { - //db.defaultSafeIntegers(true) // enables BigInt support - const validBigInt1 = BigInt('9223372036854775807') - const validBigInt2 = BigInt('-9223372036854775808') - const bigInts = [ - { - id: 1, - int8: validBigInt1, - }, - { - id: 2, - int8: validBigInt2, - }, - ] - - const res = await tbl.createMany({ - data: bigInts, + t.deepEqual(fetchRes, expectedRes) }) - t.deepEqual(res, { - count: 2, - }) + test('support BigInt type', async (t) => { + const { tbl } = t.context + //db.defaultSafeIntegers(true) // enables BigInt support + const validBigInt1 = BigInt('9223372036854775807') + const validBigInt2 = BigInt('-9223372036854775808') + const bigInts = [ + { + id: 1, + int8: validBigInt1, + }, + { + id: 2, + int8: validBigInt2, + }, + ] - // Check that we can read the big ints back - const fetchRes = await tbl.findMany({ - select: { - id: true, - int8: true, - }, - orderBy: { - id: 'asc', - }, - }) + const res = await tbl.createMany({ + data: bigInts, + }) + + t.deepEqual(res, { + count: 2, + }) - t.deepEqual(fetchRes, bigInts) - //db.defaultSafeIntegers(false) // disables BigInt support -}) + // Check that we can read the big ints back + const fetchRes = await tbl.findMany({ + select: { + id: true, + int8: true, + }, + orderBy: { + id: 'asc', + }, + }) -test.serial('support null values for BigInt type', async (t) => { - const expectedRes = { - id: 1, - int8: null, - } + t.deepEqual(fetchRes, bigInts) + //db.defaultSafeIntegers(false) // disables BigInt support + }) - const res = await tbl.create({ - data: { + test('support null values for BigInt type', async (t) => { + const { 
tbl } = t.context + const expectedRes = { id: 1, int8: null, - }, - select: { - id: true, - int8: true, - }, - }) + } - t.deepEqual(res, expectedRes) + const res = await tbl.create({ + data: { + id: 1, + int8: null, + }, + select: { + id: true, + int8: true, + }, + }) - const fetchRes = await tbl.findUnique({ - where: { - id: 1, - }, - select: { - id: true, - int8: true, - }, - }) + t.deepEqual(res, expectedRes) - t.deepEqual(fetchRes, expectedRes) -}) + const fetchRes = await tbl.findUnique({ + where: { + id: 1, + }, + select: { + id: true, + int8: true, + }, + }) -test.serial( - 'throw error when value is out of range for BigInt type', - async (t) => { + t.deepEqual(fetchRes, expectedRes) + }) + + test('throw error when value is out of range for BigInt type', async (t) => { + const { tbl } = t.context const invalidBigInt1 = BigInt('9223372036854775808') const invalidBigInt2 = BigInt('-9223372036854775809') @@ -855,127 +859,131 @@ test.serial( message: /too_small/, } ) - } -) - -test.serial('support JSON type', async (t) => { - const json = { a: 1, b: true, c: { d: 'nested' }, e: [1, 2, 3], f: null } - const res = await tbl.create({ - data: { - id: 1, - json, - }, }) - t.deepEqual(res.json, json) + test('support JSONB type', async (t) => { + const { tbl } = t.context + const json = { a: 1, b: true, c: { d: 'nested' }, e: [1, 2, 3], f: null } + const res = await tbl.create({ + data: { + id: 1, + json, + }, + }) - const fetchRes = await tbl.findUnique({ - where: { - id: 1, - }, - }) + t.deepEqual(res.json, json) - t.deepEqual(fetchRes?.json, json) + const fetchRes = await tbl.findUnique({ + where: { + id: 1, + }, + }) - // Also test that we can write the special JsonNull value - const res2 = await tbl.create({ - data: { - id: 2, - json: JsonNull, - }, - }) + t.deepEqual(fetchRes?.json, json) - t.deepEqual(res2.json, JsonNull) + // Also test that we can write the special JsonNull value + const res2 = await tbl.create({ + data: { + id: 2, + json: JsonNull, + }, + }) - const fetchRes2 = await tbl.findUnique({ - where: { - id: 2, - }, - }) + t.deepEqual(res2.json, JsonNull) - t.deepEqual(fetchRes2?.json, JsonNull) -}) + const fetchRes2 = await tbl.findUnique({ + where: { + id: 2, + }, + }) -test.serial('support null values for JSON type', async (t) => { - const expectedRes = { - id: 1, - json: null, - } + t.deepEqual(fetchRes2?.json, JsonNull) + }) - const res = await tbl.create({ - data: { + test('support null values for JSONB type', async (t) => { + const { tbl } = t.context + const expectedRes = { id: 1, json: null, - }, - select: { - id: true, - json: true, - }, - }) + } - t.deepEqual(res, expectedRes) + const res = await tbl.create({ + data: { + id: 1, + json: null, + }, + select: { + id: true, + json: true, + }, + }) - const fetchRes = await tbl.findUnique({ - where: { - id: 1, - }, - select: { - id: true, - json: true, - }, - }) + t.deepEqual(res, expectedRes) - t.deepEqual(fetchRes, expectedRes) -}) + const fetchRes = await tbl.findUnique({ + where: { + id: 1, + }, + select: { + id: true, + json: true, + }, + }) -test.serial('support BLOB type', async (t) => { - const blob = new Uint8Array([1, 2, 3, 4, 5]) - const res = await tbl.create({ - data: { - id: 1, - bytea: blob, - }, + t.deepEqual(fetchRes, expectedRes) }) - t.deepEqual(res.bytea, blob) - - const fetchRes = await tbl.findUnique({ - where: { - id: 1, - }, + test('support BLOB type', async (t) => { + const { tbl } = t.context + const blob = new Uint8Array([1, 2, 3, 4, 5]) + const res = await tbl.create({ + data: { + id: 1, + 
bytea: blob, + }, + }) + + t.deepEqual(res.bytea, blob) + + const fetchRes = await tbl.findUnique({ + where: { + id: 1, + }, + }) + + t.deepEqual(fetchRes?.bytea, blob) }) - t.deepEqual(fetchRes?.bytea, blob) -}) - -test.serial('support null values for BLOB type', async (t) => { - const expectedRes = { - id: 1, - bytea: null, - } - - const res = await tbl.create({ - data: { + test('support null values for BLOB type', async (t) => { + const { tbl } = t.context + const expectedRes = { id: 1, bytea: null, - }, - select: { - id: true, - bytea: true, - }, - }) + } - t.deepEqual(res, expectedRes) + const res = await tbl.create({ + data: { + id: 1, + bytea: null, + }, + select: { + id: true, + bytea: true, + }, + }) - const fetchRes = await tbl.findUnique({ - where: { - id: 1, - }, - select: { - id: true, - bytea: true, - }, - }) + t.deepEqual(res, expectedRes) - t.deepEqual(fetchRes, expectedRes) -}) + const fetchRes = await tbl.findUnique({ + where: { + id: 1, + }, + select: { + id: true, + bytea: true, + }, + }) + + t.deepEqual(fetchRes, expectedRes) + }) +} \ No newline at end of file diff --git a/clients/typescript/test/client/model/shapes.test.ts b/clients/typescript/test/client/model/shapes.test.ts index baa5c08070..94a2a0ff7c 100644 --- a/clients/typescript/test/client/model/shapes.test.ts +++ b/clients/typescript/test/client/model/shapes.test.ts @@ -64,7 +64,8 @@ async function makeContext(t: ExecutionContext) { adapter, notifier, satellite, - registry + registry, + 'SQLite' ) const Post = electric.db.Post const Items = electric.db.Items diff --git a/clients/typescript/test/frameworks/react.test.tsx b/clients/typescript/test/frameworks/react.test.tsx index 91d3091f39..d4a483ff80 100644 --- a/clients/typescript/test/frameworks/react.test.tsx +++ b/clients/typescript/test/frameworks/react.test.tsx @@ -63,7 +63,8 @@ test.beforeEach((t) => { adapter, notifier, satellite, - registry + registry, + 'SQLite' ) dal.db.Items.sync() diff --git a/clients/typescript/test/frameworks/vuejs.test.ts b/clients/typescript/test/frameworks/vuejs.test.ts index 09773f68cc..2d4c25b25b 100644 --- a/clients/typescript/test/frameworks/vuejs.test.ts +++ b/clients/typescript/test/frameworks/vuejs.test.ts @@ -52,7 +52,8 @@ test.beforeEach((t) => { adapter, notifier, satellite, - registry + registry, + 'SQLite' ) dal.db.Items.sync() diff --git a/clients/typescript/test/migrators/postgres/schema.test.ts b/clients/typescript/test/migrators/postgres/schema.test.ts index 3dd24b5c85..65a1876617 100644 --- a/clients/typescript/test/migrators/postgres/schema.test.ts +++ b/clients/typescript/test/migrators/postgres/schema.test.ts @@ -1,7 +1,6 @@ import test from 'ava' -import { AnyDatabase } from '../../../src/drivers' -import { DatabaseAdapter } from '../../../src/drivers/node-postgres' +import { Database, DatabaseAdapter } from '../../../src/drivers/node-postgres' import { PgBundleMigrator as BundleMigrator } from '../../../src/migrators/bundle' import { satelliteDefaults } from '../../../src/satellite/config' @@ -13,7 +12,7 @@ import { makePgDatabase } from '../../support/node-postgres' type Context = { dbName: string adapter: DatabaseAdapter - db: AnyDatabase + db: Database stopPG: () => Promise } From 7a867d3b8b916746f0333ee1f150f00f310ab9e8 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 12 Mar 2024 14:15:48 +0100 Subject: [PATCH 031/156] Style improvements --- .../typescript/src/drivers/node-postgres/index.ts | 2 +- .../src/drivers/tauri-postgres/database.ts | 6 ++++-- 
.../typescript/src/drivers/tauri-postgres/index.ts | 2 +- .../src/migrators/query-builder/sqliteBuilder.ts | 13 +++++++++++++ 4 files changed, 19 insertions(+), 4 deletions(-) diff --git a/clients/typescript/src/drivers/node-postgres/index.ts b/clients/typescript/src/drivers/node-postgres/index.ts index 8870d00f40..9828f1cbcc 100644 --- a/clients/typescript/src/drivers/node-postgres/index.ts +++ b/clients/typescript/src/drivers/node-postgres/index.ts @@ -26,7 +26,7 @@ export const electrify = async >( const migrator = opts?.migrator || new PgBundleMigrator(adapter, dbDescription.pgMigrations) const socketFactory = opts?.socketFactory || WebSocketWeb - const prepare = async (_connection: DatabaseAdapterI) => {} + const prepare = async (_connection: DatabaseAdapterI) => undefined const client = await baseElectrify( dbName, diff --git a/clients/typescript/src/drivers/tauri-postgres/database.ts b/clients/typescript/src/drivers/tauri-postgres/database.ts index 6472e30ab2..a6f8c17110 100644 --- a/clients/typescript/src/drivers/tauri-postgres/database.ts +++ b/clients/typescript/src/drivers/tauri-postgres/database.ts @@ -10,6 +10,8 @@ type TauriQueryResult = { rows_modified: number } +type TauriInvokeFn = (cmd: string, params?: object) => Promise + export interface Database { name: string exec(statement: Statement): Promise @@ -19,7 +21,7 @@ export interface Database { export class ElectricDatabase implements Database { // Do not use this constructor directly. // Create a Database instance using the static `init` method instead. - private constructor(public name: string, private invoke: Function) {} + private constructor(public name: string, private invoke: TauriInvokeFn) {} private tauriExec(statement: Statement): Promise { return this.invoke('tauri_exec_command', { @@ -58,7 +60,7 @@ export class ElectricDatabase implements Database { await this.invoke('tauri_stop_postgres') } - static async init(dbName: string, invoke: Function) { + static async init(dbName: string, invoke: TauriInvokeFn) { await invoke('tauri_init_command', { name: dbName }) return new ElectricDatabase(dbName, invoke) } diff --git a/clients/typescript/src/drivers/tauri-postgres/index.ts b/clients/typescript/src/drivers/tauri-postgres/index.ts index 830c347939..02cd9b4bd4 100644 --- a/clients/typescript/src/drivers/tauri-postgres/index.ts +++ b/clients/typescript/src/drivers/tauri-postgres/index.ts @@ -25,7 +25,7 @@ export const electrify = async >( const migrator = opts?.migrator || new PgBundleMigrator(adapter, dbDescription.pgMigrations) const socketFactory = opts?.socketFactory || WebSocketWeb - const prepare = async (_connection: DatabaseAdapterI) => {} + const prepare = async (_connection: DatabaseAdapterI) => undefined const client = await baseElectrify( dbName, diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts index ba90f1df99..1730a839fb 100644 --- a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -54,6 +54,19 @@ class SqliteBuilder extends QueryBuilder { )})` } + getLocalTableNames(notIn: string[] = []): Statement { + const ignore = this.metaTables.concat(notIn) + const tables = ` + SELECT name FROM sqlite_master + WHERE type = 'table' AND + name NOT IN (${ignore.map(() => '?').join(',')}) + ` + return { + sql: tables, + args: ignore, + } + } + insertOrIgnore( schema: string, table: string, From 1fcfe74db049d235481fdef4197715c65275b0a5 Mon 
Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 12 Mar 2024 14:24:36 +0100 Subject: [PATCH 032/156] Reorganize test/client/model structure for PG and SQLite tests --- .../client/model/{datatype.test.ts => datatype.ts} | 0 .../datatype.test.ts} | 14 +++++++------- .../datatype.test.ts} | 10 +++++----- 3 files changed, 12 insertions(+), 12 deletions(-) rename clients/typescript/test/client/model/{datatype.test.ts => datatype.ts} (100%) rename clients/typescript/test/client/model/{datatype.pg.test.ts => postgres/datatype.test.ts} (75%) rename clients/typescript/test/client/model/{datatype.sqlite.test.ts => sqlite/datatype.test.ts} (78%) diff --git a/clients/typescript/test/client/model/datatype.test.ts b/clients/typescript/test/client/model/datatype.ts similarity index 100% rename from clients/typescript/test/client/model/datatype.test.ts rename to clients/typescript/test/client/model/datatype.ts diff --git a/clients/typescript/test/client/model/datatype.pg.test.ts b/clients/typescript/test/client/model/postgres/datatype.test.ts similarity index 75% rename from clients/typescript/test/client/model/datatype.pg.test.ts rename to clients/typescript/test/client/model/postgres/datatype.test.ts index 02cac7cb70..b51b87d5ac 100644 --- a/clients/typescript/test/client/model/datatype.pg.test.ts +++ b/clients/typescript/test/client/model/postgres/datatype.test.ts @@ -1,16 +1,16 @@ import anyTest, { TestFn } from 'ava' -import { MockRegistry } from '../../../src/satellite/mock' +import { MockRegistry } from '../../../../src/satellite/mock' -import { electrify } from '../../../src/drivers/node-postgres' +import { electrify } from '../../../../src/drivers/node-postgres' import { _NOT_UNIQUE_, _RECORD_NOT_FOUND_, -} from '../../../src/client/validation/errors/messages' -import { schema } from '../generated' -import { ContextType, datatypeTests } from './datatype.test' -import { makePgDatabase } from '../../support/node-postgres' -import { randomValue } from '../../../src/util' +} from '../../../../src/client/validation/errors/messages' +import { schema } from '../../generated' +import { ContextType, datatypeTests } from '../datatype' +import { makePgDatabase } from '../../../support/node-postgres' +import { randomValue } from '../../../../src/util' // Run all tests in this file serially // because there are a lot of tests diff --git a/clients/typescript/test/client/model/datatype.sqlite.test.ts b/clients/typescript/test/client/model/sqlite/datatype.test.ts similarity index 78% rename from clients/typescript/test/client/model/datatype.sqlite.test.ts rename to clients/typescript/test/client/model/sqlite/datatype.test.ts index ad12513b04..c064e046d0 100644 --- a/clients/typescript/test/client/model/datatype.sqlite.test.ts +++ b/clients/typescript/test/client/model/sqlite/datatype.test.ts @@ -2,15 +2,15 @@ import anyTest, { TestFn } from 'ava' import Database from 'better-sqlite3' import type { Database as BetterSqlite3Database } from 'better-sqlite3' -import { MockRegistry } from '../../../src/satellite/mock' +import { MockRegistry } from '../../../../src/satellite/mock' -import { electrify } from '../../../src/drivers/better-sqlite3' +import { electrify } from '../../../../src/drivers/better-sqlite3' import { _NOT_UNIQUE_, _RECORD_NOT_FOUND_, -} from '../../../src/client/validation/errors/messages' -import { schema } from '../generated' -import { ContextType, datatypeTests } from './datatype.test' +} from '../../../../src/client/validation/errors/messages' +import { schema } from '../../generated' 
+import { ContextType, datatypeTests } from '../datatype' const test = anyTest as TestFn< ContextType & { From 740404be700880fb2a4b1336272c58e3edd5e83a Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 12 Mar 2024 14:35:21 +0100 Subject: [PATCH 033/156] Remove obsolete comments --- .../typescript/src/drivers/node-postgres/database.ts | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/clients/typescript/src/drivers/node-postgres/database.ts b/clients/typescript/src/drivers/node-postgres/database.ts index d71c8e61cd..49ee7b79df 100644 --- a/clients/typescript/src/drivers/node-postgres/database.ts +++ b/clients/typescript/src/drivers/node-postgres/database.ts @@ -3,16 +3,6 @@ import type { Client } from 'pg' import EmbeddedPostgres from 'embedded-postgres' import { Row, Statement } from '../../util' -// Modify how 'pg' parses JSON values -// simply return it as a string -// our conversions will correctly parse it -/* -const parseJSON = (value: string) => { - return value -} -pg.types.setTypeParser(pg.types.builtins.JSON, parseJSON) -pg.types.setTypeParser(pg.types.builtins.JSONB, parseJSON) -*/ const originalGetTypeParser = pg.types.getTypeParser export type QueryResult = { From 118391c48b3bbf01f66cf54f1f8a01627486eead Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 12 Mar 2024 14:41:18 +0100 Subject: [PATCH 034/156] Updated package.json --- clients/typescript/package.json | 13 +++++++++++++ pnpm-lock.yaml | 3 +++ 2 files changed, 16 insertions(+) diff --git a/clients/typescript/package.json b/clients/typescript/package.json index d96e32b7bd..323e960f0e 100644 --- a/clients/typescript/package.json +++ b/clients/typescript/package.json @@ -55,6 +55,7 @@ "./op-sqlite": "./dist/drivers/op-sqlite/index.js", "./generic": "./dist/drivers/generic/index.js", "./node": "./dist/drivers/better-sqlite3/index.js", + "./node-postgres": "./dist/drivers/node-postgres/index.js", "./react": "./dist/frameworks/react/index.js", "./tauri-postgres": "./dist/drivers/tauri-postgres/index.js", "./vuejs": "./dist/frameworks/vuejs/index.js", @@ -91,6 +92,9 @@ "node": [ "./dist/drivers/better-sqlite3/index.d.ts" ], + "node-postgres": [ + "./dist/drivers/node-postgres/index.d.ts" + ], "react": [ "./dist/frameworks/react/index.d.ts" ], @@ -246,6 +250,7 @@ "lint-staged": "^13.1.0", "memorystorage": "^0.12.0", "nodemon": "^3.0.2", + "pg": "^8.11.3", "prettier": "2.8.2", "react": "^18.2.0", "react-dom": "^18.2.0", @@ -265,7 +270,9 @@ "@capacitor-community/sqlite": ">= 5.6.2", "@op-engineering/op-sqlite": ">= 2.0.16", "@tauri-apps/plugin-sql": "2.0.0-alpha.5", + "embedded-postgres": "16.1.1-beta.9", "expo-sqlite": ">= 13.0.0", + "pg": "^8.11.3", "prisma": "4.8.1", "react": ">= 16.8.0", "react-dom": ">= 16.8.0", @@ -285,9 +292,15 @@ "@tauri-apps/plugin-sql": { "optional": true }, + "embedded-postgres": { + "optional": true + }, "expo-sqlite": { "optional": true }, + "pg": { + "optional": true + }, "prisma": { "optional": true }, diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 7e0143a784..dface65568 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -237,6 +237,9 @@ importers: nodemon: specifier: ^3.0.2 version: 3.0.2 + pg: + specifier: ^8.11.3 + version: 8.11.3 prettier: specifier: 2.8.2 version: 2.8.2 From 1cf85abc482e51ec7b1f1063b51c6d3ed8676650 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 14 Mar 2024 14:39:25 +0100 Subject: [PATCH 035/156] Fixes after rebase --- .../src/migrators/query-builder/pgBuilder.ts | 24 +++++++ .../test/satellite/process.tags.test.ts | 70 +++++++++---------- 
2 files changed, 59 insertions(+), 35 deletions(-) diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index b8ecd2ad05..7643e2d72f 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -390,6 +390,30 @@ class PgBuilder extends QueryBuilder { ): string { const oplog = `"${oplogTable.namespace}"."${oplogTable.tablename}"` const shadow = `"${shadowTable.namespace}"."${shadowTable.tablename}"` + /* + return dedent` + UPDATE ${oplog} + SET "clearTags" = + CASE WHEN rowid = updates.rowid_of_first_op_in_tx + THEN updates.tags + ELSE $1 -- singleton array containing tag of thix TX + END + FROM ( + SELECT shadow.tags as tags, rowid_of_first_op_in_tx + FROM ( + SELECT min(op.rowid) as rowid_of_first_op_in_tx + FROM ${shadow} AS shadow + JOIN ${oplog} as op + ON op.namespace = shadow.namespace + AND op.tablename = shadow.tablename + AND op."primaryKey" = shadow."primaryKey" + WHERE op.timestamp = $2 + GROUP BY op.namespace, op.tablename, op."primaryKey" + ) t JOIN ${oplog} s ON s.rowid = t.rowid_of_first_op_in_tx + ) AS updates + WHERE ${oplog}.timestamp = $3 -- only update operations from this TX + ` + */ return dedent` UPDATE ${oplog} SET "clearTags" = ${shadow}.tags diff --git a/clients/typescript/test/satellite/process.tags.test.ts b/clients/typescript/test/satellite/process.tags.test.ts index 4edf1294b2..6cd613b673 100644 --- a/clients/typescript/test/satellite/process.tags.test.ts +++ b/clients/typescript/test/satellite/process.tags.test.ts @@ -466,109 +466,109 @@ export const processTagsTests = (test: TestFn) => { test('Tags are correctly set on subsequent operations in a TX', async (t) => { const { adapter, runMigrations, satellite, authState } = t.context - + await runMigrations() - + await adapter.run({ - sql: `INSERT INTO parent(id, value) VALUES (1,'val1')`, + sql: `INSERT INTO main.parent(id, value) VALUES (1,'val1')`, }) - + // Since no snapshot was made yet // the timestamp in the oplog is not yet set const insertEntry = await adapter.query({ - sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 1`, + sql: `SELECT timestamp, "clearTags" FROM main._electric_oplog WHERE rowid = 1`, }) t.is(insertEntry[0].timestamp, null) t.deepEqual(JSON.parse(insertEntry[0].clearTags as string), []) - + await satellite._setAuthState(authState) await satellite._performSnapshot() - + const parseDate = (date: string) => new Date(date).getTime() - + // Now the timestamp is set const insertEntryAfterSnapshot = await adapter.query({ - sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 1`, + sql: `SELECT timestamp, "clearTags" FROM main._electric_oplog WHERE rowid = 1`, }) t.assert(insertEntryAfterSnapshot[0].timestamp != null) const insertTimestamp = parseDate( insertEntryAfterSnapshot[0].timestamp as string ) t.deepEqual(JSON.parse(insertEntryAfterSnapshot[0].clearTags as string), []) - + // Now update the entry, then delete it, and then insert it again await adapter.run({ - sql: `UPDATE parent SET value = 'val2' WHERE id=1`, + sql: `UPDATE main.parent SET value = 'val2' WHERE id=1`, }) - + await adapter.run({ - sql: `DELETE FROM parent WHERE id=1`, + sql: `DELETE FROM main.parent WHERE id=1`, }) - + await adapter.run({ - sql: `INSERT INTO parent(id, value) VALUES (1,'val3')`, + sql: `INSERT INTO main.parent(id, value) VALUES (1,'val3')`, }) - + // Since no snapshot has been taken for these operations // 
their timestamp and clearTags should not be set const updateEntry = await adapter.query({ - sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 2`, + sql: `SELECT timestamp, "clearTags" FROM main._electric_oplog WHERE rowid = 2`, }) - + t.is(updateEntry[0].timestamp, null) t.deepEqual(JSON.parse(updateEntry[0].clearTags as string), []) - + const deleteEntry = await adapter.query({ - sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 3`, + sql: `SELECT timestamp, "clearTags" FROM main._electric_oplog WHERE rowid = 3`, }) - + t.is(deleteEntry[0].timestamp, null) t.deepEqual(JSON.parse(deleteEntry[0].clearTags as string), []) - + const reinsertEntry = await adapter.query({ - sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 4`, + sql: `SELECT timestamp, "clearTags" FROM main._electric_oplog WHERE rowid = 4`, }) - + t.is(reinsertEntry[0].timestamp, null) t.deepEqual(JSON.parse(reinsertEntry[0].clearTags as string), []) - + // Now take a snapshot for these operations await satellite._performSnapshot() - + // Now the timestamps should be set // The first operation (update) should override // the original insert (i.e. clearTags must contain the timestamp of the insert) const updateEntryAfterSnapshot = await adapter.query({ - sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 2`, + sql: `SELECT timestamp, "clearTags" FROM main._electric_oplog WHERE rowid = 2`, }) - + const rawTimestampTx2 = updateEntryAfterSnapshot[0].timestamp t.assert(rawTimestampTx2 != null) const timestampTx2 = parseDate(rawTimestampTx2 as string) - + t.is( updateEntryAfterSnapshot[0].clearTags, genEncodedTags(authState.clientId, [insertTimestamp]) ) - + // The second operation (delete) should have the same timestamp // and should contain the tag of the TX in its clearTags const deleteEntryAfterSnapshot = await adapter.query({ - sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 3`, + sql: `SELECT timestamp, "clearTags" FROM main._electric_oplog WHERE rowid = 3`, }) - + t.assert(deleteEntryAfterSnapshot[0].timestamp === rawTimestampTx2) t.is( deleteEntryAfterSnapshot[0].clearTags, genEncodedTags(authState.clientId, [timestampTx2]) ) - + // The third operation (reinsert) should have the same timestamp // and should contain the tag of the TX in its clearTags const reinsertEntryAfterSnapshot = await adapter.query({ - sql: `SELECT timestamp, clearTags FROM _electric_oplog WHERE rowid = 4`, + sql: `SELECT timestamp, "clearTags" FROM main._electric_oplog WHERE rowid = 4`, }) - + t.assert(reinsertEntryAfterSnapshot[0].timestamp === rawTimestampTx2) t.is( reinsertEntryAfterSnapshot[0].clearTags, From 59db2417efd284a00633becdb06ada180fbdbe3d Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 14 Mar 2024 18:07:33 +0100 Subject: [PATCH 036/156] (WIP) Porting e2e tests for PG --- .../typescript/src/client/model/builder.ts | 17 +-- .../src/drivers/node-postgres/database.ts | 101 +++++++++--------- .../src/drivers/node-postgres/index.ts | 15 ++- .../src/drivers/tauri-postgres/index.ts | 7 +- clients/typescript/src/electric/index.ts | 2 +- clients/typescript/src/migrators/builder.ts | 4 +- clients/typescript/src/satellite/oplog.ts | 2 +- clients/typescript/src/satellite/process.ts | 5 + .../typescript/test/support/node-postgres.ts | 5 +- e2e/Makefile | 6 ++ e2e/common.mk | 4 + e2e/init.sql | 3 + e2e/satellite_client/package.json | 1 + e2e/satellite_client/src/client.ts | 44 ++++++-- ....13_node_satellite_can_sync_timestamps.lux | 4 +- 
...ode_satellite_can_sync_dates_and_times.lux | 2 +- ...node_satellite_authentication.lux.disabled | 2 +- ..._is_informed_when_jwt_expires.lux.disabled | 2 +- e2e/tests/Makefile | 3 + e2e/tests/_satellite_macros.luxinc | 14 ++- 20 files changed, 162 insertions(+), 81 deletions(-) diff --git a/clients/typescript/src/client/model/builder.ts b/clients/typescript/src/client/model/builder.ts index e966fda907..1d7a84fed8 100644 --- a/clients/typescript/src/client/model/builder.ts +++ b/clients/typescript/src/client/model/builder.ts @@ -30,6 +30,8 @@ squelPostgres.registerValueHandler(Uint8Array, function (uint8) { type AnyFindInput = FindInput export class Builder { + private _fullyQualifiedTableName: string + constructor( private _tableName: string, private _fields: string[], @@ -48,23 +50,26 @@ export class Builder { >, public dialect: Dialect ) { + this._fullyQualifiedTableName = `"${this._tableName}"` if (dialect === 'Postgres') { squelPostgres.cls.DefaultQueryBuilderOptions.nameQuoteCharacter = '"' - squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteTableNames = true + //squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteTableNames = true squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteFieldNames = true squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteAliasNames = true // need to register it, otherwise squel complains that the Date type is not registered // as Squel does not support it out-of-the-box but our Postgres drivers do support it. squelPostgres.registerValueHandler(Date, (date) => date) + //this._fullyQualifiedTableName = `public."${this._tableName}"` } else { // Don't use numbered parameters if dialect is SQLite squelPostgres.cls.DefaultQueryBuilderOptions.numberedParameters = false + //this._fullyQualifiedTableName = `main."${this._tableName}"` } } create(i: CreateInput): QueryBuilder { // Make a SQL query out of the data - const query = squelPostgres.insert().into(this._tableName).setFields(i.data) + const query = squelPostgres.insert().into(this._fullyQualifiedTableName).setFields(i.data) // Adds a `RETURNING` statement that returns all known fields const queryWithReturn = this.returnAllFields(query) @@ -74,7 +79,7 @@ export class Builder { createMany(i: CreateManyInput): QueryBuilder { const insert = squelPostgres .insert() - .into(this._tableName) + .into(this._fullyQualifiedTableName) .setFieldsRows(i.data) return i.skipDuplicates ? 
insert.onConflict() // adds "ON CONFLICT DO NOTHING" to the query @@ -119,7 +124,7 @@ export class Builder { i: DeleteManyInput, idRequired = false ): QueryBuilder { - const deleteQuery = squel.delete().from(this._tableName) + const deleteQuery = squel.delete().from(this._fullyQualifiedTableName) const whereObject = i.where // safe because the schema for `where` adds an empty object as default which is provided if the `where` field is absent const fields = this.getFields(whereObject, idRequired) return addFilters(fields, whereObject, deleteQuery) @@ -142,7 +147,7 @@ export class Builder { const query = squelPostgres .update() - .table(this._tableName) + .table(this._fullyQualifiedTableName) .setFields(i.data) // Adds a `RETURNING` statement that returns all known fields @@ -174,7 +179,7 @@ export class Builder { if (!this.shapeManager.hasBeenSubscribed(this._tableName)) Log.debug('Reading from unsynced table ' + this._tableName) - const query = squelPostgres.select().from(this._tableName) // specify from which table to select + const query = squelPostgres.select().from(this._fullyQualifiedTableName) // specify from which table to select // only select the fields provided in `i.select` and the ones in `i.where` const addFieldSelectionP = this.addFieldSelection.bind( this, diff --git a/clients/typescript/src/drivers/node-postgres/database.ts b/clients/typescript/src/drivers/node-postgres/database.ts index 49ee7b79df..58d74280d8 100644 --- a/clients/typescript/src/drivers/node-postgres/database.ts +++ b/clients/typescript/src/drivers/node-postgres/database.ts @@ -1,6 +1,5 @@ import pg from 'pg' import type { Client } from 'pg' -import EmbeddedPostgres from 'embedded-postgres' import { Row, Statement } from '../../util' const originalGetTypeParser = pg.types.getTypeParser @@ -13,67 +12,73 @@ export type QueryResult = { export interface Database { name: string exec(statement: Statement): Promise - stop(): Promise } export class ElectricDatabase implements Database { - // Do not use this constructor directly. - // Create a Database instance using the static `init` method instead. - private constructor( + constructor( public name: string, - private postgres: EmbeddedPostgres, + //private postgres: EmbeddedPostgres, private db: Client ) {} async exec(statement: Statement): Promise { - const { rows, rowCount } = await this.db.query({ - text: statement.sql, - values: statement.args, - types: { - // Modify the parser to not parse JSON values - // Instead, return them as strings - // our conversions will correctly parse them - getTypeParser: ((oid: number) => { - if ( - oid === pg.types.builtins.JSON || - oid === pg.types.builtins.JSONB - ) { - return (val) => val - } - return originalGetTypeParser(oid) - }) as typeof pg.types.getTypeParser, - }, - }) - return { - rows, - rowsModified: rowCount ?? 0, + try { + const { rows, rowCount } = await this.db.query({ + text: statement.sql, + values: statement.args, + types: { + // Modify the parser to not parse JSON values + // Instead, return them as strings + // our conversions will correctly parse them + getTypeParser: ((oid: number) => { + if ( + oid === pg.types.builtins.JSON || + oid === pg.types.builtins.JSONB + ) { + return (val) => val + } + return originalGetTypeParser(oid) + }) as typeof pg.types.getTypeParser, + }, + }) + return { + rows, + rowsModified: rowCount ?? 
0, + } + } catch (e) { + console.log("EXEC failed: " + JSON.stringify(e) + "\n" + "Statement was: " + JSON.stringify(statement)) + throw e } } +} - async stop() { - await this.postgres.stop() - } +type StopFn = () => Promise - // Creates and opens a DB backed by Postgres - static async init(config: PostgresConfig) { - // Initialize Postgres - const pg = new EmbeddedPostgres({ - databaseDir: config.databaseDir, - user: config.user ?? 'postgres', - password: config.password ?? 'password', - port: config.port ?? 54321, - persistent: config.persistent ?? true, - }) +/** + * Creates and opens a DB backed by Postgres + */ +export async function createEmbeddedPostgres(config: PostgresConfig): Promise<{ db: ElectricDatabase, stop: StopFn }> { + const EmbeddedPostgres = (await import('embedded-postgres')).default + // Initialize Postgres + const pg = new EmbeddedPostgres({ + databaseDir: config.databaseDir, + user: config.user ?? 'postgres', + password: config.password ?? 'password', + port: config.port ?? 54321, + persistent: config.persistent ?? true, + }) - await pg.initialise() - await pg.start() - await pg.createDatabase(config.name) - const db = pg.getPgClient() - await db.connect() + await pg.initialise() + await pg.start() + await pg.createDatabase(config.name) + const db = pg.getPgClient() + await db.connect() - // We use the database directory as the name - // because it uniquely identifies the DB - return new ElectricDatabase(config.databaseDir, pg, db) + // We use the database directory as the name + // because it uniquely identifies the DB + return { + db: new ElectricDatabase(config.databaseDir, db), + stop: () => pg.stop() } } diff --git a/clients/typescript/src/drivers/node-postgres/index.ts b/clients/typescript/src/drivers/node-postgres/index.ts index 9828f1cbcc..22c548e151 100644 --- a/clients/typescript/src/drivers/node-postgres/index.ts +++ b/clients/typescript/src/drivers/node-postgres/index.ts @@ -1,13 +1,13 @@ import { DatabaseAdapter as DatabaseAdapterI } from '../../electric/adapter' import { DatabaseAdapter } from './adapter' -import { Database, ElectricDatabase } from './database' +import { Database, ElectricDatabase, createEmbeddedPostgres } from './database' import { ElectricConfig } from '../../config' import { electrify as baseElectrify, ElectrifyOptions } from '../../electric' -import { WebSocketWeb } from '../../sockets/web' +import { WebSocketNode } from '../../sockets/node' import { ElectricClient, DbSchema } from '../../client/model' import { PgBundleMigrator } from '../../migrators/bundle' -export { DatabaseAdapter, ElectricDatabase } +export { DatabaseAdapter, ElectricDatabase, createEmbeddedPostgres } export type { Database } /** @@ -25,15 +25,20 @@ export const electrify = async >( const adapter = opts?.adapter || new DatabaseAdapter(db) const migrator = opts?.migrator || new PgBundleMigrator(adapter, dbDescription.pgMigrations) - const socketFactory = opts?.socketFactory || WebSocketWeb + const socketFactory = opts?.socketFactory || WebSocketNode const prepare = async (_connection: DatabaseAdapterI) => undefined + const configWithDialect = { + ...config, + dialect: 'Postgres', + } as const + const client = await baseElectrify( dbName, dbDescription, adapter, socketFactory, - config, + configWithDialect, { migrator, prepare, diff --git a/clients/typescript/src/drivers/tauri-postgres/index.ts b/clients/typescript/src/drivers/tauri-postgres/index.ts index 02cd9b4bd4..1d40ba0f69 100644 --- a/clients/typescript/src/drivers/tauri-postgres/index.ts +++ 
b/clients/typescript/src/drivers/tauri-postgres/index.ts @@ -27,12 +27,17 @@ export const electrify = async >( const socketFactory = opts?.socketFactory || WebSocketWeb const prepare = async (_connection: DatabaseAdapterI) => undefined + const configWithDialect = { + ...config, + dialect: 'Postgres', + } as const + const client = await baseElectrify( dbName, dbDescription, adapter, socketFactory, - config, + configWithDialect, { migrator, prepare, diff --git a/clients/typescript/src/electric/index.ts b/clients/typescript/src/electric/index.ts index 57e7da1567..b36b1edc5f 100644 --- a/clients/typescript/src/electric/index.ts +++ b/clients/typescript/src/electric/index.ts @@ -75,7 +75,7 @@ export const electrify = async >( configWithDefaults ) - const dialect = migrator.electricQueryBuilder.dialect + const dialect = configWithDefaults.replication.dialect const electric = ElectricClient.create( dbName, dbDescription, diff --git a/clients/typescript/src/migrators/builder.ts b/clients/typescript/src/migrators/builder.ts index dc30b83dae..11ef578abc 100644 --- a/clients/typescript/src/migrators/builder.ts +++ b/clients/typescript/src/migrators/builder.ts @@ -3,7 +3,9 @@ import { SatOpMigrate } from '../_generated/protocol/satellite' import { base64, getProtocolVersion } from '../util' import { Migration } from './index' import { generateTriggersForTable } from '../satellite/process' -import { QueryBuilder } from './query-builder' +import { sqliteBuilder, pgBuilder, QueryBuilder } from './query-builder' + +export { sqliteBuilder, pgBuilder, QueryBuilder } const metaDataSchema = z .object({ diff --git a/clients/typescript/src/satellite/oplog.ts b/clients/typescript/src/satellite/oplog.ts index 1e87d282b3..a5eab53630 100644 --- a/clients/typescript/src/satellite/oplog.ts +++ b/clients/typescript/src/satellite/oplog.ts @@ -387,7 +387,7 @@ function deserialiseRow(str: string, rel: Pick): Rec { export const fromTransaction = ( transaction: DataTransaction, - relations: RelationsCache + relations: RelationsCache, ): OplogEntry[] => { return transaction.changes.map((t) => { const columnValues = t.record ? t.record : t.oldRecord! diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index e73bc01013..be68b9542a 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -1266,6 +1266,7 @@ export class SatelliteProcess implements Satellite { } async _applyTransaction(transaction: Transaction) { + console.log("APPLY TX: " + JSON.stringify(transaction)) const origin = transaction.origin! 
const commitTimestamp = new Date(transaction.commit_timestamp.toNumber()) @@ -1314,6 +1315,7 @@ export class SatelliteProcess implements Satellite { const { statements, tablenames } = await this._apply(entries, origin) entries.forEach((e) => opLogEntries.push(e)) statements.forEach((s) => { + console.log("DML stmt: " + JSON.stringify(s)) stmts.push(s) }) tablenames.forEach((n) => tablenamesSet.add(n)) @@ -1323,6 +1325,7 @@ export class SatelliteProcess implements Satellite { const affectedTables: Map = new Map() changes.forEach((change) => { const changeStmt = { sql: change.sql } + console.log("DDL stmt: " + JSON.stringify(changeStmt)) stmts.push(changeStmt) if ( @@ -1390,11 +1393,13 @@ export class SatelliteProcess implements Satellite { if (transaction.migrationVersion) { // If a migration version is specified // then the transaction is a migration + console.log("APPLYING MIGRATION") await this.migrator.applyIfNotAlready({ statements: allStatements, version: transaction.migrationVersion, }) } else { + console.log("APPLYING TRANSACTION") await this.adapter.runInTransaction(...allStatements) } diff --git a/clients/typescript/test/support/node-postgres.ts b/clients/typescript/test/support/node-postgres.ts index 73f28a42d2..303df73ca4 100644 --- a/clients/typescript/test/support/node-postgres.ts +++ b/clients/typescript/test/support/node-postgres.ts @@ -1,11 +1,12 @@ import fs from 'fs/promises' import { ElectricDatabase } from '../../src/drivers/node-postgres' +import { createEmbeddedPostgres } from '../../src/drivers/node-postgres/database'; export async function makePgDatabase( name: string, port: number ): Promise<{ db: ElectricDatabase; stop: () => Promise }> { - const db = await ElectricDatabase.init({ + const { db, stop: stopPg } = await createEmbeddedPostgres({ name, databaseDir: `./tmp-${name}`, persistent: false, @@ -13,7 +14,7 @@ export async function makePgDatabase( }) const stop = async () => { - await db.stop() + await stopPg() await fs.rm(`./tmp-${name}`, { recursive: true, force: true }) } return { db, stop } diff --git a/e2e/Makefile b/e2e/Makefile index 6e42d075b8..ae4fd5820f 100644 --- a/e2e/Makefile +++ b/e2e/Makefile @@ -10,6 +10,12 @@ test_only: test: deps pull test_only +test_pg: + DIALECT=Postgres make test + +test_only_pg: + DIALECT=Postgres make test_only + pull: docker compose -f services_templates.yaml pull \ postgresql diff --git a/e2e/common.mk b/e2e/common.mk index 10bb971054..39d2148f51 100644 --- a/e2e/common.mk +++ b/e2e/common.mk @@ -98,6 +98,7 @@ start_satellite_client_%: docker compose -f ${DOCKER_COMPOSE_FILE} run \ --rm \ -e TERM=dumb \ + -e DIALECT=${DIALECT} \ satellite_client_$* @@ -133,3 +134,6 @@ single_test: single_test_debug: ${LUX} --debug ${TEST} + +single_test_pg: + DIALECT=Postgres ${LUX} --progress doc ${TEST} \ No newline at end of file diff --git a/e2e/init.sql b/e2e/init.sql index 6641664e73..a0b86e59d9 100755 --- a/e2e/init.sql +++ b/e2e/init.sql @@ -1,3 +1,6 @@ +CREATE DATABASE e2e_client_1_db; +CREATE DATABASE e2e_client_2_db; + CREATE TABLE entries ( id UUID PRIMARY KEY, content VARCHAR NOT NULL, diff --git a/e2e/satellite_client/package.json b/e2e/satellite_client/package.json index cea4040ab2..826d416d57 100644 --- a/e2e/satellite_client/package.json +++ b/e2e/satellite_client/package.json @@ -20,6 +20,7 @@ "better-sqlite3": "^8.4.0", "electric-sql": "workspace:*", "jsonwebtoken": "^9.0.0", + "pg": "^8.11.3", "uuid": "^9.0.0", "zod": "^3.21.4" }, diff --git a/e2e/satellite_client/src/client.ts b/e2e/satellite_client/src/client.ts 
index 56909045e9..faba0799d1 100644 --- a/e2e/satellite_client/src/client.ts +++ b/e2e/satellite_client/src/client.ts @@ -1,22 +1,52 @@ -import Database from 'better-sqlite3' +import fs from 'fs/promises' +import pg from 'pg' +import SQLiteDatabase from 'better-sqlite3' import { ElectricConfig } from 'electric-sql' import { mockSecureAuthToken } from 'electric-sql/auth/secure' +import { ElectricDatabase } from 'electric-sql/node-postgres' import { setLogLevel } from 'electric-sql/debug' -import { electrify } from 'electric-sql/node' +import { electrify as electrifySqlite } from 'electric-sql/node' +import { electrify as electrifyPg } from 'electric-sql/node-postgres' import { v4 as uuidv4 } from 'uuid' import { schema, Electric, ColorType as Color } from './generated/client' export { JsonNull } from './generated/client' import { globalRegistry } from 'electric-sql/satellite' import { SatelliteErrorCode } from 'electric-sql/util' import { Shape } from 'electric-sql/satellite' +import { pgBuilder, sqliteBuilder, QueryBuilder } from 'electric-sql/migrators/builder' setLogLevel('DEBUG') let dbName: string +let electrify = electrifySqlite +let builder: QueryBuilder = sqliteBuilder + +async function makePgDatabase(): Promise { + const client = new pg.Client({ + host: 'pg_1', + port: 5432, + database: dbName, + user: 'postgres', + password: 'password', + }) + + await client.connect() + + //const stop = () => client.end() + const db = new ElectricDatabase(dbName, client) + return db //{ db, stop } +} -export const make_db = (name: string): any => { +export const make_db = async (name: string): Promise => { dbName = name - return new Database(name) + console.log("DIALECT: " + process.env.DIALECT) + if (process.env.DIALECT === 'Postgres') { + electrify = electrifyPg + builder = pgBuilder + return makePgDatabase() + } + + return new SQLiteDatabase(name) } export const electrify_db = async ( @@ -107,11 +137,11 @@ export const lowLevelSubscribe = async (electric: Electric, shape: Shape) => { } export const get_tables = (electric: Electric) => { - return electric.db.rawQuery({ sql: `SELECT name FROM sqlite_master WHERE type='table';` }) + return electric.db.rawQuery(builder.getLocalTableNames()) } export const get_columns = (electric: Electric, table: string) => { - return electric.db.rawQuery({ sql: `SELECT * FROM pragma_table_info(?);`, args: [table] }) + return electric.db.rawQuery(builder.getTableInfo(table)) } export const get_rows = (electric: Electric, table: string) => { @@ -273,7 +303,7 @@ export const get_json_raw = async (electric: Electric, id: string) => { export const get_jsonb_raw = async (electric: Electric, id: string) => { const res = await electric.db.rawQuery({ - sql: `SELECT jsb FROM jsons WHERE id = ?;`, + sql: `SELECT jsb FROM jsons WHERE id = ${builder.paramSign};`, args: [id] }) as unknown as Array<{ jsb: string }> return res[0]?.jsb diff --git a/e2e/tests/03.13_node_satellite_can_sync_timestamps.lux b/e2e/tests/03.13_node_satellite_can_sync_timestamps.lux index 98ab033e99..5721b3eee4 100644 --- a/e2e/tests/03.13_node_satellite_can_sync_timestamps.lux +++ b/e2e/tests/03.13_node_satellite_can_sync_timestamps.lux @@ -19,7 +19,7 @@ [invoke setup_client 1 electric_1 5133] [shell satellite_1] - [invoke node_await_table "name: 'timestamps'"] + [invoke node_await_table "timestamps"] [invoke node_sync_table "timestamps"] [shell pg_1] @@ -36,7 +36,7 @@ [invoke setup_client 2 electric_1 5133] [shell satellite_2] - [invoke node_await_table "name: 'timestamps'"] + [invoke node_await_table 
"timestamps"] [invoke node_sync_table "timestamps"] # check that 2nd satellite also reads the row [invoke node_await_assert_timestamp "00000000-0000-0000-0000-000000000001" "2023-09-21 14:39:53.000" "2023-09-21T14:39:53.001Z"] diff --git a/e2e/tests/03.14_node_satellite_can_sync_dates_and_times.lux b/e2e/tests/03.14_node_satellite_can_sync_dates_and_times.lux index c5c4ea742a..464d37a098 100644 --- a/e2e/tests/03.14_node_satellite_can_sync_dates_and_times.lux +++ b/e2e/tests/03.14_node_satellite_can_sync_dates_and_times.lux @@ -19,7 +19,7 @@ [invoke setup_client 1 electric_1 5133] [shell satellite_1] - [invoke node_await_table "name: 'datetimes'"] + [invoke node_await_table "datetimes"] [invoke node_sync_table "datetimes"] [shell pg_1] diff --git a/e2e/tests/03.xx_node_satellite_authentication.lux.disabled b/e2e/tests/03.xx_node_satellite_authentication.lux.disabled index bb684d0baa..870d5dcb10 100644 --- a/e2e/tests/03.xx_node_satellite_authentication.lux.disabled +++ b/e2e/tests/03.xx_node_satellite_authentication.lux.disabled @@ -17,7 +17,7 @@ -JWT expired too early # Set expiration time for the JWT to 10 seconds from now !exp="10s" - [invoke connect_to_electric electric_1 5133 "[]"] + [invoke connect_to_electric electric_1 5133 "[]" 1] # Wait for the items table migration and sync the table ??[rpc] recv: #SatInStartReplicationResp ??Connectivity state changed: connected diff --git a/e2e/tests/03.xx_node_satellite_is_informed_when_jwt_expires.lux.disabled b/e2e/tests/03.xx_node_satellite_is_informed_when_jwt_expires.lux.disabled index 33fd06b837..cad8389b89 100644 --- a/e2e/tests/03.xx_node_satellite_is_informed_when_jwt_expires.lux.disabled +++ b/e2e/tests/03.xx_node_satellite_is_informed_when_jwt_expires.lux.disabled @@ -10,7 +10,7 @@ ??$node # Set expiration time for the JWT to 5 seconds from now !exp="5s" - [invoke connect_to_electric electric_1 5133 "[]" true] + [invoke connect_to_electric electric_1 5133 "[]" true 1] # Subscribe to auth status changes #!client.subscribe_to_auth_status(db) #?New auth status: EXPIRED diff --git a/e2e/tests/Makefile b/e2e/tests/Makefile index cb45f2f21c..98f680c5f8 100644 --- a/e2e/tests/Makefile +++ b/e2e/tests/Makefile @@ -4,3 +4,6 @@ DOCKER_COMPOSE_FILE=compose.yaml test: ${LUX} *.lux + +test_pg: + DIALECT=Postgres ${LUX} 03.*.lux \ No newline at end of file diff --git a/e2e/tests/_satellite_macros.luxinc b/e2e/tests/_satellite_macros.luxinc index 46c4360c36..86ae0f34f2 100644 --- a/e2e/tests/_satellite_macros.luxinc +++ b/e2e/tests/_satellite_macros.luxinc @@ -1,14 +1,20 @@ [global node=>] -[macro connect_to_electric host port migrations connectToElectric] +[macro connect_to_electric host port migrations connectToElectric satellite_number] !client = await import('./dist/client.js') ??$node # !migrations = await client.read_migrations(process.env.MIGRATION_DIRS + "/index.js") # ?$node !migrations = $migrations ??$node - !originalDb = client.make_db(process.env.SATELLITE_DB_PATH + "/$LUX_SHELLNAME") + # Temporarily disable the failure pattern + # because the printed value contains "_connectionError" + # which matches the failure pattern... 
+ - + !originalDb = await client.make_db('e2e_client_${satellite_number}_db') ??$node + # Restore the failure pattern + -$fail_pattern [invoke electrify_db "originalDb" $host $port $migrations $connectToElectric] ??(in electrify_db) config: [endmacro] @@ -31,7 +37,7 @@ [invoke start_satellite $satellite_number] -$fail_pattern ??$node - [invoke connect_to_electric $electric $port $migrations $connectToElectric] + [invoke connect_to_electric $electric $port $migrations $connectToElectric $satellite_number] [endmacro] [macro client_disconnect] @@ -179,7 +185,7 @@ [endmacro] [macro node_await_table match] - [invoke wait-for "await client.get_tables(db)" "${match}" 10 $node] + [invoke wait-for "await client.get_tables(db)" "name: '${match}'" 10 $node] [endmacro] [macro node_await_column table column] From d7be4756d88e4fe61f54f87fc832dd8b8e60c604 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 4 Apr 2024 12:41:18 +0200 Subject: [PATCH 037/156] Fixes after rebase --- .../src/_generated/protocol/satellite.ts | 5 +- .../src/client/conversions/input.ts | 4 - .../client/execution/nonTransactionalDB.ts | 2 +- .../src/client/execution/transactionalDB.ts | 4 +- .../typescript/src/client/model/builder.ts | 5 +- .../src/drivers/node-postgres/database.ts | 14 +- .../src/migrators/query-builder/builder.ts | 12 +- .../migrators/query-builder/sqliteBuilder.ts | 191 +++++++++++++++++- clients/typescript/src/migrators/triggers.ts | 15 +- clients/typescript/src/satellite/oplog.ts | 2 +- clients/typescript/src/satellite/process.ts | 10 +- clients/typescript/src/util/relations.ts | 4 +- .../typescript/test/client/model/datatype.ts | 8 +- .../test/migrators/postgres/triggers.test.ts | 23 +++ .../test/migrators/sqlite/triggers.test.ts | 8 +- .../typescript/test/satellite/client.test.ts | 1 + clients/typescript/test/satellite/common.ts | 4 +- .../test/satellite/process.migration.test.ts | 18 +- .../test/satellite/process.tags.test.ts | 6 +- .../typescript/test/satellite/process.test.ts | 159 ++++++++------- .../typescript/test/support/node-postgres.ts | 2 +- .../electric/satellite/protobuf_messages.ex | 168 ++++++++++++++- protocol/satellite.proto | 4 +- 23 files changed, 524 insertions(+), 145 deletions(-) diff --git a/clients/typescript/src/_generated/protocol/satellite.ts b/clients/typescript/src/_generated/protocol/satellite.ts index bee41f38e9..5bd4e7f18e 100644 --- a/clients/typescript/src/_generated/protocol/satellite.ts +++ b/clients/typescript/src/_generated/protocol/satellite.ts @@ -1147,7 +1147,7 @@ export const SatInStartReplicationReq = { } writer.ldelim(); if (message.sqlDialect !== undefined) { - writer.uint32(48).int32(message.sqlDialect); + writer.uint32(56).int32(message.sqlDialect); } return writer; }, @@ -1214,7 +1214,8 @@ export const SatInStartReplicationReq = { } break; - if (tag !== 48) { + case 7: + if (tag !== 56) { break; } diff --git a/clients/typescript/src/client/conversions/input.ts b/clients/typescript/src/client/conversions/input.ts index fdab24c0d8..8adc61d5d2 100644 --- a/clients/typescript/src/client/conversions/input.ts +++ b/clients/typescript/src/client/conversions/input.ts @@ -382,10 +382,6 @@ function isFilterObject(value: any): boolean { return isObject(value) && !isDataObject(value) } -function isObject(v: any): boolean { - return typeof v === 'object' && !Array.isArray(v) && v !== null -} - /** * Filters out all properties that are not fields (i.e. columns) of this table. * e.g. it removes related fields or filters like `lt`, `equals`, etc. 
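The sqlDialect encoder change above (writer.uint32(48) becoming writer.uint32(56)) tracks the field renumbering of sql_dialect in satellite.proto later in this series: a protobuf field key is (field_number << 3) | wire_type, and sql_dialect moves from field 6 to field 7 with varint wire type 0, so its key becomes 56 instead of 48 (48 encoded field 6, which is already taken by observed_transaction_data). A minimal, illustrative check of that arithmetic, not part of the generated code:

    // protobuf field key = (field_number << 3) | wire_type
    const fieldNumber = 7  // sql_dialect, renumbered from 6
    const wireType = 0     // varint, used for enum values
    console.log((fieldNumber << 3) | wireType)  // prints 56
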
diff --git a/clients/typescript/src/client/execution/nonTransactionalDB.ts b/clients/typescript/src/client/execution/nonTransactionalDB.ts index 79c9c0c8d9..00b0d4188a 100644 --- a/clients/typescript/src/client/execution/nonTransactionalDB.ts +++ b/clients/typescript/src/client/execution/nonTransactionalDB.ts @@ -15,7 +15,7 @@ export class NonTransactionalDB implements DB { ) {} withTableSchema(fields: Fields) { - return new NonTransactionalDB(this._adapter, fields) + return new NonTransactionalDB(this._adapter, fields, this._converter) } run( diff --git a/clients/typescript/src/client/execution/transactionalDB.ts b/clients/typescript/src/client/execution/transactionalDB.ts index fc4b2a0a56..1dc97a757f 100644 --- a/clients/typescript/src/client/execution/transactionalDB.ts +++ b/clients/typescript/src/client/execution/transactionalDB.ts @@ -12,8 +12,8 @@ export class TransactionalDB implements DB { private _tx: Transaction, private _fields: Fields, private _converter: Converter - ) { } - + ) {} + withTableSchema(fields: Fields) { return new TransactionalDB(this._tx, fields, this._converter) } diff --git a/clients/typescript/src/client/model/builder.ts b/clients/typescript/src/client/model/builder.ts index 1d7a84fed8..333dbd1235 100644 --- a/clients/typescript/src/client/model/builder.ts +++ b/clients/typescript/src/client/model/builder.ts @@ -69,7 +69,10 @@ export class Builder { create(i: CreateInput): QueryBuilder { // Make a SQL query out of the data - const query = squelPostgres.insert().into(this._fullyQualifiedTableName).setFields(i.data) + const query = squelPostgres + .insert() + .into(this._fullyQualifiedTableName) + .setFields(i.data) // Adds a `RETURNING` statement that returns all known fields const queryWithReturn = this.returnAllFields(query) diff --git a/clients/typescript/src/drivers/node-postgres/database.ts b/clients/typescript/src/drivers/node-postgres/database.ts index 58d74280d8..997794d703 100644 --- a/clients/typescript/src/drivers/node-postgres/database.ts +++ b/clients/typescript/src/drivers/node-postgres/database.ts @@ -46,7 +46,13 @@ export class ElectricDatabase implements Database { rowsModified: rowCount ?? 
0, } } catch (e) { - console.log("EXEC failed: " + JSON.stringify(e) + "\n" + "Statement was: " + JSON.stringify(statement)) + console.log( + 'EXEC failed: ' + + JSON.stringify(e) + + '\n' + + 'Statement was: ' + + JSON.stringify(statement) + ) throw e } } @@ -57,7 +63,9 @@ type StopFn = () => Promise /** * Creates and opens a DB backed by Postgres */ -export async function createEmbeddedPostgres(config: PostgresConfig): Promise<{ db: ElectricDatabase, stop: StopFn }> { +export async function createEmbeddedPostgres( + config: PostgresConfig +): Promise<{ db: ElectricDatabase; stop: StopFn }> { const EmbeddedPostgres = (await import('embedded-postgres')).default // Initialize Postgres const pg = new EmbeddedPostgres({ @@ -78,7 +86,7 @@ export async function createEmbeddedPostgres(config: PostgresConfig): Promise<{ // because it uniquely identifies the DB return { db: new ElectricDatabase(config.databaseDir, db), - stop: () => pg.stop() + stop: () => pg.stop(), } } diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index 86c0dcf9ea..8e9f03c119 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -336,7 +336,7 @@ export abstract class QueryBuilder { columns: string[], records: Record[], maxParameters: number, - suffixSql: string = '', + suffixSql: string = '' ): Statement[] { const stmts: Statement[] = [] const columnCount = columns.length @@ -358,10 +358,14 @@ export abstract class QueryBuilder { while (processed < recordCount) { positionalParam = 1 // start counting parameters from 1 again const currentInsertCount = Math.min(recordCount - processed, batchMaxSize) - const sql = + let sql = baseSql + - Array.from({ length: currentInsertCount }, makeInsertPattern).join(',') + - ' ' + suffixSql + Array.from({ length: currentInsertCount }, makeInsertPattern).join(',') + + if (suffixSql !== '') { + sql += ' ' + suffixSql + } + const args = records .slice(processed, processed + currentInsertCount) .flatMap((record) => columns.map((col) => record[col] as SqlValue)) diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts index 1730a839fb..c5a03e6fb0 100644 --- a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -1,11 +1,21 @@ import { dedent } from 'ts-dedent' -import { QualifiedTablename } from '../../util' +import { QualifiedTablename, SqlValue, Statement } from '../../util' import { QueryBuilder } from './builder' import { ForeignKey } from '../triggers' class SqliteBuilder extends QueryBuilder { + readonly dialect = 'SQLite' readonly AUTOINCREMENT_PK = 'INTEGER PRIMARY KEY AUTOINCREMENT' readonly BLOB = 'BLOB' + readonly deferForeignKeys = 'PRAGMA defer_foreign_keys = ON;' + readonly getVersion = 'SELECT sqlite_version() AS version' + readonly maxSqlParameters = 65535 + readonly paramSign = '?' 
+ readonly metaTables = [ + 'sqlite_schema', + 'sqlite_sequence', + 'sqlite_temp_schema', + ] pgOnly(_query: string) { return '' @@ -71,12 +81,79 @@ class SqliteBuilder extends QueryBuilder { schema: string, table: string, columns: string[], - values: string[] - ) { - return dedent` - INSERT OR IGNORE INTO ${schema}.${table} (${columns.join(', ')}) - VALUES (${values.join(', ')}); - ` + values: SqlValue[] + ): Statement { + return { + sql: dedent` + INSERT OR IGNORE INTO ${schema}.${table} (${columns.join(', ')}) + VALUES (${columns.map(() => '?').join(', ')}); + `, + args: values, + } + } + + insertOrReplace( + schema: string, + table: string, + columns: string[], + values: Array, + _conflictCols: string[], + _updateCols: string[] + ): Statement { + return { + sql: dedent` + INSERT OR REPLACE INTO ${schema}.${table} (${columns.join(', ')}) + VALUES (${columns.map(() => '?').join(', ')}) + `, + args: values, + } + } + + insertOrReplaceWith( + schema: string, + table: string, + columns: string[], + values: Array, + conflictCols: string[], + updateCols: string[], + updateVals: SqlValue[] + ): Statement { + const { sql: baseSql, args } = this.insertOrReplace( + schema, + table, + columns, + values, + conflictCols, + updateCols + ) + return { + sql: + baseSql + + ` ON CONFLICT DO UPDATE SET ${updateCols + .map((col) => `${col} = ?`) + .join(', ')}`, + args: args!.concat(updateVals), + } + } + + batchedInsertOrReplace( + schema: string, + table: string, + columns: string[], + records: Array>, + _conflictCols: string[], + _updateCols: string[], + maxSqlParameters: number + ): Statement[] { + const baseSql = `INSERT OR REPLACE INTO ${schema}.${table} (${columns.join( + ', ' + )}) VALUES ` + return this.prepareInsertBatchedStatements( + baseSql, + columns, + records, + maxSqlParameters + ) } dropTriggerIfExists( @@ -115,6 +192,24 @@ class SqliteBuilder extends QueryBuilder { return `json_object(${rows})` } + // removes null values from the JSON + // to be consistent with PG behaviour + removeSpaceAndNullValuesFromJson(json: string): string { + return `json_patch('{}', ${json})` + } + + createPKJsonObject(rows: string) { + return this.removeSpaceAndNullValuesFromJson(this.createJsonObject(rows)) + } + + setTriggerSetting( + namespace: string, + tableName: string, + value: 0 | 1 + ): string { + return `INSERT OR IGNORE INTO _electric_trigger_settings (namespace, tablename, flag) VALUES ('${namespace}', '${tableName}', ${value});` + } + createOplogTrigger( opType: 'INSERT' | 'UPDATE' | 'DELETE', namespace: string, @@ -124,7 +219,7 @@ class SqliteBuilder extends QueryBuilder { oldRows: string ): string[] { const opTypeLower = opType.toLowerCase() - const pk = this.createJsonObject(newPKs) + const pk = this.createPKJsonObject(newPKs) // Update has both the old and the new row // Delete only has the old row const newRecord = @@ -165,12 +260,88 @@ class SqliteBuilder extends QueryBuilder { 1 = (SELECT value from _electric_meta WHERE key = 'compensations') BEGIN INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) - SELECT '${fkTableNamespace}', '${fkTableName}', 'COMPENSATION', json_object(${joinedFkPKs}), json_object(${joinedFkPKs}), NULL, NULL - FROM "${fkTableNamespace}"."${fkTableName}" WHERE "${foreignKey.parentKey}" = new."${foreignKey.childKey}"; + SELECT '${fkTableNamespace}', '${fkTableName}', 'COMPENSATION', ${this.createPKJsonObject( + joinedFkPKs + )}, json_object(${joinedFkPKs}), NULL, NULL + FROM "${fkTableNamespace}"."${fkTableName}" WHERE "${ + 
foreignKey.parentKey + }" = new."${foreignKey.childKey}"; END; `, ] } + + setClearTagsForTimestamp( + oplogTable: QualifiedTablename, + shadowTable: QualifiedTablename + ): string { + const oplog = `"${oplogTable.namespace}"."${oplogTable.tablename}"` + const shadow = `"${shadowTable.namespace}"."${shadowTable.tablename}"` + return dedent` + UPDATE ${oplog} + SET clearTags = + CASE WHEN rowid = updates.rowid_of_first_op_in_tx + THEN updates.tags + ELSE ? -- singleton array containing tag of thix TX + END + FROM ( + SELECT shadow.tags as tags, min(op.rowid) as rowid_of_first_op_in_tx + FROM ${shadow} AS shadow + JOIN ${oplog} as op + ON op.namespace = shadow.namespace + AND op.tablename = shadow.tablename + AND op.primaryKey = shadow.primaryKey + WHERE op.timestamp = ? + GROUP BY op.namespace, op.tablename, op.primaryKey + ) AS updates + WHERE ${oplog}.timestamp = ? -- only update operations from this TX + ` + } + + setTagsForShadowRows( + oplogTable: QualifiedTablename, + shadowTable: QualifiedTablename + ): string { + const oplog = `"${oplogTable.namespace}"."${oplogTable.tablename}"` + const shadow = `"${shadowTable.namespace}"."${shadowTable.tablename}"` + return dedent` + INSERT OR REPLACE INTO ${shadow} (namespace, tablename, primaryKey, tags) + SELECT namespace, tablename, primaryKey, ? + FROM ${oplog} AS op + WHERE timestamp = ? + GROUP BY namespace, tablename, primaryKey + HAVING rowid = max(rowid) AND optype != 'DELETE' + ` + } + + removeDeletedShadowRows( + oplogTable: QualifiedTablename, + shadowTable: QualifiedTablename + ): string { + const oplog = `"${oplogTable.namespace}"."${oplogTable.tablename}"` + const shadow = `"${shadowTable.namespace}"."${shadowTable.tablename}"` + // We do an inner join in a CTE instead of a `WHERE EXISTS (...)` + // since this is not reliant on re-executing a query + // for every row in the shadow table, but uses a PK join instead. + return dedent` + WITH _to_be_deleted (rowid) AS ( + SELECT shadow.rowid + FROM ${oplog} AS op + INNER JOIN ${shadow} AS shadow + ON shadow.namespace = op.namespace AND shadow.tablename = op.tablename AND shadow.primaryKey = op.primaryKey + WHERE op.timestamp = ? 
+ GROUP BY op.namespace, op.tablename, op.primaryKey + HAVING op.rowid = max(op.rowid) AND op.optype = 'DELETE' + ) + + DELETE FROM ${shadow} + WHERE rowid IN _to_be_deleted + ` + } + + makePositionalParam(_i: number): string { + return this.paramSign + } } export default new SqliteBuilder() diff --git a/clients/typescript/src/migrators/triggers.ts b/clients/typescript/src/migrators/triggers.ts index c2add6a9fc..ec2ed46872 100644 --- a/clients/typescript/src/migrators/triggers.ts +++ b/clients/typescript/src/migrators/triggers.ts @@ -252,17 +252,22 @@ function joinColsForJSON( // Perform transformations on some columns to ensure consistent // serializability into JSON const transformIfNeeded = (col: string, targetedCol: string) => { - const tpes = colTypes[col] - const sqliteType = tpes.sqliteType - const pgType = tpes.pgType + const colType = colTypes[col] // cast REALs, INT8s, BIGINTs to TEXT to work around SQLite's `json_object` bug - if (sqliteType === 'REAL' || pgType === 'INT8' || pgType === 'BIGINT') { + if ( + colType === 'FLOAT4' || + colType === 'REAL' || + colType === 'DOUBLE PRECISION' || + colType === 'FLOAT8' || + colType === 'INT8' || + colType === 'BIGINT' + ) { return `cast(${targetedCol} as TEXT)` } // transform blobs/bytestrings into hexadecimal strings for JSON encoding - if (sqliteType === 'BLOB' || pgType === 'BYTEA') { + if (colType === 'BYTEA') { return `CASE WHEN ${targetedCol} IS NOT NULL THEN hex(${targetedCol}) ELSE NULL END` } return targetedCol diff --git a/clients/typescript/src/satellite/oplog.ts b/clients/typescript/src/satellite/oplog.ts index a5eab53630..1e87d282b3 100644 --- a/clients/typescript/src/satellite/oplog.ts +++ b/clients/typescript/src/satellite/oplog.ts @@ -387,7 +387,7 @@ function deserialiseRow(str: string, rel: Pick): Rec { export const fromTransaction = ( transaction: DataTransaction, - relations: RelationsCache, + relations: RelationsCache ): OplogEntry[] => { return transaction.changes.map((t) => { const columnValues = t.record ? t.record : t.oldRecord! diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index be68b9542a..18d67f05ea 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -1266,7 +1266,7 @@ export class SatelliteProcess implements Satellite { } async _applyTransaction(transaction: Transaction) { - console.log("APPLY TX: " + JSON.stringify(transaction)) + console.log('APPLY TX: ' + JSON.stringify(transaction)) const origin = transaction.origin! 
const commitTimestamp = new Date(transaction.commit_timestamp.toNumber()) @@ -1315,7 +1315,7 @@ export class SatelliteProcess implements Satellite { const { statements, tablenames } = await this._apply(entries, origin) entries.forEach((e) => opLogEntries.push(e)) statements.forEach((s) => { - console.log("DML stmt: " + JSON.stringify(s)) + console.log('DML stmt: ' + JSON.stringify(s)) stmts.push(s) }) tablenames.forEach((n) => tablenamesSet.add(n)) @@ -1325,7 +1325,7 @@ export class SatelliteProcess implements Satellite { const affectedTables: Map = new Map() changes.forEach((change) => { const changeStmt = { sql: change.sql } - console.log("DDL stmt: " + JSON.stringify(changeStmt)) + console.log('DDL stmt: ' + JSON.stringify(changeStmt)) stmts.push(changeStmt) if ( @@ -1393,13 +1393,13 @@ export class SatelliteProcess implements Satellite { if (transaction.migrationVersion) { // If a migration version is specified // then the transaction is a migration - console.log("APPLYING MIGRATION") + console.log('APPLYING MIGRATION') await this.migrator.applyIfNotAlready({ statements: allStatements, version: transaction.migrationVersion, }) } else { - console.log("APPLYING TRANSACTION") + console.log('APPLYING TRANSACTION') await this.adapter.runInTransaction(...allStatements) } diff --git a/clients/typescript/src/util/relations.ts b/clients/typescript/src/util/relations.ts index 594762bf32..71b6dd96e2 100644 --- a/clients/typescript/src/util/relations.ts +++ b/clients/typescript/src/util/relations.ts @@ -17,7 +17,9 @@ export async function inferRelationsFromDb( const schema = 'public' // TODO for (const table of tableNames) { const tableName = table.name - const columnsForTable = (await adapter.query(builder.getTableInfo(tableName))) as { + const columnsForTable = (await adapter.query( + builder.getTableInfo(tableName) + )) as { name: string type: string notnull: number diff --git a/clients/typescript/test/client/model/datatype.ts b/clients/typescript/test/client/model/datatype.ts index 61c5f1ffd1..68edf93b3d 100644 --- a/clients/typescript/test/client/model/datatype.ts +++ b/clients/typescript/test/client/model/datatype.ts @@ -942,15 +942,15 @@ export const datatypeTests = (test: TestFn) => { bytea: blob, }, }) - + t.deepEqual(res.bytea, blob) - + const fetchRes = await tbl.findUnique({ where: { id: 1, }, }) - + t.deepEqual(fetchRes?.bytea, blob) }) @@ -986,4 +986,4 @@ export const datatypeTests = (test: TestFn) => { t.deepEqual(fetchRes, expectedRes) }) -} \ No newline at end of file +} diff --git a/clients/typescript/test/migrators/postgres/triggers.test.ts b/clients/typescript/test/migrators/postgres/triggers.test.ts index be15ab399d..c371e2153f 100644 --- a/clients/typescript/test/migrators/postgres/triggers.test.ts +++ b/clients/typescript/test/migrators/postgres/triggers.test.ts @@ -252,3 +252,26 @@ test('oplog trigger should handle Infinity values correctly', async (t) => { clearTags: '[]', }) }) + +test('oplog trigger should separate null blobs from empty blobs', async (t) => { + const { db, migrateDb } = t.context + const namespace = personTable.namespace + const tableName = personTable.tableName + + // Migrate the DB with the necessary tables and triggers + await migrateDb() + + // Insert null and empty rows in the table + const insertRowNullSQL = `INSERT INTO "${namespace}"."${tableName}" (id, name, age, bmi, int8, blob) VALUES (1, 'John Doe', 30, 25.5, 7, NULL)` + const insertRowEmptySQL = `INSERT INTO "${namespace}"."${tableName}" (id, name, age, bmi, int8, blob) VALUES (2, 'John 
Doe', 30, 25.5, 7, x'')` + await db.exec({ sql: insertRowNullSQL }) + await db.exec({ sql: insertRowEmptySQL }) + + // Check that the oplog table contains an entry for the inserted row + const { rows: oplogRows } = await db.exec({ + sql: `SELECT * FROM "${satelliteDefaults.oplogTable.namespace}"."${satelliteDefaults.oplogTable.tablename}"`, + }) + t.is(oplogRows.length, 2) + t.regex(oplogRows[0].newRow as string, /,"blob":null,/) + t.regex(oplogRows[1].newRow as string, /,"blob":"",/) +}) diff --git a/clients/typescript/test/migrators/sqlite/triggers.test.ts b/clients/typescript/test/migrators/sqlite/triggers.test.ts index 975c9646bf..5a35d576b3 100644 --- a/clients/typescript/test/migrators/sqlite/triggers.test.ts +++ b/clients/typescript/test/migrators/sqlite/triggers.test.ts @@ -153,12 +153,12 @@ test('oplog trigger should handle Infinity values correctly', async (t) => { }) }) -test('oplog trigger should separate null blobs from empty blobs', (t) => { +test('oplog trigger should separate null blobs from empty blobs', async (t) => { const { db, migrateDb } = t.context const tableName = personTable.tableName // Migrate the DB with the necessary tables and triggers - migrateDb() + await migrateDb() // Insert null and empty rows in the table const insertRowNullSQL = `INSERT INTO ${tableName} (id, name, age, bmi, int8, blob) VALUES (1, 'John Doe', 30, 25.5, 7, NULL)` @@ -168,7 +168,9 @@ test('oplog trigger should separate null blobs from empty blobs', (t) => { // Check that the oplog table contains an entry for the inserted row const oplogRows = db - .prepare(`SELECT * FROM ${satelliteDefaults.oplogTable}`) + .prepare( + `SELECT * FROM "${satelliteDefaults.oplogTable.namespace}"."${satelliteDefaults.oplogTable.tablename}"` + ) .all() t.is(oplogRows.length, 2) t.regex(oplogRows[0].newRow, /,"blob":null,/) diff --git a/clients/typescript/test/satellite/client.test.ts b/clients/typescript/test/satellite/client.test.ts index d64640916a..c663923a74 100644 --- a/clients/typescript/test/satellite/client.test.ts +++ b/clients/typescript/test/satellite/client.test.ts @@ -1165,6 +1165,7 @@ test.serial('client correctly handles additional data messages', async (t) => { HKT >, }, + [], [] ) diff --git a/clients/typescript/test/satellite/common.ts b/clients/typescript/test/satellite/common.ts index 23abbc1076..c08c729233 100644 --- a/clients/typescript/test/satellite/common.ts +++ b/clients/typescript/test/satellite/common.ts @@ -1,6 +1,5 @@ import { mkdir, rm as removeFile } from 'node:fs/promises' import { RelationsCache, randomValue } from '../../src/util' -import Database from 'better-sqlite3' import type { Database as SqliteDB } from 'better-sqlite3' import SqliteDatabase from 'better-sqlite3' import { DatabaseAdapter as SqliteDatabaseAdapter } from '../../src/drivers/better-sqlite3' @@ -389,7 +388,8 @@ export async function migrateDb( const namespace = table.namespace const tableName = table.tableName // Create the table in the database on the given namespace - const createTableSQL = `CREATE TABLE "${namespace}"."${tableName}" (id REAL PRIMARY KEY, name TEXT, age INTEGER, bmi REAL, int8 INTEGER, blob BLOB)` + const blobType = builder.dialect === 'SQLite' ? 
'BLOB' : 'BYTEA' + const createTableSQL = `CREATE TABLE "${namespace}"."${tableName}" (id REAL PRIMARY KEY, name TEXT, age INTEGER, bmi REAL, int8 INTEGER, blob ${blobType})` await db.run({ sql: createTableSQL }) // Apply the initial migration on the database diff --git a/clients/typescript/test/satellite/process.migration.test.ts b/clients/typescript/test/satellite/process.migration.test.ts index c280a79d07..9ca83dfa93 100644 --- a/clients/typescript/test/satellite/process.migration.test.ts +++ b/clients/typescript/test/satellite/process.migration.test.ts @@ -195,28 +195,28 @@ export const processMigrationTests = (test: TestFn) => { name: 'id', type: 'INTEGER', isNullable: false, - primaryKey: true, + primaryKey: 1, }, { name: 'value', type: 'TEXT', isNullable: true, - primaryKey: false, + primaryKey: undefined, }, { name: 'other', type: 'INTEGER', isNullable: true, - primaryKey: false, + primaryKey: undefined, }, { name: 'baz', type: 'TEXT', isNullable: true, - primaryKey: false, + primaryKey: undefined, }, ], - } + } satisfies Relation const newTableRelation = { id: 2001, // doesn't matter schema: 'public', @@ -227,22 +227,22 @@ export const processMigrationTests = (test: TestFn) => { name: 'id', type: 'TEXT', isNullable: false, - primaryKey: true, + primaryKey: 1, }, { name: 'foo', type: 'INTEGER', isNullable: true, - primaryKey: false, + primaryKey: undefined, }, { name: 'bar', type: 'TEXT', isNullable: true, - primaryKey: false, + primaryKey: undefined, }, ], - } + } satisfies Relation async function checkMigrationIsApplied(t: ExecutionContext) { await assertDbHasTables(t, 'parent', 'child', 'NewTable') diff --git a/clients/typescript/test/satellite/process.tags.test.ts b/clients/typescript/test/satellite/process.tags.test.ts index 6cd613b673..0053fbaf54 100644 --- a/clients/typescript/test/satellite/process.tags.test.ts +++ b/clients/typescript/test/satellite/process.tags.test.ts @@ -428,7 +428,7 @@ export const processTagsTests = (test: TestFn) => { await runMigrations() const clientId = 'test_client' satellite._setAuthState({ ...authState, clientId }) - + // Insert 4 items in separate snapshots await adapter.run({ sql: `INSERT INTO parent (id, value) VALUES (1, 'val1')`, @@ -446,11 +446,11 @@ export const processTagsTests = (test: TestFn) => { sql: `INSERT INTO parent (id, value) VALUES (4, 'val4')`, }) const ts4 = await satellite._performSnapshot() - + // Now delete them all in a single snapshot await adapter.run({ sql: `DELETE FROM parent` }) const ts5 = await satellite._performSnapshot() - + // Now check that each delete clears the correct tag const entries = await satellite._getEntries(4) t.deepEqual( diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index bf2d7d1477..ff8c165e00 100644 --- a/clients/typescript/test/satellite/process.test.ts +++ b/clients/typescript/test/satellite/process.test.ts @@ -36,7 +36,6 @@ import { SatelliteErrorCode, } from '../../src/util/types' import { opts, relations, ContextType as CommonContextType } from './common' -import { DEFAULT_LOG_POS, numberToBytes, base64 } from '../../src/util/common' import { DEFAULT_LOG_POS, @@ -343,21 +342,21 @@ export const processTests = (test: TestFn) => { test('snapshot of INSERT with blob/Uint8Array', async (t) => { const { adapter, runMigrations, satellite, authState } = t.context - + await runMigrations() - + const blob = new Uint8Array([1, 2, 255, 244, 160, 1]) - + await adapter.run({ sql: `INSERT INTO blobTable(value) VALUES (?)`, args: [blob], 
}) - + await satellite._setAuthState(authState) await satellite._performSnapshot() const entries = await satellite._getEntries() const clientId = satellite._authState!.clientId - + const merged = localOperationsToTableChanges( entries, (timestamp: Date) => { @@ -1718,92 +1717,92 @@ export const processTests = (test: TestFn) => { } }) -test('additional data will be stored properly', async (t) => { - const { client, satellite, adapter } = t.context - const { runMigrations, authState, token } = t.context - await runMigrations() - const tablename = 'parent' + test('additional data will be stored properly', async (t) => { + const { client, satellite, adapter } = t.context + const { runMigrations, authState, token } = t.context + await runMigrations() + const tablename = 'parent' - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData(tablename, parentRecord) + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) - await startSatellite(satellite, authState, token) + await startSatellite(satellite, authState, token) - const shapeDef: Shape = { - tablename, - } + const shapeDef: Shape = { + tablename, + } - satellite!.relations = relations - const { synced } = await satellite.subscribe([shapeDef]) - await synced - await satellite._performSnapshot() - - // Send additional data - await client.additionalDataCb!({ - ref: new Long(10), - changes: [ - { - relation: relations.parent, - tags: ['server@' + Date.now()], - type: DataChangeType.INSERT, - record: { id: 100, value: 'new_value' }, - }, - ], - }) + satellite!.relations = relations + const { synced } = await satellite.subscribe([shapeDef]) + await synced + await satellite._performSnapshot() + + // Send additional data + await client.additionalDataCb!({ + ref: new Long(10), + changes: [ + { + relation: relations.parent, + tags: ['server@' + Date.now()], + type: DataChangeType.INSERT, + record: { id: 100, value: 'new_value' }, + }, + ], + }) - const [result] = await adapter.query({ - sql: 'SELECT * FROM main.parent WHERE id = 100', + const [result] = await adapter.query({ + sql: 'SELECT * FROM main.parent WHERE id = 100', + }) + t.deepEqual(result, { id: 100, value: 'new_value', other: null }) }) - t.deepEqual(result, { id: 100, value: 'new_value', other: null }) -}) -test('GONE messages are applied as DELETEs', async (t) => { - const { client, satellite, adapter } = t.context - const { runMigrations, authState, token } = t.context - await runMigrations() - const tablename = 'parent' + test('GONE messages are applied as DELETEs', async (t) => { + const { client, satellite, adapter } = t.context + const { runMigrations, authState, token } = t.context + await runMigrations() + const tablename = 'parent' - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData(tablename, parentRecord) + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) - await startSatellite(satellite, authState, token) + await startSatellite(satellite, authState, token) - const shapeDef: Shape = { - tablename, - } + const shapeDef: Shape = { + tablename, + } - satellite!.relations = relations - const { synced } = await satellite.subscribe([shapeDef]) - await synced - await satellite._performSnapshot() - - // Send additional data - await client.transactionsCb!({ - commit_timestamp: Long.fromNumber(new 
Date().getTime()), - id: new Long(10), - lsn: new Uint8Array(), - changes: [ - { - relation: relations.parent, - tags: [], - type: DataChangeType.GONE, - record: { id: 1 }, - }, - ], - }) + satellite!.relations = relations + const { synced } = await satellite.subscribe([shapeDef]) + await synced + await satellite._performSnapshot() + + // Send additional data + await client.transactionsCb!({ + commit_timestamp: Long.fromNumber(new Date().getTime()), + id: new Long(10), + lsn: new Uint8Array(), + changes: [ + { + relation: relations.parent, + tags: [], + type: DataChangeType.GONE, + record: { id: 1 }, + }, + ], + }) - const results = await adapter.query({ - sql: 'SELECT * FROM main.parent', + const results = await adapter.query({ + sql: 'SELECT * FROM main.parent', + }) + t.deepEqual(results, []) }) - t.deepEqual(results, []) -}) -test('a subscription that failed to apply because of FK constraint triggers GC', async (t) => { - const { client, satellite, adapter, runMigrations, authState, token } = - t.context - await runMigrations() + test('a subscription that failed to apply because of FK constraint triggers GC', async (t) => { + const { client, satellite, adapter, runMigrations, authState, token } = + t.context + await runMigrations() const tablename = 'child' const namespace = 'main' @@ -2013,7 +2012,7 @@ test('a subscription that failed to apply because of FK constraint triggers GC', const shapeDef1: Shape = { tablename: tablename, } - + const shapeDef2: Shape = { tablename: 'failure', } @@ -2119,7 +2118,7 @@ test('a subscription that failed to apply because of FK constraint triggers GC', t.is((await satellite._getEntries(0)).length, 0) satellite._garbageCollectShapeHandler([ - { uuid: '', definition: { selects: [{ tablename: 'parent' }] } }, + { uuid: '', definition: { tablename: 'parent' } }, ]) await satellite._performSnapshot() diff --git a/clients/typescript/test/support/node-postgres.ts b/clients/typescript/test/support/node-postgres.ts index 303df73ca4..e720a73bc1 100644 --- a/clients/typescript/test/support/node-postgres.ts +++ b/clients/typescript/test/support/node-postgres.ts @@ -1,6 +1,6 @@ import fs from 'fs/promises' import { ElectricDatabase } from '../../src/drivers/node-postgres' -import { createEmbeddedPostgres } from '../../src/drivers/node-postgres/database'; +import { createEmbeddedPostgres } from '../../src/drivers/node-postgres/database' export async function makePgDatabase( name: string, diff --git a/components/electric/lib/electric/satellite/protobuf_messages.ex b/components/electric/lib/electric/satellite/protobuf_messages.ex index 92254aa900..093c4f4547 100644 --- a/components/electric/lib/electric/satellite/protobuf_messages.ex +++ b/components/electric/lib/electric/satellite/protobuf_messages.ex @@ -216,6 +216,80 @@ ) ) end, + defmodule Electric.Satellite.SatInStartReplicationReq.Dialect do + @moduledoc false + ( + defstruct [] + + ( + @spec default() :: :SQLITE + def default() do + :SQLITE + end + ) + + @spec encode(atom() | String.t()) :: integer() | atom() + [ + ( + def encode(:SQLITE) do + 0 + end + + def encode("SQLITE") do + 0 + end + ), + ( + def encode(:POSTGRES) do + 1 + end + + def encode("POSTGRES") do + 1 + end + ) + ] + + def encode(x) do + x + end + + @spec decode(integer()) :: atom() | integer() + [ + def decode(0) do + :SQLITE + end, + def decode(1) do + :POSTGRES + end + ] + + def decode(x) do + x + end + + @spec constants() :: [{integer(), atom()}] + def constants() do + [{0, :SQLITE}, {1, :POSTGRES}] + end + + @spec has_constant?(any()) :: 
boolean() + ( + [ + def has_constant?(:SQLITE) do + true + end, + def has_constant?(:POSTGRES) do + true + end + ] + + def has_constant?(_) do + false + end + ) + ) + end, defmodule Electric.Satellite.SatInStartReplicationReq.Option do @moduledoc false ( @@ -3245,7 +3319,8 @@ options: [], subscription_ids: [], schema_version: nil, - observed_transaction_data: [] + observed_transaction_data: [], + sql_dialect: nil ( ( @@ -3262,6 +3337,7 @@ def encode!(msg) do [] |> encode_schema_version(msg) + |> encode_sql_dialect(msg) |> encode_lsn(msg) |> encode_options(msg) |> encode_subscription_ids(msg) @@ -3374,6 +3450,27 @@ reraise Protox.EncodingError.new(:observed_transaction_data, "invalid field value"), __STACKTRACE__ end + end, + defp encode_sql_dialect(acc, msg) do + try do + case msg.sql_dialect do + nil -> + [acc] + + child_field_value -> + [ + acc, + "8", + child_field_value + |> Electric.Satellite.SatInStartReplicationReq.Dialect.encode() + |> Protox.Encode.encode_enum() + ] + end + rescue + ArgumentError -> + reraise Protox.EncodingError.new(:sql_dialect, "invalid field value"), + __STACKTRACE__ + end end ] @@ -3468,6 +3565,15 @@ {value, rest} = Protox.Decode.parse_uint64(bytes) {[observed_transaction_data: msg.observed_transaction_data ++ [value]], rest} + {7, _, bytes} -> + {value, rest} = + Protox.Decode.parse_enum( + bytes, + Electric.Satellite.SatInStartReplicationReq.Dialect + ) + + {[sql_dialect: value], rest} + {tag, wire_type, rest} -> {_, rest} = Protox.Decode.parse_unknown(tag, wire_type, rest) {[], rest} @@ -3529,7 +3635,10 @@ 2 => {:options, :packed, {:enum, Electric.Satellite.SatInStartReplicationReq.Option}}, 4 => {:subscription_ids, :unpacked, :string}, 5 => {:schema_version, {:oneof, :_schema_version}, :string}, - 6 => {:observed_transaction_data, :packed, :uint64} + 6 => {:observed_transaction_data, :packed, :uint64}, + 7 => + {:sql_dialect, {:oneof, :_sql_dialect}, + {:enum, Electric.Satellite.SatInStartReplicationReq.Dialect}} } end @@ -3543,6 +3652,9 @@ observed_transaction_data: {6, :packed, :uint64}, options: {2, :packed, {:enum, Electric.Satellite.SatInStartReplicationReq.Option}}, schema_version: {5, {:oneof, :_schema_version}, :string}, + sql_dialect: + {7, {:oneof, :_sql_dialect}, + {:enum, Electric.Satellite.SatInStartReplicationReq.Dialect}}, subscription_ids: {4, :unpacked, :string} } end @@ -3596,6 +3708,15 @@ name: :observed_transaction_data, tag: 6, type: :uint64 + }, + %{ + __struct__: Protox.Field, + json_name: "sqlDialect", + kind: {:oneof, :_sql_dialect}, + label: :proto3_optional, + name: :sql_dialect, + tag: 7, + type: {:enum, Electric.Satellite.SatInStartReplicationReq.Dialect} } ] end @@ -3780,6 +3901,46 @@ }} end ), + ( + def field_def(:sql_dialect) do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "sqlDialect", + kind: {:oneof, :_sql_dialect}, + label: :proto3_optional, + name: :sql_dialect, + tag: 7, + type: {:enum, Electric.Satellite.SatInStartReplicationReq.Dialect} + }} + end + + def field_def("sqlDialect") do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "sqlDialect", + kind: {:oneof, :_sql_dialect}, + label: :proto3_optional, + name: :sql_dialect, + tag: 7, + type: {:enum, Electric.Satellite.SatInStartReplicationReq.Dialect} + }} + end + + def field_def("sql_dialect") do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "sqlDialect", + kind: {:oneof, :_sql_dialect}, + label: :proto3_optional, + name: :sql_dialect, + tag: 7, + type: {:enum, Electric.Satellite.SatInStartReplicationReq.Dialect} + }} + end + ), def 
field_def(_) do {:error, :no_such_field} end @@ -3819,6 +3980,9 @@ def default(:observed_transaction_data) do {:error, :no_default_value} end, + def default(:sql_dialect) do + {:error, :no_default_value} + end, def default(_) do {:error, :no_such_field} end diff --git a/protocol/satellite.proto b/protocol/satellite.proto index 6e09632cff..7d91c9af26 100644 --- a/protocol/satellite.proto +++ b/protocol/satellite.proto @@ -137,10 +137,10 @@ message SatInStartReplicationReq { * observed additional data before disconnect */ repeated uint64 observed_transaction_data = 6; - + // The SQL dialect used by the client // Defaults to SQLite if not specified - optional Dialect sql_dialect = 6; + optional Dialect sql_dialect = 7; // Note: // - a client might resume replication only for a subset of previous subscriptions From 9c315d08721738fba17a56884918ba56345fd6fe Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 4 Apr 2024 12:42:04 +0200 Subject: [PATCH 038/156] Modify PG query for table info to return index of column in PK --- .../src/migrators/query-builder/pgBuilder.ts | 25 +++++++++++-------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index 7643e2d72f..a535108177 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -86,16 +86,21 @@ class PgBuilder extends QueryBuilder { ELSE 1 END AS notnull, c.column_default AS dflt_value, - EXISTS ( - SELECT pg_class.relname, pg_attribute.attname - FROM pg_class, pg_attribute, pg_index - WHERE pg_class.oid = pg_attribute.attrelid AND - pg_class.oid = pg_index.indrelid AND - pg_attribute.attnum = ANY(pg_index.indkey) AND - pg_index.indisprimary = 't' AND - pg_class.relname = $1 AND - pg_attribute.attname = c.column_name - ) :: INTEGER AS pk + ( + SELECT CASE + -- if the column is not part of the primary key + -- then return 0 + WHEN NOT pg_attribute.attnum = ANY(pg_index.indkey) THEN 0 + -- else, return the position of the column in the primary key + -- pg_index.indkey is indexed from 0 so we add 1 + ELSE array_position(pg_index.indkey, pg_attribute.attnum) + 1 + END AS pk + FROM pg_class, pg_attribute, pg_index + WHERE pg_class.oid = pg_attribute.attrelid AND + pg_class.oid = pg_index.indrelid AND + pg_class.relname = $1 AND + pg_attribute.attname = c.column_name + ) FROM information_schema.columns AS c WHERE c.table_name = $1; From c9bfb812bdc72994421d29def7e5e2e5ca6dd036 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 4 Apr 2024 14:49:12 +0200 Subject: [PATCH 039/156] Turn bytea values into hexadecimal strings for encoding in JSON row in oplog trigger --- .../src/migrators/query-builder/builder.ts | 5 ++++ .../src/migrators/query-builder/pgBuilder.ts | 4 ++++ .../migrators/query-builder/sqliteBuilder.ts | 4 ++++ clients/typescript/src/migrators/triggers.ts | 23 ++++++++++++------- .../test/migrators/postgres/triggers.test.ts | 6 ++--- .../test/migrators/sqlite/triggers.test.ts | 4 ++-- 6 files changed, 33 insertions(+), 13 deletions(-) diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index 8e9f03c119..cde728572d 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -59,6 +59,11 @@ export abstract class QueryBuilder { */ abstract countTablesIn(countName: 
string, tables: string[]): Statement + /** + * Converts a column value to a hexidecimal string. + */ + abstract toHex(column: string): string + /** * Create an index on a table. */ diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index a535108177..8f7538b096 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -43,6 +43,10 @@ class PgBuilder extends QueryBuilder { } } + toHex(column: string): string { + return `encode(${column}::bytea, 'hex')` + } + createIndex( indexName: string, onTable: QualifiedTablename, diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts index c5a03e6fb0..6b2933f2cc 100644 --- a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -45,6 +45,10 @@ class SqliteBuilder extends QueryBuilder { } } + toHex(column: string): string { + return `hex(${column})` + } + getTableInfo(tablename: string): Statement { return { sql: `SELECT name, type, "notnull", dflt_value, pk FROM pragma_table_info(?)`, diff --git a/clients/typescript/src/migrators/triggers.ts b/clients/typescript/src/migrators/triggers.ts index ec2ed46872..489f4fbffa 100644 --- a/clients/typescript/src/migrators/triggers.ts +++ b/clients/typescript/src/migrators/triggers.ts @@ -43,10 +43,10 @@ export function generateOplogTriggers( ): Statement[] { const { tableName, namespace, columns, primary, columnTypes } = table - const newPKs = joinColsForJSON(primary, columnTypes, 'new') - const oldPKs = joinColsForJSON(primary, columnTypes, 'old') - const newRows = joinColsForJSON(columns, columnTypes, 'new') - const oldRows = joinColsForJSON(columns, columnTypes, 'old') + const newPKs = joinColsForJSON(primary, columnTypes, builder, 'new') + const oldPKs = joinColsForJSON(primary, columnTypes, builder, 'old') + const newRows = joinColsForJSON(columns, columnTypes, builder, 'new') + const oldRows = joinColsForJSON(columns, columnTypes, builder, 'old') const [dropFkTrigger, ...createFkTrigger] = builder.createOrReplaceNoFkUpdateTrigger(namespace, tableName, primary) @@ -119,9 +119,13 @@ function generateCompensationTriggers( // so we need to pass an object containing the column type of the parent key. // We can construct that object because the type of the parent key must be the same // as the type of the child key that is pointing to it. 
- const joinedFkPKs = joinColsForJSON([fkTablePK], { - [fkTablePK]: columnTypes[foreignKey.childKey], - }) + const joinedFkPKs = joinColsForJSON( + [fkTablePK], + { + [fkTablePK]: columnTypes[foreignKey.childKey], + }, + builder + ) const [dropInsertTrigger, ...createInsertTrigger] = builder.createOrReplaceInsertCompensationTrigger( @@ -247,6 +251,7 @@ export function generateTriggers( function joinColsForJSON( cols: string[], colTypes: ColumnTypes, + builder: QueryBuilder, target?: 'new' | 'old' ) { // Perform transformations on some columns to ensure consistent @@ -268,7 +273,9 @@ function joinColsForJSON( // transform blobs/bytestrings into hexadecimal strings for JSON encoding if (colType === 'BYTEA') { - return `CASE WHEN ${targetedCol} IS NOT NULL THEN hex(${targetedCol}) ELSE NULL END` + return `CASE WHEN ${targetedCol} IS NOT NULL THEN ${builder.toHex( + targetedCol + )} ELSE NULL END` } return targetedCol } diff --git a/clients/typescript/test/migrators/postgres/triggers.test.ts b/clients/typescript/test/migrators/postgres/triggers.test.ts index c371e2153f..1bb1ea25ad 100644 --- a/clients/typescript/test/migrators/postgres/triggers.test.ts +++ b/clients/typescript/test/migrators/postgres/triggers.test.ts @@ -263,7 +263,7 @@ test('oplog trigger should separate null blobs from empty blobs', async (t) => { // Insert null and empty rows in the table const insertRowNullSQL = `INSERT INTO "${namespace}"."${tableName}" (id, name, age, bmi, int8, blob) VALUES (1, 'John Doe', 30, 25.5, 7, NULL)` - const insertRowEmptySQL = `INSERT INTO "${namespace}"."${tableName}" (id, name, age, bmi, int8, blob) VALUES (2, 'John Doe', 30, 25.5, 7, x'')` + const insertRowEmptySQL = `INSERT INTO "${namespace}"."${tableName}" (id, name, age, bmi, int8, blob) VALUES (2, 'John Doe', 30, 25.5, 7, '\\x')` await db.exec({ sql: insertRowNullSQL }) await db.exec({ sql: insertRowEmptySQL }) @@ -272,6 +272,6 @@ test('oplog trigger should separate null blobs from empty blobs', async (t) => { sql: `SELECT * FROM "${satelliteDefaults.oplogTable.namespace}"."${satelliteDefaults.oplogTable.tablename}"`, }) t.is(oplogRows.length, 2) - t.regex(oplogRows[0].newRow as string, /,"blob":null,/) - t.regex(oplogRows[1].newRow as string, /,"blob":"",/) + t.regex(oplogRows[0].newRow as string, /,\s*"blob":\s*null\s*,/) + t.regex(oplogRows[1].newRow as string, /,\s*"blob":\s*""\s*,/) }) diff --git a/clients/typescript/test/migrators/sqlite/triggers.test.ts b/clients/typescript/test/migrators/sqlite/triggers.test.ts index 5a35d576b3..9f4f9f594e 100644 --- a/clients/typescript/test/migrators/sqlite/triggers.test.ts +++ b/clients/typescript/test/migrators/sqlite/triggers.test.ts @@ -173,6 +173,6 @@ test('oplog trigger should separate null blobs from empty blobs', async (t) => { ) .all() t.is(oplogRows.length, 2) - t.regex(oplogRows[0].newRow, /,"blob":null,/) - t.regex(oplogRows[1].newRow, /,"blob":"",/) + t.regex(oplogRows[0].newRow, /,\s*"blob":\s*null\s*,/) + t.regex(oplogRows[1].newRow, /,\s*"blob":\s*""\s*,/) }) From 561602a5116606a1e610c7a735c7e6642ca39a73 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 4 Apr 2024 15:00:47 +0200 Subject: [PATCH 040/156] Extend query builder with function to create hexadecimal values. 
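This complements the toHex helper from the previous patch: toHex converts a column value into a hex string when building the oplog row, while hexValue renders a hex string as a dialect-specific blob literal so tests can insert the same bytes on SQLite and Postgres. A small sketch of the difference, assuming the literal formats shown in the diff below (x'..' for SQLite, '\x..' for Postgres bytea); it only builds strings and is illustrative, not the builder implementation itself:

    // Sketch only: dialect-specific blob literals for the hex payload '0001ff'.
    const hex = '0001ff'
    const sqliteLiteral = `x'${hex}'`   // SQLite blob literal: x'0001ff'
    const pgLiteral = `'\\x${hex}'`     // Postgres bytea literal: '\x0001ff'
    console.log(sqliteLiteral, pgLiteral)
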
--- clients/typescript/src/migrators/query-builder/builder.ts | 5 +++++ clients/typescript/src/migrators/query-builder/pgBuilder.ts | 4 ++++ .../typescript/src/migrators/query-builder/sqliteBuilder.ts | 4 ++++ clients/typescript/test/satellite/merge.test.ts | 6 +++++- 4 files changed, 18 insertions(+), 1 deletion(-) diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index cde728572d..0ea6e30277 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -64,6 +64,11 @@ export abstract class QueryBuilder { */ abstract toHex(column: string): string + /** + * Converts a hexidecimal string to a hex value. + */ + abstract hexValue(hexString: string): string + /** * Create an index on a table. */ diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index 8f7538b096..ce71ab0665 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -47,6 +47,10 @@ class PgBuilder extends QueryBuilder { return `encode(${column}::bytea, 'hex')` } + hexValue(hexString: string): string { + return `'\\x${hexString}'` + } + createIndex( indexName: string, onTable: QualifiedTablename, diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts index 6b2933f2cc..a5d5d21026 100644 --- a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -49,6 +49,10 @@ class SqliteBuilder extends QueryBuilder { return `hex(${column})` } + hexValue(hexString: string): string { + return `x'${hexString}'` + } + getTableInfo(tablename: string): Statement { return { sql: `SELECT name, type, "notnull", dflt_value, pk FROM pragma_table_info(?)`, diff --git a/clients/typescript/test/satellite/merge.test.ts b/clients/typescript/test/satellite/merge.test.ts index a9ef785769..8c67d35ec5 100644 --- a/clients/typescript/test/satellite/merge.test.ts +++ b/clients/typescript/test/satellite/merge.test.ts @@ -208,7 +208,11 @@ const setupPG: SetupFn = async (t: ExecutionContext) => { await migrateDb(adapter, personTable, builder) // Insert a row in the table - const insertRowSQL = `INSERT INTO "${personTable.namespace}"."${personTable.tableName}" (id, name, age, bmi, int8, blob) VALUES (54321, 'John Doe', 30, 25.5, 7, x'0001ff')` + const insertRowSQL = `INSERT INTO "${personTable.namespace}"."${ + personTable.tableName + }" (id, name, age, bmi, int8, blob) VALUES (54321, 'John Doe', 30, 25.5, 7, ${builder.hexValue( + '0001ff' + )})` await adapter.run({ sql: insertRowSQL }) // Fetch the oplog entry for the inserted row From e915499bb5cf0997298a074b8bad9a100c222b61 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 4 Apr 2024 16:19:03 +0200 Subject: [PATCH 041/156] Expect quoted table name in SQL builder tests --- .../test/client/model/builder.test.ts | 55 +++++++++---------- .../test/migrators/postgres/schema.test.ts | 3 +- .../test/migrators/postgres/triggers.test.ts | 2 +- 3 files changed, 30 insertions(+), 30 deletions(-) diff --git a/clients/typescript/test/client/model/builder.test.ts b/clients/typescript/test/client/model/builder.test.ts index 815b311548..9c260abb7b 100644 --- a/clients/typescript/test/client/model/builder.test.ts +++ 
b/clients/typescript/test/client/model/builder.test.ts @@ -52,7 +52,7 @@ test('null values are inserted as NULL', (t) => { t.is( query, - "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', NULL) RETURNING id, title, contents, nbr" + `INSERT INTO "Post" (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', NULL) RETURNING id, title, contents, nbr` ) }) @@ -66,7 +66,7 @@ test('create query', (t) => { t.is( query, - "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18) RETURNING id, title, contents, nbr" + `INSERT INTO "Post" (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18) RETURNING id, title, contents, nbr` ) }) @@ -79,7 +79,7 @@ test('createMany query', (t) => { t.is( query, - "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18), ('i2', 't2', 'c2', 21)" + `INSERT INTO "Post" (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18), ('i2', 't2', 'c2', 21)` ) const query2 = tbl @@ -91,7 +91,7 @@ test('createMany query', (t) => { t.is( query2, - "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18), ('i2', 't2', 'c2', 21) ON CONFLICT DO NOTHING" + `INSERT INTO "Post" (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18), ('i2', 't2', 'c2', 21) ON CONFLICT DO NOTHING` ) }) @@ -107,7 +107,7 @@ test('findUnique query', async (t) => { t.is( query, - "SELECT id, nbr, title, contents FROM Post WHERE (id = 'i2') AND (nbr = 21) LIMIT 2" + `SELECT id, nbr, title, contents FROM "Post" WHERE (id = 'i2') AND (nbr = 21) LIMIT 2` ) }) @@ -127,7 +127,7 @@ test('findUnique query with selection', (t) => { t.is( query, - "SELECT id, nbr, title FROM Post WHERE (id = 'i2') AND (nbr = 21) LIMIT 2" + `SELECT id, nbr, title FROM "Post" WHERE (id = 'i2') AND (nbr = 21) LIMIT 2` ) }) @@ -147,7 +147,7 @@ test('findUnique query with selection of NULL value', (t) => { t.is( query, - "SELECT id, nbr, title FROM Post WHERE (id = 'i2') AND (nbr IS NULL) LIMIT 2" + `SELECT id, nbr, title FROM "Post" WHERE (id = 'i2') AND (nbr IS NULL) LIMIT 2` ) }) @@ -167,7 +167,7 @@ test('findUnique query with selection of non-NULL value', (t) => { t.is( query, - "SELECT id, nbr, title FROM Post WHERE (id = 'i2') AND (nbr IS NOT NULL) LIMIT 2" + `SELECT id, nbr, title FROM "Post" WHERE (id = 'i2') AND (nbr IS NOT NULL) LIMIT 2` ) }) @@ -187,7 +187,7 @@ test('findUnique query with selection of row that does not equal a value', (t) = t.is( query, - "SELECT id, nbr, title FROM Post WHERE (id = 'i2') AND (nbr != 5) LIMIT 2" + `SELECT id, nbr, title FROM "Post" WHERE (id = 'i2') AND (nbr != 5) LIMIT 2` ) }) @@ -203,7 +203,7 @@ test('findUnique query supports several filters', (t) => { t.is( query, - "SELECT id, nbr, title, contents FROM Post WHERE (id = 'i2') AND (nbr IN (1, 2, 3)) AND (nbr != 5) LIMIT 2" + `SELECT id, nbr, title, contents FROM "Post" WHERE (id = 'i2') AND (nbr IN (1, 2, 3)) AND (nbr != 5) LIMIT 2` ) }) @@ -242,7 +242,7 @@ test('findMany allows results to be ordered on one field', (t) => { }) .toString() - t.is(query, 'SELECT id, title, contents, nbr FROM Post ORDER BY id ASC') + t.is(query, 'SELECT id, title, contents, nbr FROM "Post" ORDER BY id ASC') }) test('findMany allows results to be ordered on several fields', (t) => { @@ -264,7 +264,7 @@ test('findMany allows results to be ordered on several fields', (t) => { t.is( query, - 'SELECT id, title, contents, nbr FROM Post ORDER BY id ASC, title DESC' + 'SELECT id, title, contents, nbr FROM "Post" ORDER BY id ASC, title DESC' ) }) @@ -279,7 +279,7 @@ test('findMany supports 
pagination', (t) => { }) .toString() - t.is(query, 'SELECT id, title, contents, nbr FROM Post LIMIT 1 OFFSET 1') + t.is(query, 'SELECT id, title, contents, nbr FROM "Post" LIMIT 1 OFFSET 1') }) test('findMany supports distinct results', (t) => { @@ -292,7 +292,7 @@ test('findMany supports distinct results', (t) => { }) .toString() - t.is(query, 'SELECT DISTINCT ON (nbr) id, title, contents, nbr FROM Post') + t.is(query, 'SELECT DISTINCT ON (nbr) id, title, contents, nbr FROM "Post"') }) test('findMany supports IN filters in where argument', (t) => { @@ -308,7 +308,7 @@ test('findMany supports IN filters in where argument', (t) => { t.is( query, - 'SELECT nbr, id, title, contents FROM Post WHERE (nbr IN (1, 5, 18))' + 'SELECT nbr, id, title, contents FROM "Post" WHERE (nbr IN (1, 5, 18))' ) }) @@ -325,7 +325,7 @@ test('findMany supports NOT IN filters in where argument', (t) => { t.is( query, - 'SELECT nbr, id, title, contents FROM Post WHERE (nbr NOT IN (1, 5, 18))' + 'SELECT nbr, id, title, contents FROM "Post" WHERE (nbr NOT IN (1, 5, 18))' ) }) @@ -345,7 +345,7 @@ test('findMany supports lt, lte, gt, gte filters in where argument', (t) => { t.is( query, - 'SELECT nbr, id, title, contents FROM Post WHERE (nbr < 11) AND (nbr <= 10) AND (nbr > 4) AND (nbr >= 5)' + 'SELECT nbr, id, title, contents FROM "Post" WHERE (nbr < 11) AND (nbr <= 10) AND (nbr > 4) AND (nbr >= 5)' ) }) @@ -362,7 +362,7 @@ test('findMany supports startsWith filter in where argument', (t) => { t.is( query, - "SELECT title, id, contents, nbr FROM Post WHERE (title LIKE 'foo%')" + `SELECT title, id, contents, nbr FROM "Post" WHERE (title LIKE 'foo%')` ) }) @@ -379,7 +379,7 @@ test('findMany supports endsWith filter in where argument', (t) => { t.is( query, - "SELECT title, id, contents, nbr FROM Post WHERE (title LIKE '%foo')" + `SELECT title, id, contents, nbr FROM "Post" WHERE (title LIKE '%foo')` ) }) @@ -396,7 +396,7 @@ test('findMany supports contains filter in where argument', (t) => { t.is( query, - "SELECT title, id, contents, nbr FROM Post WHERE (title LIKE '%foo%')" + `SELECT title, id, contents, nbr FROM "Post" WHERE (title LIKE '%foo%')` ) }) @@ -437,7 +437,7 @@ test('findMany supports boolean filters in where argument', (t) => { t.is( query, - "SELECT nbr, id, title, contents FROM Post WHERE (title LIKE '%foo%' OR title = 'bar') AND (contents = 'content' AND nbr = 6) AND ((NOT title = 'foobar') AND (NOT title = 'barfoo')) AND (nbr = 5)" + `SELECT nbr, id, title, contents FROM "Post" WHERE (title LIKE '%foo%' OR title = 'bar') AND (contents = 'content' AND nbr = 6) AND ((NOT title = 'foobar') AND (NOT title = 'barfoo')) AND (nbr = 5)` ) }) @@ -468,7 +468,7 @@ test('findMany supports single AND filter and single NOT filter in where argumen t.is( query, - "SELECT nbr, id, title, contents FROM Post WHERE (title LIKE '%foo%' OR title = 'bar') AND (contents = 'content') AND (NOT title = 'foobar') AND (nbr = 5)" + `SELECT nbr, id, title, contents FROM "Post" WHERE (title LIKE '%foo%' OR title = 'bar') AND (contents = 'content') AND (NOT title = 'foobar') AND (nbr = 5)` ) }) @@ -482,7 +482,7 @@ test('update query', (t) => { t.is( query, - "UPDATE Post SET title = 'Foo', contents = 'Bar' WHERE (id = '1') RETURNING id, title, contents, nbr" + `UPDATE "Post" SET title = 'Foo', contents = 'Bar' WHERE (id = '1') RETURNING id, title, contents, nbr` ) }) @@ -496,8 +496,7 @@ test('updateMany query', (t) => { }) .toString() - const sql = - "UPDATE Post SET title = 'Foo', contents = 'Bar' RETURNING id, title, contents, nbr" + 
const sql = `UPDATE "Post" SET title = 'Foo', contents = 'Bar' RETURNING id, title, contents, nbr` t.is(query1, sql) }) @@ -509,7 +508,7 @@ test('delete query', (t) => { }) .toString() - t.is(query, "DELETE FROM Post WHERE (id = 'Foo') AND (title = 'Bar')") + t.is(query, `DELETE FROM "Post" WHERE (id = 'Foo') AND (title = 'Bar')`) }) test('deleteMany query', (t) => { @@ -519,7 +518,7 @@ test('deleteMany query', (t) => { }) .toString() - t.is(query1, "DELETE FROM Post WHERE (id = 'Foo') AND (title = 'Bar')") + t.is(query1, `DELETE FROM "Post" WHERE (id = 'Foo') AND (title = 'Bar')`) const query2 = tbl .deleteMany({ @@ -529,6 +528,6 @@ test('deleteMany query', (t) => { }) .toString() - const sql = 'DELETE FROM Post' + const sql = 'DELETE FROM "Post"' t.is(query2, sql) }) diff --git a/clients/typescript/test/migrators/postgres/schema.test.ts b/clients/typescript/test/migrators/postgres/schema.test.ts index 65a1876617..d58a6655de 100644 --- a/clients/typescript/test/migrators/postgres/schema.test.ts +++ b/clients/typescript/test/migrators/postgres/schema.test.ts @@ -16,9 +16,10 @@ type Context = { stopPG: () => Promise } +let port = 2934 test.beforeEach(async (t) => { const dbName = `schema-migrations-${randomValue()}` - const { db, stop } = await makePgDatabase(dbName, 5432) + const { db, stop } = await makePgDatabase(dbName, port++) const adapter = new DatabaseAdapter(db) t.context = { diff --git a/clients/typescript/test/migrators/postgres/triggers.test.ts b/clients/typescript/test/migrators/postgres/triggers.test.ts index 1bb1ea25ad..56e82ac86f 100644 --- a/clients/typescript/test/migrators/postgres/triggers.test.ts +++ b/clients/typescript/test/migrators/postgres/triggers.test.ts @@ -75,7 +75,7 @@ test('generateTableTriggers should create correct triggers for a table', (t) => 'personTable', 'INSERT', json_strip_nulls(json_build_object('id', cast(new."id" as TEXT))), - jsonb_build_object('age', new."age", 'bmi', cast(new."bmi" as TEXT), 'id', cast(new."id" as TEXT), 'int8', cast(new."int8" as TEXT), 'name', new."name"), + jsonb_build_object('age', new."age", 'blob', CASE WHEN new."blob" IS NOT NULL THEN encode(new."blob"::bytea, 'hex') ELSE NULL END, 'bmi', cast(new."bmi" as TEXT), 'id', cast(new."id" as TEXT), 'int8', cast(new."int8" as TEXT), 'name', new."name"), NULL, NULL ); From 556bc8caf8a9b4d19accac3aa71e005e63ab8f5a Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 4 Apr 2024 16:34:39 +0200 Subject: [PATCH 042/156] Fix positional params in q2 based on builder --- clients/typescript/src/satellite/process.ts | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index 18d67f05ea..700817f485 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -1006,15 +1006,19 @@ export class SatelliteProcess implements Satellite { const q2: Statement = { sql: ` UPDATE ${oplog} - SET clearTags = + SET "clearTags" = CASE WHEN shadow.tags = '[]' OR shadow.tags = '' - THEN '["' || ? || '"]' - ELSE '["' || ? || '",' || substring(shadow.tags, 2) + THEN '["' || ${this.builder.makePositionalParam(1)} || '"]' + ELSE '["' || ${this.builder.makePositionalParam( + 2 + )} || '",' || substring(shadow.tags, 2) END FROM ${shadow} AS shadow WHERE ${oplog}.namespace = shadow.namespace AND ${oplog}.tablename = shadow.tablename - AND ${oplog}.primaryKey = shadow.primaryKey AND ${oplog}.timestamp = ? 
+ AND ${oplog}."primaryKey" = shadow."primaryKey" AND ${oplog}.timestamp = ${this.builder.makePositionalParam( + 3 + )} `, args: [newTag, newTag, timestamp.toISOString()], } From c5b326cd0838e43bce9ea5f442e3d8d10e67e644 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 4 Apr 2024 16:35:07 +0200 Subject: [PATCH 043/156] Fix some blob and tag tests --- clients/typescript/test/migrators/postgres/triggers.test.ts | 6 +++--- clients/typescript/test/satellite/process.tags.test.ts | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/clients/typescript/test/migrators/postgres/triggers.test.ts b/clients/typescript/test/migrators/postgres/triggers.test.ts index 56e82ac86f..1629beabb5 100644 --- a/clients/typescript/test/migrators/postgres/triggers.test.ts +++ b/clients/typescript/test/migrators/postgres/triggers.test.ts @@ -187,7 +187,7 @@ test('oplog insertion trigger should insert row into oplog table', async (t) => await migrateDb() // Insert a row in the table - const insertRowSQL = `INSERT INTO ${qualifiedPersonTable} (id, name, age, bmi, int8) VALUES (1, 'John Doe', 30, 25.5, 7)` + const insertRowSQL = `INSERT INTO ${qualifiedPersonTable} (id, name, age, bmi, int8, blob) VALUES (1, 'John Doe', 30, 25.5, 7, '\\x0001ff')` await db.exec({ sql: insertRowSQL }) // Check that the oplog table contains an entry for the inserted row @@ -208,7 +208,7 @@ test('oplog insertion trigger should insert row into oplog table', async (t) => // by the `deserialiseRow` function in `src/satellite/oplog.ts` primaryKey: '{"id":"1"}', newRow: - '{"id": "1", "age": 30, "bmi": "25.5", "int8": "7", "name": "John Doe"}', // BigInts are serialized as strings in the oplog + '{"id": "1", "age": 30, "bmi": "25.5", "blob": "0001ff", "int8": "7", "name": "John Doe"}', // BigInts are serialized as strings in the oplog oldRow: null, timestamp: null, rowid: 1, @@ -245,7 +245,7 @@ test('oplog trigger should handle Infinity values correctly', async (t) => { // by the `deserialiseRow` function in `src/satellite/oplog.ts` primaryKey: '{"id":"-Infinity"}', newRow: - '{"id": "-Infinity", "age": 30, "bmi": "Infinity", "int8": "7", "name": "John Doe"}', // BigInts are serialized as strings in the oplog + '{"id": "-Infinity", "age": 30, "bmi": "Infinity", "blob": null, "int8": "7", "name": "John Doe"}', // BigInts are serialized as strings in the oplog oldRow: null, timestamp: null, rowid: 1, diff --git a/clients/typescript/test/satellite/process.tags.test.ts b/clients/typescript/test/satellite/process.tags.test.ts index 0053fbaf54..1b481b17d9 100644 --- a/clients/typescript/test/satellite/process.tags.test.ts +++ b/clients/typescript/test/satellite/process.tags.test.ts @@ -137,7 +137,7 @@ export const processTagsTests = (test: TestFn) => { t.is(0, shadowEntry2.length) // clearTags contains previous shadowTag t.like(localEntries2[1], { - clearTags: tag1, + clearTags: genEncodedTags(clientId, [txDate2, txDate1]), timestamp: txDate2.toISOString(), }) From 72978ba95221749902b768c8c3972d8a389947ab Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 8 Apr 2024 08:31:52 +0200 Subject: [PATCH 044/156] Fix process tests --- .../test/satellite/process.tags.test.ts | 12 ++++++------ clients/typescript/test/satellite/process.test.ts | 13 ++++++++++--- .../test/support/migrations/migrations.js | 2 +- .../test/support/migrations/pg-migrations.js | 15 +++++++++++++++ 4 files changed, 32 insertions(+), 10 deletions(-) diff --git a/clients/typescript/test/satellite/process.tags.test.ts 
b/clients/typescript/test/satellite/process.tags.test.ts index 1b481b17d9..93a04457e9 100644 --- a/clients/typescript/test/satellite/process.tags.test.ts +++ b/clients/typescript/test/satellite/process.tags.test.ts @@ -75,9 +75,9 @@ export const processTagsTests = (test: TestFn) => { const entries = await satellite._getEntries() t.is(entries[0].clearTags, encodeTags([])) - t.is(entries[1].clearTags, genEncodedTags(clientId, [txDate1])) - t.is(entries[2].clearTags, genEncodedTags(clientId, [txDate2])) - t.is(entries[3].clearTags, genEncodedTags(clientId, [txDate3])) + t.is(entries[1].clearTags, genEncodedTags(clientId, [txDate2, txDate1])) + t.is(entries[2].clearTags, genEncodedTags(clientId, [txDate3, txDate2])) + t.is(entries[3].clearTags, genEncodedTags(clientId, [txDate4, txDate3])) t.not(txDate1, txDate2) t.not(txDate2, txDate3) @@ -548,7 +548,7 @@ export const processTagsTests = (test: TestFn) => { t.is( updateEntryAfterSnapshot[0].clearTags, - genEncodedTags(authState.clientId, [insertTimestamp]) + genEncodedTags(authState.clientId, [timestampTx2, insertTimestamp]) ) // The second operation (delete) should have the same timestamp @@ -560,7 +560,7 @@ export const processTagsTests = (test: TestFn) => { t.assert(deleteEntryAfterSnapshot[0].timestamp === rawTimestampTx2) t.is( deleteEntryAfterSnapshot[0].clearTags, - genEncodedTags(authState.clientId, [timestampTx2]) + genEncodedTags(authState.clientId, [timestampTx2, insertTimestamp]) ) // The third operation (reinsert) should have the same timestamp @@ -572,7 +572,7 @@ export const processTagsTests = (test: TestFn) => { t.assert(reinsertEntryAfterSnapshot[0].timestamp === rawTimestampTx2) t.is( reinsertEntryAfterSnapshot[0].clearTags, - genEncodedTags(authState.clientId, [timestampTx2]) + genEncodedTags(authState.clientId, [timestampTx2, insertTimestamp]) ) }) diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index ff8c165e00..23eef8e863 100644 --- a/clients/typescript/test/satellite/process.test.ts +++ b/clients/typescript/test/satellite/process.test.ts @@ -122,6 +122,7 @@ export const processTests = (test: TestFn) => { lsn: '', clientId: '', subscriptions: '', + seenAdditionalData: '', }) }) @@ -341,14 +342,16 @@ export const processTests = (test: TestFn) => { }) test('snapshot of INSERT with blob/Uint8Array', async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context + const { adapter, runMigrations, satellite, authState, builder } = t.context await runMigrations() const blob = new Uint8Array([1, 2, 255, 244, 160, 1]) await adapter.run({ - sql: `INSERT INTO blobTable(value) VALUES (?)`, + sql: `INSERT INTO "main"."blobTable"(value) VALUES (${builder.makePositionalParam( + 1 + )})`, args: [blob], }) @@ -364,8 +367,12 @@ export const processTests = (test: TestFn) => { }, relations ) + const qualifiedBlobTable = new QualifiedTablename( + 'main', + 'blobTable' + ).toString() const [_, keyChanges] = - merged['main.blobTable'][`{"value":"${blobToHexString(blob)}"}`] + merged[qualifiedBlobTable][`{"value":"${blobToHexString(blob)}"}`] const resultingValue = keyChanges.changes.value.value t.deepEqual(resultingValue, blob) }) diff --git a/clients/typescript/test/support/migrations/migrations.js b/clients/typescript/test/support/migrations/migrations.js index 393315c968..ee493d6fba 100644 --- a/clients/typescript/test/support/migrations/migrations.js +++ b/clients/typescript/test/support/migrations/migrations.js @@ -18,7 +18,7 @@ export default [ 
statements: [ 'CREATE TABLE IF NOT EXISTS items (\n value TEXT PRIMARY KEY NOT NULL\n) WITHOUT ROWID;', 'CREATE TABLE IF NOT EXISTS bigIntTable (\n value INT8 PRIMARY KEY NOT NULL\n) WITHOUT ROWID;', - 'CREATE TABLE IF NOT EXISTS blobTable (\n value BYTEA PRIMARY KEY NOT NULL\n) WITHOUT ROWID;', + 'CREATE TABLE IF NOT EXISTS blobTable (\n value BLOB PRIMARY KEY NOT NULL\n) WITHOUT ROWID;', 'CREATE TABLE IF NOT EXISTS parent (\n id INTEGER PRIMARY KEY NOT NULL,\n value TEXT,\n other INTEGER DEFAULT 0\n) WITHOUT ROWID;', 'CREATE TABLE IF NOT EXISTS child (\n id INTEGER PRIMARY KEY NOT NULL,\n parent INTEGER NOT NULL,\n FOREIGN KEY(parent) REFERENCES parent(id)\n) WITHOUT ROWID;', 'DROP TABLE IF EXISTS _electric_trigger_settings;', diff --git a/clients/typescript/test/support/migrations/pg-migrations.js b/clients/typescript/test/support/migrations/pg-migrations.js index 9eee3163fc..e2b018058b 100644 --- a/clients/typescript/test/support/migrations/pg-migrations.js +++ b/clients/typescript/test/support/migrations/pg-migrations.js @@ -20,11 +20,14 @@ export default [ 'CREATE TABLE IF NOT EXISTS main."bigIntTable" (\n value BIGINT PRIMARY KEY NOT NULL\n);', 'CREATE TABLE IF NOT EXISTS main.parent (\n id INTEGER PRIMARY KEY NOT NULL,\n value TEXT,\n other INTEGER DEFAULT 0\n);', 'CREATE TABLE IF NOT EXISTS main.child (\n id INTEGER PRIMARY KEY NOT NULL,\n parent INTEGER NOT NULL,\n FOREIGN KEY(parent) REFERENCES main.parent(id) DEFERRABLE INITIALLY IMMEDIATE\n);', + 'CREATE TABLE "main"."blobTable" (value bytea NOT NULL, CONSTRAINT "blobTable_pkey" PRIMARY KEY (value)\n);', 'DROP TABLE IF EXISTS main._electric_trigger_settings;', 'CREATE TABLE main._electric_trigger_settings(namespace TEXT, tablename TEXT, flag INTEGER, PRIMARY KEY (namespace, tablename));', "INSERT INTO main._electric_trigger_settings(namespace, tablename,flag) VALUES ('main', 'child', 1);", "INSERT INTO main._electric_trigger_settings(namespace, tablename,flag) VALUES ('main', 'items', 1);", "INSERT INTO main._electric_trigger_settings(namespace, tablename,flag) VALUES ('main', 'parent', 1);", + "INSERT INTO main._electric_trigger_settings(namespace, tablename,flag) VALUES ('main', 'bigIntTable', 1);", + "INSERT INTO main._electric_trigger_settings(namespace, tablename,flag) VALUES ('main', 'blobTable', 1);", 'DROP TRIGGER IF EXISTS update_ensure_main_child_primarykey ON main.child;', ` @@ -587,6 +590,18 @@ export default [ FOR EACH ROW EXECUTE FUNCTION delete_main_bigIntTable_into_oplog_function(); `, + 'DROP TRIGGER IF EXISTS update_ensure_main_blobTable_primarykey ON "main"."blobTable";', + 'CREATE OR REPLACE FUNCTION update_ensure_main_blobTable_primarykey_function()\nRETURNS TRIGGER AS $$\nBEGIN\n IF OLD."value" IS DISTINCT FROM NEW."value" THEN\n RAISE EXCEPTION \'Cannot change the value of column value as it belongs to the primary key\';\n END IF;\n RETURN NEW;\nEND;\n$$ LANGUAGE plpgsql;', + 'CREATE TRIGGER update_ensure_main_blobTable_primarykey\n BEFORE UPDATE ON "main"."blobTable"\n FOR EACH ROW\n EXECUTE FUNCTION update_ensure_main_blobTable_primarykey_function();', + 'DROP TRIGGER IF EXISTS insert_main_blobTable_into_oplog ON "main"."blobTable";', + "CREATE OR REPLACE FUNCTION insert_main_blobTable_into_oplog_function()\n RETURNS TRIGGER AS $$\n BEGIN\n DECLARE\n flag_value INTEGER;\n BEGIN\n -- Get the flag value from _electric_trigger_settings\n SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'blobTable';\n\n IF flag_value = 1 THEN\n -- Insert into 
_electric_oplog\n INSERT INTO main._electric_oplog (namespace, tablename, optype, \"primaryKey\", \"newRow\", \"oldRow\", timestamp)\n VALUES (\n 'main',\n 'blobTable',\n 'INSERT',\n json_strip_nulls(json_build_object('value', CASE WHEN new.\"value\" IS NOT NULL THEN encode(new.\"value\"::bytea, 'hex') ELSE NULL END)),\n jsonb_build_object('value', CASE WHEN new.\"value\" IS NOT NULL THEN encode(new.\"value\"::bytea, 'hex') ELSE NULL END),\n NULL,\n NULL\n );\n END IF;\n\n RETURN NEW;\n END;\n END;\n $$ LANGUAGE plpgsql;", + 'CREATE TRIGGER insert_main_blobTable_into_oplog\n AFTER INSERT ON "main"."blobTable"\n FOR EACH ROW\n EXECUTE FUNCTION insert_main_blobTable_into_oplog_function();', + 'DROP TRIGGER IF EXISTS update_main_blobTable_into_oplog ON "main"."blobTable";', + "CREATE OR REPLACE FUNCTION update_main_blobTable_into_oplog_function()\n RETURNS TRIGGER AS $$\n BEGIN\n DECLARE\n flag_value INTEGER;\n BEGIN\n -- Get the flag value from _electric_trigger_settings\n SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'blobTable';\n\n IF flag_value = 1 THEN\n -- Insert into _electric_oplog\n INSERT INTO main._electric_oplog (namespace, tablename, optype, \"primaryKey\", \"newRow\", \"oldRow\", timestamp)\n VALUES (\n 'main',\n 'blobTable',\n 'UPDATE',\n json_strip_nulls(json_build_object('value', CASE WHEN new.\"value\" IS NOT NULL THEN encode(new.\"value\"::bytea, 'hex') ELSE NULL END)),\n jsonb_build_object('value', CASE WHEN new.\"value\" IS NOT NULL THEN encode(new.\"value\"::bytea, 'hex') ELSE NULL END),\n jsonb_build_object('value', CASE WHEN old.\"value\" IS NOT NULL THEN encode(old.\"value\"::bytea, 'hex') ELSE NULL END),\n NULL\n );\n END IF;\n\n RETURN NEW;\n END;\n END;\n $$ LANGUAGE plpgsql;", + 'CREATE TRIGGER update_main_blobTable_into_oplog\n AFTER UPDATE ON "main"."blobTable"\n FOR EACH ROW\n EXECUTE FUNCTION update_main_blobTable_into_oplog_function();', + 'DROP TRIGGER IF EXISTS delete_main_blobTable_into_oplog ON "main"."blobTable";', + "CREATE OR REPLACE FUNCTION delete_main_blobTable_into_oplog_function()\n RETURNS TRIGGER AS $$\n BEGIN\n DECLARE\n flag_value INTEGER;\n BEGIN\n -- Get the flag value from _electric_trigger_settings\n SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'blobTable';\n\n IF flag_value = 1 THEN\n -- Insert into _electric_oplog\n INSERT INTO main._electric_oplog (namespace, tablename, optype, \"primaryKey\", \"newRow\", \"oldRow\", timestamp)\n VALUES (\n 'main',\n 'blobTable',\n 'DELETE',\n json_strip_nulls(json_build_object('value', CASE WHEN old.\"value\" IS NOT NULL THEN encode(old.\"value\"::bytea, 'hex') ELSE NULL END)),\n NULL,\n jsonb_build_object('value', CASE WHEN old.\"value\" IS NOT NULL THEN encode(old.\"value\"::bytea, 'hex') ELSE NULL END),\n NULL\n );\n END IF;\n\n RETURN NEW;\n END;\n END;\n $$ LANGUAGE plpgsql;", + 'CREATE TRIGGER delete_main_blobTable_into_oplog\n AFTER DELETE ON "main"."blobTable"\n FOR EACH ROW\n EXECUTE FUNCTION delete_main_blobTable_into_oplog_function();', ], version: '2', }, From bc316688c4b60ee356f3b5b4bbca349b85882d8a Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 8 Apr 2024 09:18:32 +0200 Subject: [PATCH 045/156] Make statement for inserting additional data compatible with Postgres --- clients/typescript/src/satellite/process.ts | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/clients/typescript/src/satellite/process.ts 
b/clients/typescript/src/satellite/process.ts index 700817f485..26c16f4352 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -1470,12 +1470,14 @@ export class SatelliteProcess implements Satellite { } _addSeenAdditionalDataStmt(ref: string): Statement { - const meta = this.opts.metaTable.toString() + const meta = `"${this.opts.metaTable.namespace}"."${this.opts.metaTable.tablename}"` const sql = ` - INSERT INTO ${meta} (key, value) VALUES ('seenAdditionalData', ?) + INSERT INTO ${meta} (key, value) VALUES ('seenAdditionalData', ${this.builder.makePositionalParam( + 1 + )}) ON CONFLICT (key) DO - UPDATE SET value = value || ',' || excluded.value + UPDATE SET value = ${meta}.value || ',' || excluded.value ` const args = [ref] return { sql, args } From f4bd207047289b6c7d26ffafb952c71dbe3dba5e Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 8 Apr 2024 09:31:51 +0200 Subject: [PATCH 046/156] Fix some failing process tests --- clients/typescript/test/satellite/process.tags.test.ts | 10 +++++----- clients/typescript/test/satellite/process.test.ts | 10 ++++++++-- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/clients/typescript/test/satellite/process.tags.test.ts b/clients/typescript/test/satellite/process.tags.test.ts index 93a04457e9..37a8058e48 100644 --- a/clients/typescript/test/satellite/process.tags.test.ts +++ b/clients/typescript/test/satellite/process.tags.test.ts @@ -431,24 +431,24 @@ export const processTagsTests = (test: TestFn) => { // Insert 4 items in separate snapshots await adapter.run({ - sql: `INSERT INTO parent (id, value) VALUES (1, 'val1')`, + sql: `INSERT INTO main.parent (id, value) VALUES (1, 'val1')`, }) const ts1 = await satellite._performSnapshot() await adapter.run({ - sql: `INSERT INTO parent (id, value) VALUES (2, 'val2')`, + sql: `INSERT INTO main.parent (id, value) VALUES (2, 'val2')`, }) const ts2 = await satellite._performSnapshot() await adapter.run({ - sql: `INSERT INTO parent (id, value) VALUES (3, 'val3')`, + sql: `INSERT INTO main.parent (id, value) VALUES (3, 'val3')`, }) const ts3 = await satellite._performSnapshot() await adapter.run({ - sql: `INSERT INTO parent (id, value) VALUES (4, 'val4')`, + sql: `INSERT INTO main.parent (id, value) VALUES (4, 'val4')`, }) const ts4 = await satellite._performSnapshot() // Now delete them all in a single snapshot - await adapter.run({ sql: `DELETE FROM parent` }) + await adapter.run({ sql: `DELETE FROM main.parent` }) const ts5 = await satellite._performSnapshot() // Now check that each delete clears the correct tag diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index 23eef8e863..91bd8f30c2 100644 --- a/clients/typescript/test/satellite/process.test.ts +++ b/clients/typescript/test/satellite/process.test.ts @@ -2199,12 +2199,18 @@ export const processTests = (test: TestFn) => { }) await adapter.run({ sql: `DELETE FROM ${qualified} WHERE id = 2` }) - await satellite._performSnapshot() + const deleteTx = await satellite._performSnapshot() const oplogs = await adapter.query({ sql: `SELECT * FROM main._electric_oplog`, }) - t.is(oplogs[0].clearTags, genEncodedTags('remote', [expectedTs])) + t.is( + oplogs[0].clearTags, + encodeTags([ + generateTag(satellite._authState!.clientId, deleteTx), + generateTag('remote', expectedTs), + ]) + ) }) test('DELETE after DELETE sends clearTags', async (t) => { From b28de3d5a105357cc4d879336484ad0573e883c2 Mon Sep 17 00:00:00 2001 From: 
Kevin De Porre Date: Mon, 8 Apr 2024 09:48:34 +0200 Subject: [PATCH 047/156] Fix blob type support unit test for PG --- clients/typescript/test/client/model/datatype.ts | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/clients/typescript/test/client/model/datatype.ts b/clients/typescript/test/client/model/datatype.ts index 68edf93b3d..4d8be29859 100644 --- a/clients/typescript/test/client/model/datatype.ts +++ b/clients/typescript/test/client/model/datatype.ts @@ -943,7 +943,10 @@ export const datatypeTests = (test: TestFn) => { }, }) - t.deepEqual(res.bytea, blob) + t.deepEqual( + new Uint8Array(res.bytea!), // convert to Uint8Array for comparison because PG returns a Buffer (which is a subclass of Uint8Array but won't compare equal to it) + blob + ) const fetchRes = await tbl.findUnique({ where: { @@ -951,7 +954,7 @@ export const datatypeTests = (test: TestFn) => { }, }) - t.deepEqual(fetchRes?.bytea, blob) + t.deepEqual(new Uint8Array(fetchRes?.bytea!), blob) }) test('support null values for BLOB type', async (t) => { From e458234aabfb817c938892e7737cfd8c0bf32b89 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 8 Apr 2024 12:21:44 +0200 Subject: [PATCH 048/156] Fixes to unit tests --- clients/typescript/test/migrators/postgres/triggers.test.ts | 6 +++--- clients/typescript/test/satellite/process.test.ts | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/clients/typescript/test/migrators/postgres/triggers.test.ts b/clients/typescript/test/migrators/postgres/triggers.test.ts index 1629beabb5..7e1d72521d 100644 --- a/clients/typescript/test/migrators/postgres/triggers.test.ts +++ b/clients/typescript/test/migrators/postgres/triggers.test.ts @@ -120,8 +120,8 @@ test('generateTableTriggers should create correct triggers for a table', (t) => 'personTable', 'UPDATE', json_strip_nulls(json_build_object('id', cast(new."id" as TEXT))), - jsonb_build_object('age', new."age", 'bmi', cast(new."bmi" as TEXT), 'id', cast(new."id" as TEXT), 'int8', cast(new."int8" as TEXT), 'name', new."name"), - jsonb_build_object('age', old."age", 'bmi', cast(old."bmi" as TEXT), 'id', cast(old."id" as TEXT), 'int8', cast(old."int8" as TEXT), 'name', old."name"), + jsonb_build_object('age', new."age", 'blob', CASE WHEN new."blob" IS NOT NULL THEN encode(new."blob"::bytea, 'hex') ELSE NULL END, 'bmi', cast(new."bmi" as TEXT), 'id', cast(new."id" as TEXT), 'int8', cast(new."int8" as TEXT), 'name', new."name"), + jsonb_build_object('age', old."age", 'blob', CASE WHEN old."blob" IS NOT NULL THEN encode(old."blob"::bytea, 'hex') ELSE NULL END, 'bmi', cast(old."bmi" as TEXT), 'id', cast(old."id" as TEXT), 'int8', cast(old."int8" as TEXT), 'name', old."name"), NULL ); END IF; @@ -166,7 +166,7 @@ test('generateTableTriggers should create correct triggers for a table', (t) => 'DELETE', json_strip_nulls(json_build_object('id', cast(old."id" as TEXT))), NULL, - jsonb_build_object('age', old."age", 'bmi', cast(old."bmi" as TEXT), 'id', cast(old."id" as TEXT), 'int8', cast(old."int8" as TEXT), 'name', old."name"), + jsonb_build_object('age', old."age", 'blob', CASE WHEN old."blob" IS NOT NULL THEN encode(old."blob"::bytea, 'hex') ELSE NULL END, 'bmi', cast(old."bmi" as TEXT), 'id', cast(old."id" as TEXT), 'int8', cast(old."int8" as TEXT), 'name', old."name"), NULL ); END IF; diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index 91bd8f30c2..f0f4fcbb25 100644 --- a/clients/typescript/test/satellite/process.test.ts 
+++ b/clients/typescript/test/satellite/process.test.ts @@ -2070,12 +2070,12 @@ export const processTests = (test: TestFn) => { test('unsubscribing all subscriptions does not trigger FK violations', async (t) => { const { satellite, runMigrations, builder } = t.context + await runMigrations() // because the meta tables need to exist for shape GC + satellite._garbageCollectShapeHandler([ { uuid: '', definition: { tablename: 'parent' } }, ]) - await runMigrations() // because the meta tables need to exist for shape GC - const subsManager = new MockSubscriptionsManager( satellite._garbageCollectShapeHandler.bind(satellite) ) From 7cc404f7a6b7c3e9e04846922118f3d455ac7b48 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 8 Apr 2024 15:21:46 +0200 Subject: [PATCH 049/156] Remove obsolete method in query builder --- .../src/migrators/query-builder/builder.ts | 9 ---- .../src/migrators/query-builder/pgBuilder.ts | 41 ------------------- .../migrators/query-builder/sqliteBuilder.ts | 27 ------------ 3 files changed, 77 deletions(-) diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index 0ea6e30277..075c0e2d7b 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -301,15 +301,6 @@ export abstract class QueryBuilder { createOrReplaceUpdateCompensationTrigger = this.createOrReplaceFkCompensationTrigger.bind(this, 'UPDATE') - /** - * For each first oplog entry per element, - * sets `clearTags` array to previous tags from the shadow table - */ - abstract setClearTagsForTimestamp( - oplogTable: QualifiedTablename, - shadowTable: QualifiedTablename - ): string - /** * For each affected shadow row, set new tag array, unless the last oplog operation was a DELETE */ diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index ce71ab0665..c7febae2a9 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -397,47 +397,6 @@ class PgBuilder extends QueryBuilder { ] } - setClearTagsForTimestamp( - oplogTable: QualifiedTablename, - shadowTable: QualifiedTablename - ): string { - const oplog = `"${oplogTable.namespace}"."${oplogTable.tablename}"` - const shadow = `"${shadowTable.namespace}"."${shadowTable.tablename}"` - /* - return dedent` - UPDATE ${oplog} - SET "clearTags" = - CASE WHEN rowid = updates.rowid_of_first_op_in_tx - THEN updates.tags - ELSE $1 -- singleton array containing tag of thix TX - END - FROM ( - SELECT shadow.tags as tags, rowid_of_first_op_in_tx - FROM ( - SELECT min(op.rowid) as rowid_of_first_op_in_tx - FROM ${shadow} AS shadow - JOIN ${oplog} as op - ON op.namespace = shadow.namespace - AND op.tablename = shadow.tablename - AND op."primaryKey" = shadow."primaryKey" - WHERE op.timestamp = $2 - GROUP BY op.namespace, op.tablename, op."primaryKey" - ) t JOIN ${oplog} s ON s.rowid = t.rowid_of_first_op_in_tx - ) AS updates - WHERE ${oplog}.timestamp = $3 -- only update operations from this TX - ` - */ - return dedent` - UPDATE ${oplog} - SET "clearTags" = ${shadow}.tags - FROM ${shadow} - WHERE ${oplog}.namespace = ${shadow}.namespace - AND ${oplog}.tablename = ${shadow}.tablename - AND ${shadow}."primaryKey"::jsonb @> ${oplog}."primaryKey"::jsonb AND ${shadow}."primaryKey"::jsonb <@ ${oplog}."primaryKey"::jsonb - AND ${oplog}.timestamp = $1 - ` - } - 
setTagsForShadowRows( oplogTable: QualifiedTablename, shadowTable: QualifiedTablename diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts index a5d5d21026..7e8f6010c8 100644 --- a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -279,33 +279,6 @@ class SqliteBuilder extends QueryBuilder { ] } - setClearTagsForTimestamp( - oplogTable: QualifiedTablename, - shadowTable: QualifiedTablename - ): string { - const oplog = `"${oplogTable.namespace}"."${oplogTable.tablename}"` - const shadow = `"${shadowTable.namespace}"."${shadowTable.tablename}"` - return dedent` - UPDATE ${oplog} - SET clearTags = - CASE WHEN rowid = updates.rowid_of_first_op_in_tx - THEN updates.tags - ELSE ? -- singleton array containing tag of thix TX - END - FROM ( - SELECT shadow.tags as tags, min(op.rowid) as rowid_of_first_op_in_tx - FROM ${shadow} AS shadow - JOIN ${oplog} as op - ON op.namespace = shadow.namespace - AND op.tablename = shadow.tablename - AND op.primaryKey = shadow.primaryKey - WHERE op.timestamp = ? - GROUP BY op.namespace, op.tablename, op.primaryKey - ) AS updates - WHERE ${oplog}.timestamp = ? -- only update operations from this TX - ` - } - setTagsForShadowRows( oplogTable: QualifiedTablename, shadowTable: QualifiedTablename From 66d02e68248556efc287127b90c50f89d457a644 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 8 Apr 2024 15:41:19 +0200 Subject: [PATCH 050/156] Fix generated client --- clients/typescript/test/client/generated/index.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clients/typescript/test/client/generated/index.ts b/clients/typescript/test/client/generated/index.ts index 10aafc4227..99aee45a6c 100644 --- a/clients/typescript/test/client/generated/index.ts +++ b/clients/typescript/test/client/generated/index.ts @@ -2,6 +2,7 @@ import { z } from 'zod'; import type { Prisma } from './prismaClient'; import { type TableSchema, DbSchema, Relation, ElectricClient, type HKT } from '../../../src/client/model'; import migrations from './migrations'; +import pgMigrations from './pg-migrations'; ///////////////////////////////////////// // HELPER FUNCTIONS @@ -3783,6 +3784,6 @@ export const tableSchemas = { >, } -export const schema = new DbSchema(tableSchemas, migrations) +export const schema = new DbSchema(tableSchemas, migrations, pgMigrations) export type Electric = ElectricClient export const JsonNull = { __is_electric_json_null__: true } From 390cb6872f34528509b714093cbf6cdb765b0476 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 8 Apr 2024 15:58:26 +0200 Subject: [PATCH 051/156] Adapt client generation unit tests with PG support. 
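A minimal sketch of the helper pattern this commit relies on (names and paths below are illustrative only, not part of the ElectricSQL API): after generating a client for a test, empty migration stubs for both dialects are written next to it so the generated index.ts can import them.

    import * as fs from 'fs'
    import * as path from 'path'

    // Write empty SQLite and Postgres migration bundles into a freshly
    // generated client directory, mirroring what the generator step produces.
    function writeEmptyMigrationStubs(generatedClientDir: string): void {
      fs.writeFileSync(path.join(generatedClientDir, 'migrations.ts'), 'export default []')
      fs.writeFileSync(path.join(generatedClientDir, 'pg-migrations.ts'), 'export default []')
    }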
--- .../typescript/test/cli/migrations/migrate.generation.test.ts | 2 ++ clients/typescript/test/client/generateTestClient.ts | 2 ++ clients/typescript/test/client/generated/pg-migrations.ts | 1 + 3 files changed, 5 insertions(+) create mode 100644 clients/typescript/test/client/generated/pg-migrations.ts diff --git a/clients/typescript/test/cli/migrations/migrate.generation.test.ts b/clients/typescript/test/cli/migrations/migrate.generation.test.ts index 5c215f61e9..f6f1f19024 100644 --- a/clients/typescript/test/cli/migrations/migrate.generation.test.ts +++ b/clients/typescript/test/cli/migrations/migrate.generation.test.ts @@ -153,11 +153,13 @@ const generateClientFromPrismaSchema = async ( `${generatedFilePrefix}_client_${token}` ) const migrationsPath = path.join(generatedClientPath, 'migrations.ts') + const pgMigrationsPath = path.join(generatedClientPath, 'pg-migrations.ts') fs.writeFileSync(schemaFilePath, inlinePrismaSchema) // clean up the generated client if present fs.rmSync(generatedClientPath, { recursive: true, force: true }) await generateClient(schemaFilePath, generatedClientPath) await fs.writeFileSync(migrationsPath, 'export default []') + await fs.writeFileSync(pgMigrationsPath, 'export default []') return generatedClientPath } diff --git a/clients/typescript/test/client/generateTestClient.ts b/clients/typescript/test/client/generateTestClient.ts index 3815c343f4..592b64c573 100644 --- a/clients/typescript/test/client/generateTestClient.ts +++ b/clients/typescript/test/client/generateTestClient.ts @@ -80,6 +80,7 @@ const prismaSchemaPath = path.join(prismaSchemaDir, 'schema.prisma') const generatedClientDir = path.join(thisDir, 'generated') const generatedClientPath = path.join(generatedClientDir, 'index.ts') const migrationsPath = path.join(generatedClientDir, 'migrations.ts') +const pgMigrationsPath = path.join(generatedClientDir, 'pg-migrations.ts') // remove the current generated client if present fs.rmSync(generatedClientDir, { recursive: true, force: true }) @@ -93,6 +94,7 @@ fs.writeFileSync(prismaSchemaPath, prismaSchema) // enhance schema and generate client along with mock migrations await generateClient(prismaSchemaPath, generatedClientDir) fs.writeFileSync(migrationsPath, 'export default []') +fs.writeFileSync(pgMigrationsPath, 'export default []') // fix the generated client import path to point to local schema const clientStr = fs.readFileSync(generatedClientPath).toString() diff --git a/clients/typescript/test/client/generated/pg-migrations.ts b/clients/typescript/test/client/generated/pg-migrations.ts new file mode 100644 index 0000000000..4ad71d6e38 --- /dev/null +++ b/clients/typescript/test/client/generated/pg-migrations.ts @@ -0,0 +1 @@ +export default [] \ No newline at end of file From 5964f92e97716621f1faea76ae0ad44733bf0a4d Mon Sep 17 00:00:00 2001 From: msfstef Date: Tue, 9 Apr 2024 11:05:40 +0300 Subject: [PATCH 052/156] Pass conerter to replication transform --- clients/typescript/src/client/model/client.ts | 5 +-- clients/typescript/src/client/model/table.ts | 9 ++--- .../typescript/src/client/model/transforms.ts | 35 +++++++++++++++++-- .../test/client/model/transforms.test.ts | 12 ++++++- 4 files changed, 49 insertions(+), 12 deletions(-) diff --git a/clients/typescript/src/client/model/client.ts b/clients/typescript/src/client/model/client.ts index e092393369..2fb4825f3e 100644 --- a/clients/typescript/src/client/model/client.ts +++ b/clients/typescript/src/client/model/client.ts @@ -141,10 +141,11 @@ export class ElectricClient< ): 
ElectricClient { const tables = dbDescription.extendedTables const shapeManager = new ShapeManager(satellite) + const converter = dialect === 'SQLite' ? sqliteConverter : postgresConverter const replicationTransformManager = new ReplicationTransformManager( - satellite + satellite, + converter ) - const converter = dialect === 'SQLite' ? sqliteConverter : postgresConverter const inputTransformer = new InputTransformer(converter) const createTable = (tableName: string) => { diff --git a/clients/typescript/src/client/model/table.ts b/clients/typescript/src/client/model/table.ts index 6d36abc3f0..4b27b55a8b 100644 --- a/clients/typescript/src/client/model/table.ts +++ b/clients/typescript/src/client/model/table.ts @@ -42,10 +42,7 @@ import { NarrowInclude } from '../input/inputNarrowing' import { IShapeManager } from './shapes' import { ShapeSubscription } from '../../satellite' import { Rel, Shape } from '../../satellite/shapes/types' -import { - IReplicationTransformManager, - transformTableRecord, -} from './transforms' +import { IReplicationTransformManager } from './transforms' import { InputTransformer } from '../conversions/input' import { Dialect } from '../../migrators/query-builder/builder' @@ -1625,7 +1622,7 @@ export class Table< this._qualifiedTableName, { transformInbound: (record) => - transformTableRecord( + this._replicationTransformManager.transformTableRecord( record, i.transformInbound, this._fields, @@ -1634,7 +1631,7 @@ export class Table< ), transformOutbound: (record) => - transformTableRecord( + this._replicationTransformManager.transformTableRecord( record, i.transformOutbound, this._fields, diff --git a/clients/typescript/src/client/model/transforms.ts b/clients/typescript/src/client/model/transforms.ts index 65ab26df23..8dd33664e7 100644 --- a/clients/typescript/src/client/model/transforms.ts +++ b/clients/typescript/src/client/model/transforms.ts @@ -4,6 +4,7 @@ import { ReplicatedRowTransformer, Record as DataRecord, } from '../../util' +import { Converter } from '../conversions/converter' import { Transformation, transformFields } from '../conversions/input' import { validate, @@ -18,12 +19,20 @@ export interface IReplicationTransformManager { transform: ReplicatedRowTransformer ): void clearTableTransform(tableName: QualifiedTablename): void + + transformTableRecord>( + record: DataRecord, + transformRow: (row: T) => T, + fields: Fields, + schema: z.ZodTypeAny, + immutableFields: string[] + ): DataRecord } export class ReplicationTransformManager implements IReplicationTransformManager { - constructor(private satellite: Satellite) {} + constructor(private satellite: Satellite, private converter: Converter) {} setTableTransform( tableName: QualifiedTablename, @@ -35,6 +44,23 @@ export class ReplicationTransformManager clearTableTransform(tableName: QualifiedTablename): void { this.satellite.clearReplicationTransform(tableName) } + + transformTableRecord>( + record: DataRecord, + transformRow: (row: T) => T, + fields: Fields, + schema: z.ZodTypeAny, + immutableFields: string[] + ): DataRecord { + return transformTableRecord( + record, + transformRow, + fields, + schema, + this.converter, + immutableFields + ) + } } /** @@ -53,13 +79,15 @@ export function transformTableRecord>( transformRow: (row: T) => T, fields: Fields, schema: z.ZodTypeAny, + converter: Converter, immutableFields: string[] ): DataRecord { // parse raw record according to specified fields const parsedRow = transformFields( record, fields, - Transformation.Sqlite2Js + converter, + 
Transformation.Decode ) as T // apply specified transformation @@ -70,7 +98,8 @@ export function transformTableRecord>( const transformedRecord = transformFields( validatedTransformedParsedRow, fields, - Transformation.Js2Sqlite + converter, + Transformation.Encode ) as DataRecord // check if any of the immutable fields were modified diff --git a/clients/typescript/test/client/model/transforms.test.ts b/clients/typescript/test/client/model/transforms.test.ts index cd1cd07297..8ec91b9ffb 100644 --- a/clients/typescript/test/client/model/transforms.test.ts +++ b/clients/typescript/test/client/model/transforms.test.ts @@ -8,6 +8,7 @@ import { schema, Post } from '../generated' import { transformTableRecord } from '../../../src/client/model/transforms' import { InvalidRecordTransformationError } from '../../../src/client/validation/errors/invalidRecordTransformationError' import { Record } from '../../../src/util' +import { sqliteConverter } from '../../../src/client/conversions/sqlite' const tableName = 'Post' const fields = schema.getFields(tableName) @@ -24,7 +25,14 @@ const post1 = { test('transformTableRecord should validate the input', (t) => { const liftedTransform = (r: Record) => - transformTableRecord(r, (row: Post) => row, fields, modelSchema, []) + transformTableRecord( + r, + (row: Post) => row, + fields, + modelSchema, + sqliteConverter, + [] + ) // should not throw for properly typed input t.notThrows(() => liftedTransform(post1)) @@ -52,6 +60,7 @@ test('transformTableRecord should validate the output', (t) => { }), fields, modelSchema, + sqliteConverter, [] ) // should throw for improperly typed input @@ -68,6 +77,7 @@ test('transformTableRecord should validate output does not modify immutable fiel }), fields, modelSchema, + sqliteConverter, ['title'] ) t.throws(() => liftedTransform(post1), { From bdd6530f3be4f9f77a911dd896feedd07687d00f Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 9 Apr 2024 16:32:25 +0200 Subject: [PATCH 053/156] Introduce default namespaces for PG and SQLite --- clients/typescript/src/client/model/table.ts | 4 +- clients/typescript/src/config/index.ts | 8 +- clients/typescript/src/migrators/bundle.ts | 21 +- .../src/migrators/query-builder/builder.ts | 77 ++-- .../src/migrators/query-builder/pgBuilder.ts | 53 +-- .../migrators/query-builder/sqliteBuilder.ts | 43 +- clients/typescript/src/migrators/schema.ts | 7 +- clients/typescript/src/migrators/triggers.ts | 36 +- clients/typescript/src/satellite/config.ts | 43 +- clients/typescript/src/satellite/mock.ts | 3 +- clients/typescript/src/satellite/oplog.ts | 5 +- clients/typescript/src/satellite/process.ts | 31 +- clients/typescript/src/satellite/registry.ts | 2 +- .../test/client/model/shapes.test.ts | 2 +- .../typescript/test/migrators/builder.test.ts | 24 +- .../test/migrators/postgres/builder.test.ts | 2 +- .../test/migrators/postgres/schema.test.ts | 5 +- .../test/migrators/postgres/triggers.test.ts | 57 +-- .../test/migrators/sqlite/schema.test.ts | 5 +- .../test/migrators/sqlite/triggers.test.ts | 11 +- clients/typescript/test/satellite/common.ts | 40 +- .../typescript/test/satellite/merge.test.ts | 27 +- .../postgres/process.migration.test.ts | 3 +- .../satellite/postgres/process.tags.test.ts | 3 +- .../test/satellite/postgres/process.test.ts | 8 +- .../satellite/postgres/process.timing.test.ts | 5 +- .../test/satellite/process.migration.test.ts | 22 +- .../test/satellite/process.tags.test.ts | 128 +++--- .../typescript/test/satellite/process.test.ts | 417 +++++++++++------- 
.../test/satellite/process.timing.test.ts | 22 +- .../test/satellite/registry.test.ts | 3 +- .../test/satellite/serialization.test.ts | 19 +- .../sqlite/process.migration.test.ts | 3 +- .../satellite/sqlite/process.tags.test.ts | 3 +- .../test/satellite/sqlite/process.test.ts | 8 +- .../satellite/sqlite/process.timing.test.ts | 4 +- .../test/support/migrations/pg-migrations.js | 334 +++++++------- .../test/support/satellite-helpers.ts | 25 +- 38 files changed, 872 insertions(+), 641 deletions(-) diff --git a/clients/typescript/src/client/model/table.ts b/clients/typescript/src/client/model/table.ts index 4b27b55a8b..1d2c2b162d 100644 --- a/clients/typescript/src/client/model/table.ts +++ b/clients/typescript/src/client/model/table.ts @@ -1664,7 +1664,9 @@ export function rawQuery( if (isPotentiallyDangerous(sql.sql)) { throw new InvalidArgumentError( 'Cannot use queries that might alter the store - ' + - 'please use read-only queries' + 'please use read-only queries' + + ' - DEBUG:\n' + + JSON.stringify(sql, null, 2) ) } diff --git a/clients/typescript/src/config/index.ts b/clients/typescript/src/config/index.ts index 63dd11288f..0bed84fff9 100644 --- a/clients/typescript/src/config/index.ts +++ b/clients/typescript/src/config/index.ts @@ -45,7 +45,7 @@ export interface ElectricConfig { } export type ElectricConfigWithDialect = ElectricConfig & { - dialect?: 'SQLite' | 'Postgres' + dialect?: 'SQLite' | 'Postgres' // defaults to SQLite } export type HydratedConfig = { @@ -59,6 +59,7 @@ export type HydratedConfig = { } debug: boolean connectionBackOffOptions: ConnectionBackOffOptions + namespace: string } export type InternalElectricConfig = { @@ -88,6 +89,8 @@ export const hydrateConfig = ( const portInt = parseInt(url.port, 10) const port = Number.isNaN(portInt) ? defaultPort : portInt + const defaultNamespace = config.dialect === 'Postgres' ? 'public' : 'main' + const replication = { host: url.hostname, port: port, @@ -105,7 +108,7 @@ export const hydrateConfig = ( timeMultiple, } = config.connectionBackOffOptions ?? - satelliteDefaults.connectionBackOffOptions + satelliteDefaults(defaultNamespace).connectionBackOffOptions const connectionBackOffOptions = { delayFirstAttempt, @@ -121,5 +124,6 @@ export const hydrateConfig = ( replication, debug, connectionBackOffOptions, + namespace: defaultNamespace, } } diff --git a/clients/typescript/src/migrators/bundle.ts b/clients/typescript/src/migrators/bundle.ts index 4217b0c214..ead197a18c 100644 --- a/clients/typescript/src/migrators/bundle.ts +++ b/clients/typescript/src/migrators/bundle.ts @@ -48,14 +48,17 @@ export abstract class BundleMigratorBase implements Migrator { adapter: DatabaseAdapter, migrations: Migration[] = [], queryBuilderConfig: KyselyConfig, - public electricQueryBuilder: QueryBuilder + public electricQueryBuilder: QueryBuilder, + private namespace: string = electricQueryBuilder.defaultNamespace ) { this.adapter = adapter const baseMigration = makeBaseMigration(electricQueryBuilder) this.migrations = [...baseMigration.migrations, ...migrations].map( makeStmtMigration ) - this.queryBuilder = new Kysely(queryBuilderConfig) + this.queryBuilder = new Kysely( + queryBuilderConfig + ).withSchema(namespace) this.eb = expressionBuilder() } @@ -86,7 +89,11 @@ export abstract class BundleMigratorBase implements Migrator { async migrationsTableExists(): Promise { // If this is the first time we're running migrations, then the // migrations table won't exist. 
- const tableExists = this.createTableExistsStatement('main', this.tableName) + const namespace = this.electricQueryBuilder.defaultNamespace + const tableExists = this.createTableExistsStatement( + namespace, + this.tableName + ) const tables = await this.adapter.query(tableExists) return tables.length > 0 } @@ -97,7 +104,7 @@ export abstract class BundleMigratorBase implements Migrator { } const existingRecords = ` - SELECT version FROM "main"."${this.tableName}" + SELECT version FROM "${this.namespace}"."${this.tableName}" ORDER BY id ASC ` @@ -114,7 +121,7 @@ export abstract class BundleMigratorBase implements Migrator { // The hard-coded version '0' below corresponds to the version of the internal migration defined in `schema.ts`. // We're ignoring it because this function is supposed to return the application schema version. const schemaVersion = ` - SELECT version FROM "main"."${this.tableName}" + SELECT version FROM "${this.namespace}"."${this.tableName}" WHERE version != '0' ORDER BY version DESC LIMIT 1 @@ -166,7 +173,7 @@ export abstract class BundleMigratorBase implements Migrator { } const { sql, parameters } = raw` - INSERT INTO "main".${this.eb.table( + INSERT INTO ${this.eb.table( this.tableName )} (version, applied_at) VALUES (${version}, ${Date.now().toString()}) `.compile(this.queryBuilder) @@ -185,7 +192,7 @@ export abstract class BundleMigratorBase implements Migrator { */ async applyIfNotAlready(migration: StmtMigration): Promise { const { sql, parameters } = raw` - SELECT 1 FROM "main".${this.eb.table(this.tableName)} + SELECT 1 FROM ${this.eb.table(this.tableName)} WHERE version = ${migration.version} `.compile(this.queryBuilder) diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index 075c0e2d7b..60fb3c27a4 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -5,6 +5,7 @@ export type Dialect = 'SQLite' | 'Postgres' export abstract class QueryBuilder { abstract readonly dialect: Dialect abstract readonly paramSign: '?' | '$' + abstract readonly defaultNamespace: 'main' | 'public' /** * The autoincrementing integer primary key type for the current SQL dialect. @@ -94,22 +95,22 @@ export abstract class QueryBuilder { * Insert a row into a table, ignoring it if it already exists. */ abstract insertOrIgnore( - schema: string, table: string, columns: string[], - values: SqlValue[] + values: SqlValue[], + schema?: string ): Statement /** * Insert a row into a table, replacing it if it already exists. */ abstract insertOrReplace( - schema: string, table: string, columns: string[], values: Array, conflictCols: string[], - updateCols: string[] + updateCols: string[], + schema?: string ): Statement /** @@ -118,26 +119,26 @@ export abstract class QueryBuilder { * with the provided values `updateVals` */ abstract insertOrReplaceWith( - schema: string, table: string, columns: string[], values: Array, conflictCols: string[], updateCols: string[], - updateVals: SqlValue[] + updateVals: SqlValue[], + schema?: string ): Statement /** * Inserts a batch of rows into a table, replacing them if they already exist. 
*/ abstract batchedInsertOrReplace( - schema: string, table: string, columns: string[], records: Array>, conflictCols: string[], updateCols: string[], - maxSqlParameters: number + maxSqlParameters: number, + schema?: string ): Statement[] /** @@ -145,34 +146,34 @@ export abstract class QueryBuilder { */ abstract dropTriggerIfExists( triggerName: string, - namespace: string, - tablename: string + tablename: string, + namespace?: string ): string /** * Create a trigger that prevents updates to the primary key. */ abstract createNoFkUpdateTrigger( - namespace: string, tablename: string, - pk: string[] + pk: string[], + namespace?: string ): string[] /** * Creates or replaces a trigger that prevents updates to the primary key. */ createOrReplaceNoFkUpdateTrigger( - namespace: string, tablename: string, - pk: string[] + pk: string[], + namespace?: string ): string[] { return [ this.dropTriggerIfExists( `update_ensure_${namespace}_${tablename}_primarykey`, - namespace, - tablename + tablename, + namespace ), - ...this.createNoFkUpdateTrigger(namespace, tablename, pk), + ...this.createNoFkUpdateTrigger(tablename, pk, namespace), ] } @@ -180,9 +181,9 @@ export abstract class QueryBuilder { * Modifies the trigger setting for the table identified by its tablename and namespace. */ abstract setTriggerSetting( - namespace: string, tableName: string, - value: 0 | 1 + value: 0 | 1, + namespace?: string ): string /** @@ -190,34 +191,34 @@ export abstract class QueryBuilder { */ abstract createOplogTrigger( opType: 'INSERT' | 'UPDATE' | 'DELETE', - namespace: string, tableName: string, newPKs: string, newRows: string, - oldRows: string + oldRows: string, + namespace?: string ): string[] createOrReplaceOplogTrigger( opType: 'INSERT' | 'UPDATE' | 'DELETE', - namespace: string, tableName: string, newPKs: string, newRows: string, - oldRows: string + oldRows: string, + namespace: string = this.defaultNamespace ): string[] { return [ this.dropTriggerIfExists( `${opType.toLowerCase()}_${namespace}_${tableName}_into_oplog`, - namespace, - tableName + tableName, + namespace ), ...this.createOplogTrigger( opType, - namespace, tableName, newPKs, newRows, - oldRows + oldRows, + namespace ), ] } @@ -251,40 +252,40 @@ export abstract class QueryBuilder { */ abstract createFkCompensationTrigger( opType: 'INSERT' | 'UPDATE', - namespace: string, tableName: string, childKey: string, - fkTableNamespace: string, fkTableName: string, joinedFkPKs: string, - foreignKey: ForeignKey + foreignKey: ForeignKey, + namespace?: string, + fkTableNamespace?: string ): string[] createOrReplaceFkCompensationTrigger( opType: 'INSERT' | 'UPDATE', - namespace: string, tableName: string, childKey: string, - fkTableNamespace: string, fkTableName: string, joinedFkPKs: string, - foreignKey: ForeignKey + foreignKey: ForeignKey, + namespace: string = this.defaultNamespace, + fkTableNamespace: string = this.defaultNamespace ): string[] { return [ this.dropTriggerIfExists( `compensation_${opType.toLowerCase()}_${namespace}_${tableName}_${childKey}_into_oplog`, - namespace, - tableName + tableName, + namespace ), ...this.createFkCompensationTrigger( opType, - namespace, tableName, childKey, - fkTableNamespace, fkTableName, joinedFkPKs, - foreignKey + foreignKey, + namespace, + fkTableNamespace ), ] } diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index c7febae2a9..eedb3fff98 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ 
b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -12,6 +12,7 @@ class PgBuilder extends QueryBuilder { readonly deferForeignKeys = 'SET CONSTRAINTS ALL DEFERRED;' readonly getVersion = 'SELECT version();' readonly paramSign = '$' + readonly defaultNamespace = 'public' pgOnly(query: string) { return query @@ -100,7 +101,7 @@ class PgBuilder extends QueryBuilder { -- then return 0 WHEN NOT pg_attribute.attnum = ANY(pg_index.indkey) THEN 0 -- else, return the position of the column in the primary key - -- pg_index.indkey is indexed from 0 so we add 1 + -- pg_index.indkey is indexed from 0 so we do + 1 ELSE array_position(pg_index.indkey, pg_attribute.attnum) + 1 END AS pk FROM pg_class, pg_attribute, pg_index @@ -118,10 +119,10 @@ class PgBuilder extends QueryBuilder { } insertOrIgnore( - schema: string, table: string, columns: string[], - values: SqlValue[] + values: SqlValue[], + schema: string = this.defaultNamespace ): Statement { return { sql: dedent` @@ -134,12 +135,12 @@ class PgBuilder extends QueryBuilder { } insertOrReplace( - schema: string, table: string, columns: string[], values: Array, conflictCols: string[], - updateCols: string[] + updateCols: string[], + schema: string = this.defaultNamespace ): Statement { return { sql: dedent` @@ -155,13 +156,13 @@ class PgBuilder extends QueryBuilder { } insertOrReplaceWith( - schema: string, table: string, columns: string[], values: Array, conflictCols: string[], updateCols: string[], - updateVals: SqlValue[] + updateVals: SqlValue[], + schema: string = this.defaultNamespace ): Statement { return { sql: dedent` @@ -177,13 +178,13 @@ class PgBuilder extends QueryBuilder { } batchedInsertOrReplace( - schema: string, table: string, columns: string[], records: Array>, conflictCols: string[], updateCols: string[], - maxSqlParameters: number + maxSqlParameters: number, + schema: string = this.defaultNamespace ): Statement[] { const baseSql = `INSERT INTO "${schema}"."${table}" (${columns .map(quote) @@ -208,16 +209,16 @@ class PgBuilder extends QueryBuilder { dropTriggerIfExists( triggerName: string, - namespace: string, - tablename: string + tablename: string, + namespace: string = this.defaultNamespace ) { return `DROP TRIGGER IF EXISTS ${triggerName} ON "${namespace}"."${tablename}";` } createNoFkUpdateTrigger( - namespace: string, tablename: string, - pk: string[] + pk: string[], + namespace: string = this.defaultNamespace ): string[] { return [ dedent` @@ -274,12 +275,12 @@ class PgBuilder extends QueryBuilder { } setTriggerSetting( - namespace: string, tableName: string, - value: 0 | 1 + value: 0 | 1, + namespace: string = this.defaultNamespace ): string { return dedent` - INSERT INTO "main"."_electric_trigger_settings" ("namespace", "tablename", "flag") + INSERT INTO "${namespace}"."_electric_trigger_settings" ("namespace", "tablename", "flag") VALUES ('${namespace}', '${tableName}', ${value}) ON CONFLICT DO NOTHING; ` @@ -287,11 +288,11 @@ class PgBuilder extends QueryBuilder { createOplogTrigger( opType: 'INSERT' | 'UPDATE' | 'DELETE', - namespace: string, tableName: string, newPKs: string, newRows: string, - oldRows: string + oldRows: string, + namespace: string = this.defaultNamespace ): string[] { const opTypeLower = opType.toLowerCase() const pk = this.createPKJsonObject(newPKs) @@ -312,11 +313,11 @@ class PgBuilder extends QueryBuilder { flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = '${namespace}' 
AND tablename = '${tableName}'; + SELECT flag INTO flag_value FROM "${namespace}"._electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${tableName}'; IF flag_value = 1 THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + INSERT INTO "${namespace}"._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) VALUES ( '${namespace}', '${tableName}', @@ -344,13 +345,13 @@ class PgBuilder extends QueryBuilder { createFkCompensationTrigger( opType: 'INSERT' | 'UPDATE', - namespace: string, tableName: string, childKey: string, - fkTableNamespace: string, fkTableName: string, joinedFkPKs: string, - foreignKey: ForeignKey + foreignKey: ForeignKey, + namespace: string = this.defaultNamespace, + fkTableNamespace: string = this.defaultNamespace ): string[] { const opTypeLower = opType.toLowerCase() @@ -363,12 +364,12 @@ class PgBuilder extends QueryBuilder { flag_value INTEGER; meta_value INTEGER; BEGIN - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = '${fkTableNamespace}' AND tablename = '${fkTableName}'; + SELECT flag INTO flag_value FROM "${namespace}"._electric_trigger_settings WHERE namespace = '${fkTableNamespace}' AND tablename = '${fkTableName}'; - SELECT value INTO meta_value FROM main._electric_meta WHERE key = 'compensations'; + SELECT value INTO meta_value FROM "${namespace}"._electric_meta WHERE key = 'compensations'; IF flag_value = 1 AND meta_value = 1 THEN - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + INSERT INTO "${namespace}"._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) SELECT '${fkTableNamespace}', '${fkTableName}', diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts index 7e8f6010c8..43a8892773 100644 --- a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -11,6 +11,7 @@ class SqliteBuilder extends QueryBuilder { readonly getVersion = 'SELECT sqlite_version() AS version' readonly maxSqlParameters = 65535 readonly paramSign = '?' 
+ readonly defaultNamespace = 'main' readonly metaTables = [ 'sqlite_schema', 'sqlite_sequence', @@ -86,10 +87,10 @@ class SqliteBuilder extends QueryBuilder { } insertOrIgnore( - schema: string, table: string, columns: string[], - values: SqlValue[] + values: SqlValue[], + schema: string = this.defaultNamespace ): Statement { return { sql: dedent` @@ -101,12 +102,12 @@ class SqliteBuilder extends QueryBuilder { } insertOrReplace( - schema: string, table: string, columns: string[], values: Array, _conflictCols: string[], - _updateCols: string[] + _updateCols: string[], + schema: string = this.defaultNamespace ): Statement { return { sql: dedent` @@ -118,21 +119,21 @@ class SqliteBuilder extends QueryBuilder { } insertOrReplaceWith( - schema: string, table: string, columns: string[], values: Array, conflictCols: string[], updateCols: string[], - updateVals: SqlValue[] + updateVals: SqlValue[], + schema: string = this.defaultNamespace ): Statement { const { sql: baseSql, args } = this.insertOrReplace( - schema, table, columns, values, conflictCols, - updateCols + updateCols, + schema ) return { sql: @@ -145,13 +146,13 @@ class SqliteBuilder extends QueryBuilder { } batchedInsertOrReplace( - schema: string, table: string, columns: string[], records: Array>, _conflictCols: string[], _updateCols: string[], - maxSqlParameters: number + maxSqlParameters: number, + schema: string = this.defaultNamespace ): Statement[] { const baseSql = `INSERT OR REPLACE INTO ${schema}.${table} (${columns.join( ', ' @@ -166,16 +167,16 @@ class SqliteBuilder extends QueryBuilder { dropTriggerIfExists( triggerName: string, - _namespace: string, - _tablename: string + _tablename: string, + _namespace?: string ) { return `DROP TRIGGER IF EXISTS ${triggerName};` } createNoFkUpdateTrigger( - namespace: string, tablename: string, - pk: string[] + pk: string[], + namespace: string = this.defaultNamespace ): string[] { return [ dedent` @@ -211,20 +212,20 @@ class SqliteBuilder extends QueryBuilder { } setTriggerSetting( - namespace: string, tableName: string, - value: 0 | 1 + value: 0 | 1, + namespace: string = this.defaultNamespace ): string { return `INSERT OR IGNORE INTO _electric_trigger_settings (namespace, tablename, flag) VALUES ('${namespace}', '${tableName}', ${value});` } createOplogTrigger( opType: 'INSERT' | 'UPDATE' | 'DELETE', - namespace: string, tableName: string, newPKs: string, newRows: string, - oldRows: string + oldRows: string, + namespace: string = this.defaultNamespace ): string[] { const opTypeLower = opType.toLowerCase() const pk = this.createPKJsonObject(newPKs) @@ -251,13 +252,13 @@ class SqliteBuilder extends QueryBuilder { createFkCompensationTrigger( opType: 'INSERT' | 'UPDATE', - namespace: string, tableName: string, childKey: string, - fkTableNamespace: string, fkTableName: string, joinedFkPKs: string, - foreignKey: ForeignKey + foreignKey: ForeignKey, + namespace: string = this.defaultNamespace, + fkTableNamespace: string = this.defaultNamespace ): string[] { const opTypeLower = opType.toLowerCase() return [ diff --git a/clients/typescript/src/migrators/schema.ts b/clients/typescript/src/migrators/schema.ts index 82a5681524..cf32fe0436 100644 --- a/clients/typescript/src/migrators/schema.ts +++ b/clients/typescript/src/migrators/schema.ts @@ -2,16 +2,13 @@ import { satelliteDefaults } from '../satellite/config' import { QueryBuilder } from './query-builder' export type { ElectricSchema } from '../satellite/config' -const { metaTable, migrationsTable, oplogTable, triggersTable, shadowTable } 
= - satelliteDefaults - export const buildInitialMigration = (builder: QueryBuilder) => { + const { metaTable, migrationsTable, oplogTable, triggersTable, shadowTable } = + satelliteDefaults(builder.defaultNamespace) const data = { migrations: [ { statements: [ - // The main schema, - ...builder.pgOnlyQuery(`CREATE SCHEMA IF NOT EXISTS "main"`), //`-- The ops log table\n`, `CREATE TABLE IF NOT EXISTS "${oplogTable.namespace}"."${oplogTable.tablename}" (\n "rowid" ${builder.AUTOINCREMENT_PK},\n "namespace" TEXT NOT NULL,\n "tablename" TEXT NOT NULL,\n "optype" TEXT NOT NULL,\n "primaryKey" TEXT NOT NULL,\n "newRow" TEXT,\n "oldRow" TEXT,\n "timestamp" TEXT, "clearTags" TEXT DEFAULT '[]' NOT NULL\n);`, // Add an index for the oplog diff --git a/clients/typescript/src/migrators/triggers.ts b/clients/typescript/src/migrators/triggers.ts index 489f4fbffa..cce7e3a83a 100644 --- a/clients/typescript/src/migrators/triggers.ts +++ b/clients/typescript/src/migrators/triggers.ts @@ -49,19 +49,19 @@ export function generateOplogTriggers( const oldRows = joinColsForJSON(columns, columnTypes, builder, 'old') const [dropFkTrigger, ...createFkTrigger] = - builder.createOrReplaceNoFkUpdateTrigger(namespace, tableName, primary) + builder.createOrReplaceNoFkUpdateTrigger(tableName, primary, namespace) const [dropInsertTrigger, ...createInsertTrigger] = builder.createOrReplaceInsertTrigger( - namespace, tableName, newPKs, newRows, - oldRows + oldRows, + namespace ) return [ // Toggles for turning the triggers on and off - builder.setTriggerSetting(namespace, tableName, 1), + builder.setTriggerSetting(tableName, 1, namespace), // Triggers for table ${tableName} // ensures primary key is immutable dropFkTrigger, @@ -70,18 +70,18 @@ export function generateOplogTriggers( dropInsertTrigger, ...createInsertTrigger, ...builder.createOrReplaceUpdateTrigger( - namespace, tableName, newPKs, newRows, - oldRows + oldRows, + namespace ), ...builder.createOrReplaceDeleteTrigger( - namespace, tableName, oldPKs, newRows, - oldRows + oldRows, + namespace ), ].map(mkStatement) } @@ -109,7 +109,7 @@ function generateCompensationTriggers( const makeTriggers = (foreignKey: ForeignKey) => { const { childKey } = foreignKey - const fkTableNamespace = 'main' // currently, Electric always uses the 'main' namespace + const fkTableNamespace = builder.defaultNamespace // currently, Electric always uses the DB's default namespace const fkTableName = foreignKey.table const fkTablePK = foreignKey.parentKey // primary key of the table pointed at by the FK. 
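The pattern driving the builder and trigger changes above is that each dialect's query builder now carries its own default namespace ('main' for SQLite, 'public' for Postgres) and takes the namespace/schema as a trailing, optional argument, so most call sites can simply omit it. A minimal, self-contained sketch of that shape, using stand-in names rather than the actual QueryBuilder interface:

type Statement = { sql: string; args?: unknown[] }

abstract class ExampleBuilder {
  abstract readonly defaultNamespace: string

  // The namespace argument moves to the end and defaults to the dialect's
  // namespace, so callers that don't care about it can leave it off.
  insertOrIgnore(
    table: string,
    columns: string[],
    values: unknown[],
    schema: string = this.defaultNamespace
  ): Statement {
    return {
      sql: `INSERT INTO "${schema}"."${table}" (${columns.join(', ')}) VALUES (${columns
        .map(() => '?')
        .join(', ')})`,
      args: values,
    }
  }
}

class ExamplePgBuilder extends ExampleBuilder {
  readonly defaultNamespace = 'public'
}

class ExampleSqliteBuilder extends ExampleBuilder {
  readonly defaultNamespace = 'main'
}

// Omitting the schema yields the dialect's default qualification:
new ExamplePgBuilder().insertOrIgnore('items', ['id'], [1])     // "public"."items"
new ExampleSqliteBuilder().insertOrIgnore('items', ['id'], [1]) // "main"."items"

Trigger generation then derives namespaces from the builder (builder.defaultNamespace) instead of hard-coding 'main', which is what the hunks above and below perform across pgBuilder, sqliteBuilder and triggers.ts.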
@@ -129,13 +129,13 @@ function generateCompensationTriggers( const [dropInsertTrigger, ...createInsertTrigger] = builder.createOrReplaceInsertCompensationTrigger( - namespace, tableName, childKey, - fkTableNamespace, fkTableName, joinedFkPKs, - foreignKey + foreignKey, + namespace, + fkTableNamespace ) return [ @@ -146,13 +146,13 @@ function generateCompensationTriggers( dropInsertTrigger, ...createInsertTrigger, ...builder.createOrReplaceUpdateCompensationTrigger( - namespace, tableName, foreignKey.childKey, - fkTableNamespace, fkTableName, joinedFkPKs, - foreignKey + foreignKey, + namespace, + fkTableNamespace ), ].map(mkStatement) } @@ -192,9 +192,11 @@ export function generateTriggers( }) const stmts = [ - { sql: 'DROP TABLE IF EXISTS main._electric_trigger_settings;' }, { - sql: 'CREATE TABLE main._electric_trigger_settings(namespace TEXT, tablename TEXT, flag INTEGER, PRIMARY KEY(namespace, tablename));', + sql: `DROP TABLE IF EXISTS "${builder.defaultNamespace}"._electric_trigger_settings;`, + }, + { + sql: `CREATE TABLE "${builder.defaultNamespace}"._electric_trigger_settings(namespace TEXT, tablename TEXT, flag INTEGER, PRIMARY KEY(namespace, tablename));`, }, ...tableTriggers, ] diff --git a/clients/typescript/src/satellite/config.ts b/clients/typescript/src/satellite/config.ts index a9626d4e78..55db11f62f 100644 --- a/clients/typescript/src/satellite/config.ts +++ b/clients/typescript/src/satellite/config.ts @@ -108,24 +108,31 @@ export type Shadow = Selectable export type NewShadow = Insertable export type ShadowUpdate = Updateable -export const satelliteDefaults: SatelliteOpts = { - metaTable: new QualifiedTablename('main', _electric_meta), - migrationsTable: new QualifiedTablename('main', _electric_migrations), - oplogTable: new QualifiedTablename('main', _electric_oplog), - triggersTable: new QualifiedTablename('main', _electric_trigger_settings), - shadowTable: new QualifiedTablename('main', _electric_shadow), - pollingInterval: 2000, - minSnapshotWindow: 40, - clearOnBehindWindow: true, - connectionBackOffOptions: { - delayFirstAttempt: false, - startingDelay: 1000, - jitter: 'full', - maxDelay: 10000, - numOfAttempts: 50, - timeMultiple: 2, - }, - debug: false, +export const satelliteDefaults: (namespace: string) => SatelliteOpts = ( + namespace: string +) => { + return { + metaTable: new QualifiedTablename(namespace, _electric_meta), + migrationsTable: new QualifiedTablename(namespace, _electric_migrations), + oplogTable: new QualifiedTablename(namespace, _electric_oplog), + triggersTable: new QualifiedTablename( + namespace, + _electric_trigger_settings + ), + shadowTable: new QualifiedTablename(namespace, _electric_shadow), + pollingInterval: 2000, + minSnapshotWindow: 40, + clearOnBehindWindow: true, + connectionBackOffOptions: { + delayFirstAttempt: false, + startingDelay: 1000, + jitter: 'full', + maxDelay: 10000, + numOfAttempts: 50, + timeMultiple: 2, + }, + debug: false, + } } export const satelliteClientDefaults = { diff --git a/clients/typescript/src/satellite/mock.ts b/clients/typescript/src/satellite/mock.ts index 5133e65961..75bb5ae381 100644 --- a/clients/typescript/src/satellite/mock.ts +++ b/clients/typescript/src/satellite/mock.ts @@ -161,7 +161,8 @@ export class MockRegistry extends BaseRegistry { throw new Error('Failed to start satellite process') } - const opts = { ...satelliteDefaults, ...overrides } + const namespace = migrator.electricQueryBuilder.defaultNamespace + const opts = { ...satelliteDefaults(namespace), ...overrides } const satellites = 
this.satellites if (satellites[dbName] !== undefined) { diff --git a/clients/typescript/src/satellite/oplog.ts b/clients/typescript/src/satellite/oplog.ts index 1e87d282b3..88049e88d3 100644 --- a/clients/typescript/src/satellite/oplog.ts +++ b/clients/typescript/src/satellite/oplog.ts @@ -387,7 +387,8 @@ function deserialiseRow(str: string, rel: Pick): Rec { export const fromTransaction = ( transaction: DataTransaction, - relations: RelationsCache + relations: RelationsCache, + namespace: string ): OplogEntry[] => { return transaction.changes.map((t) => { const columnValues = t.record ? t.record : t.oldRecord! @@ -400,7 +401,7 @@ export const fromTransaction = ( ) return { - namespace: 'main', // TODO: how? + namespace, tablename: t.relation.table, optype: stringToOpType(t.type), newRow: serialiseRow(t.record), diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index 26c16f4352..63f01a98bc 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -298,9 +298,10 @@ export class SatelliteProcess implements Satellite { async _garbageCollectShapeHandler( shapeDefs: ShapeDefinition[] ): Promise { + const namespace = this.builder.defaultNamespace const allTables = shapeDefs .map((def: ShapeDefinition) => def.definition) - .flatMap((x) => getAllTablesForShape(x)) + .flatMap((x) => getAllTablesForShape(x, namespace)) const tables = uniqWith(allTables, (a, b) => a.isEqual(b)) // TODO: table and schema warrant escaping here too, but they aren't in the triggers table. @@ -477,6 +478,7 @@ export class SatelliteProcess implements Satellite { lsn: LSN, additionalStmts: Statement[] = [] ) { + const namespace = this.builder.defaultNamespace const stmts: Statement[] = [] stmts.push({ sql: this.builder.deferForeignKeys }) @@ -503,7 +505,7 @@ export class SatelliteProcess implements Satellite { // Group all changes by table name to be able to insert them all together for (const op of changes) { - const tableName = new QualifiedTablename('main', op.relation.table) + const tableName = new QualifiedTablename(namespace, op.relation.table) const tableNameString = tableName.toString() if (groupedChanges.has(tableNameString)) { groupedChanges.get(tableName.toString())?.records.push(op.record) @@ -523,7 +525,7 @@ export class SatelliteProcess implements Satellite { }, {} as Record) allArgsForShadowInsert.push({ - namespace: 'main', + namespace, tablename: op.relation.table, primaryKey: primaryKeyToStr(primaryKeyCols), tags: encodeTags(op.tags), @@ -564,13 +566,13 @@ export class SatelliteProcess implements Satellite { // Then do a batched insert for the shadow table const batchedShadowInserts = this.builder.batchedInsertOrReplace( - this.opts.shadowTable.namespace, this.opts.shadowTable.tablename, ['namespace', 'tablename', 'primaryKey', 'tags'], allArgsForShadowInsert, ['namespace', 'tablename', 'primaryKey'], ['namespace', 'tablename', 'tags'], - this.maxSqlParameters + this.maxSqlParameters, + this.opts.shadowTable.namespace ) stmts.push(...batchedShadowInserts) @@ -1227,12 +1229,12 @@ export class SatelliteProcess implements Satellite { _updateShadowTagsStatement(shadow: ShadowEntry): Statement { return this.builder.insertOrReplace( - this.opts.shadowTable.namespace, this.opts.shadowTable.tablename, ['namespace', 'tablename', 'primaryKey', 'tags'], [shadow.namespace, shadow.tablename, shadow.primaryKey, shadow.tags], ['namespace', 'tablename', 'primaryKey'], - ['tags'] + ['tags'], + 
this.opts.shadowTable.namespace ) } @@ -1271,6 +1273,7 @@ export class SatelliteProcess implements Satellite { async _applyTransaction(transaction: Transaction) { console.log('APPLY TX: ' + JSON.stringify(transaction)) + const namespace = this.builder.defaultNamespace const origin = transaction.origin! const commitTimestamp = new Date(transaction.commit_timestamp.toNumber()) @@ -1304,7 +1307,7 @@ export class SatelliteProcess implements Satellite { ...transaction, changes, } - const entries = fromTransaction(tx, this.relations) + const entries = fromTransaction(tx, this.relations, namespace) // Before applying DML statements we need to assign a timestamp to pending operations. // This only needs to be done once, even if there are several DML chunks @@ -1340,7 +1343,7 @@ export class SatelliteProcess implements Satellite { // so store it in `tablenamesSet` such that those // triggers can be disabled while executing the transaction const affectedTable = new QualifiedTablename( - 'main', + namespace, change.table.name ).toString() // store the table information to generate the triggers after this `forEach` @@ -1620,22 +1623,22 @@ export class SatelliteProcess implements Satellite { if (updateColumnStmts.length > 0) { return this.builder.insertOrReplaceWith( - qualifiedTableName.namespace, qualifiedTableName.tablename, columnNames, columnValues, ['id'], updateColumnStmts, - updateColumnStmts.map((col) => fullRow[col]) + updateColumnStmts.map((col) => fullRow[col]), + qualifiedTableName.namespace ) } // no changes, can ignore statement if exists return this.builder.insertOrIgnore( - qualifiedTableName.namespace, qualifiedTableName.tablename, columnNames, - columnValues + columnValues, + qualifiedTableName.namespace ) } @@ -1662,7 +1665,7 @@ export function generateTriggersForTable( ): Statement[] { const table = { tableName: tbl.name, - namespace: 'main', + namespace: builder.defaultNamespace, columns: tbl.columns.map((col) => col.name), primary: tbl.pks, foreignKeys: tbl.fks.map((fk) => { diff --git a/clients/typescript/src/satellite/registry.ts b/clients/typescript/src/satellite/registry.ts index f65f277b88..214d28c527 100644 --- a/clients/typescript/src/satellite/registry.ts +++ b/clients/typescript/src/satellite/registry.ts @@ -218,7 +218,7 @@ export class GlobalRegistry extends BaseRegistry { ) const satelliteOpts: SatelliteOpts = { - ...satelliteDefaults, + ...satelliteDefaults(config.namespace), connectionBackOffOptions: config.connectionBackOffOptions, debug: config.debug, } diff --git a/clients/typescript/test/client/model/shapes.test.ts b/clients/typescript/test/client/model/shapes.test.ts index 94a2a0ff7c..fe1c6c194e 100644 --- a/clients/typescript/test/client/model/shapes.test.ts +++ b/clients/typescript/test/client/model/shapes.test.ts @@ -55,7 +55,7 @@ async function makeContext(t: ExecutionContext) { migrator, notifier, client, - satelliteDefaults + satelliteDefaults(migrator.electricQueryBuilder.defaultNamespace) ) const electric = ElectricClient.create( diff --git a/clients/typescript/test/migrators/builder.test.ts b/clients/typescript/test/migrators/builder.test.ts index cd598ef89d..ccfd361b7f 100644 --- a/clients/typescript/test/migrators/builder.test.ts +++ b/clients/typescript/test/migrators/builder.test.ts @@ -31,7 +31,9 @@ export const makeMigrationMetaData = (builder: QueryBuilder) => { stmts: [ SatOpMigrate_Stmt.fromPartial({ type: SatOpMigrate_Type.CREATE_TABLE, - sql: `CREATE TABLE "main"."stars" (\n "id" TEXT NOT NULL PRIMARY KEY,\n "avatar_url" TEXT NOT NULL,\n 
"name" TEXT,\n "starred_at" TEXT NOT NULL,\n "username" TEXT NOT NULL\n)${builder.sqliteOnly( + sql: `CREATE TABLE "${ + builder.defaultNamespace + }"."stars" (\n "id" TEXT NOT NULL PRIMARY KEY,\n "avatar_url" TEXT NOT NULL,\n "name" TEXT,\n "starred_at" TEXT NOT NULL,\n "username" TEXT NOT NULL\n)${builder.sqliteOnly( ' WITHOUT ROWID' )};\n`, }), @@ -122,7 +124,9 @@ export const bundleTests = (test: TestFn) => { t.is(migration.version, migrationMetaData.version) t.is( migration.statements[0], - `CREATE TABLE "main"."stars" (\n "id" TEXT NOT NULL PRIMARY KEY,\n "avatar_url" TEXT NOT NULL,\n "name" TEXT,\n "starred_at" TEXT NOT NULL,\n "username" TEXT NOT NULL\n)${builder.sqliteOnly( + `CREATE TABLE "${ + builder.defaultNamespace + }"."stars" (\n "id" TEXT NOT NULL PRIMARY KEY,\n "avatar_url" TEXT NOT NULL,\n "name" TEXT,\n "starred_at" TEXT NOT NULL,\n "username" TEXT NOT NULL\n)${builder.sqliteOnly( ' WITHOUT ROWID' )};\n` ) @@ -130,14 +134,14 @@ export const bundleTests = (test: TestFn) => { if (builder.dialect === 'SQLite') { t.is( migration.statements[3], - 'CREATE TRIGGER update_ensure_main_stars_primarykey\n BEFORE UPDATE ON "main"."stars"\nBEGIN\n SELECT\n CASE\n WHEN old."id" != new."id" THEN\n \t\tRAISE (ABORT, \'cannot change the value of column id as it belongs to the primary key\')\n END;\nEND;' + `CREATE TRIGGER update_ensure_${builder.defaultNamespace}_stars_primarykey\n BEFORE UPDATE ON "${builder.defaultNamespace}"."stars"\nBEGIN\n SELECT\n CASE\n WHEN old."id" != new."id" THEN\n \t\tRAISE (ABORT, 'cannot change the value of column id as it belongs to the primary key')\n END;\nEND;` ) } else { // Postgres t.is( migration.statements[3], dedent` - CREATE OR REPLACE FUNCTION update_ensure_main_stars_primarykey_function() + CREATE OR REPLACE FUNCTION update_ensure_${builder.defaultNamespace}_stars_primarykey_function() RETURNS TRIGGER AS $$ BEGIN IF OLD."id" IS DISTINCT FROM NEW."id" THEN @@ -152,10 +156,10 @@ export const bundleTests = (test: TestFn) => { t.is( migration.statements[4], dedent` - CREATE TRIGGER update_ensure_main_stars_primarykey - BEFORE UPDATE ON "main"."stars" + CREATE TRIGGER update_ensure_${builder.defaultNamespace}_stars_primarykey + BEFORE UPDATE ON "${builder.defaultNamespace}"."stars" FOR EACH ROW - EXECUTE FUNCTION update_ensure_main_stars_primarykey_function(); + EXECUTE FUNCTION update_ensure_${builder.defaultNamespace}_stars_primarykey_function(); ` ) } @@ -178,7 +182,7 @@ export const bundleTests = (test: TestFn) => { stmts: [ SatOpMigrate_Stmt.fromPartial({ type: 0, - sql: 'CREATE TABLE "main"."tenants" (\n "id" TEXT NOT NULL,\n "name" TEXT NOT NULL,\n CONSTRAINT "tenants_pkey" PRIMARY KEY ("id")\n) WITHOUT ROWID;\n', + sql: `CREATE TABLE "${builder.defaultNamespace}"."tenants" (\n "id" TEXT NOT NULL,\n "name" TEXT NOT NULL,\n CONSTRAINT "tenants_pkey" PRIMARY KEY ("id")\n) WITHOUT ROWID;\n`, }), ], table: SatOpMigrate_Table.fromPartial({ @@ -216,7 +220,7 @@ export const bundleTests = (test: TestFn) => { stmts: [ SatOpMigrate_Stmt.fromPartial({ type: 0, - sql: 'CREATE TABLE "main"."users" (\n "id" TEXT NOT NULL,\n "name" TEXT NOT NULL,\n "email" TEXT NOT NULL,\n "password_hash" TEXT NOT NULL,\n CONSTRAINT "users_pkey" PRIMARY KEY ("id")\n) WITHOUT ROWID;\n', + sql: `CREATE TABLE "${builder.defaultNamespace}"."users" (\n "id" TEXT NOT NULL,\n "name" TEXT NOT NULL,\n "email" TEXT NOT NULL,\n "password_hash" TEXT NOT NULL,\n CONSTRAINT "users_pkey" PRIMARY KEY ("id")\n) WITHOUT ROWID;\n`, }), ], table: SatOpMigrate_Table.fromPartial({ @@ -270,7 
+274,7 @@ export const bundleTests = (test: TestFn) => { stmts: [ SatOpMigrate_Stmt.fromPartial({ type: 0, - sql: 'CREATE TABLE "main"."tenant_users" (\n "tenant_id" TEXT NOT NULL,\n "user_id" TEXT NOT NULL,\n CONSTRAINT "tenant_users_tenant_id_fkey" FOREIGN KEY ("tenant_id") REFERENCES "tenants" ("id") ON DELETE CASCADE,\n CONSTRAINT "tenant_users_user_id_fkey" FOREIGN KEY ("user_id") REFERENCES "users" ("id") ON DELETE CASCADE,\n CONSTRAINT "tenant_users_pkey" PRIMARY KEY ("tenant_id", "user_id")\n) WITHOUT ROWID;\n', + sql: `CREATE TABLE "${builder.defaultNamespace}"."tenant_users" (\n "tenant_id" TEXT NOT NULL,\n "user_id" TEXT NOT NULL,\n CONSTRAINT "tenant_users_tenant_id_fkey" FOREIGN KEY ("tenant_id") REFERENCES "tenants" ("id") ON DELETE CASCADE,\n CONSTRAINT "tenant_users_user_id_fkey" FOREIGN KEY ("user_id") REFERENCES "users" ("id") ON DELETE CASCADE,\n CONSTRAINT "tenant_users_pkey" PRIMARY KEY ("tenant_id", "user_id")\n) WITHOUT ROWID;\n`, }), ], table: SatOpMigrate_Table.fromPartial({ diff --git a/clients/typescript/test/migrators/postgres/builder.test.ts b/clients/typescript/test/migrators/postgres/builder.test.ts index 8b11088770..2b09d73c72 100644 --- a/clients/typescript/test/migrators/postgres/builder.test.ts +++ b/clients/typescript/test/migrators/postgres/builder.test.ts @@ -39,7 +39,7 @@ test('load migration from meta data', async (t) => { sql: ` SELECT table_name FROM information_schema.tables - WHERE table_schema = 'main' AND table_name = 'stars';`, + WHERE table_schema = 'public' AND table_name = 'stars';`, }) const starIdx = tables.findIndex((tbl) => tbl.table_name === 'stars') diff --git a/clients/typescript/test/migrators/postgres/schema.test.ts b/clients/typescript/test/migrators/postgres/schema.test.ts index d58a6655de..98eacbd753 100644 --- a/clients/typescript/test/migrators/postgres/schema.test.ts +++ b/clients/typescript/test/migrators/postgres/schema.test.ts @@ -39,7 +39,10 @@ test('check schema keys are unique', async (t) => { const migrator = new BundleMigrator(adapter, migrations) await migrator.up() - const metaTable = `"${satelliteDefaults.metaTable.namespace}"."${satelliteDefaults.metaTable.tablename}"` + const defaults = satelliteDefaults( + migrator.electricQueryBuilder.defaultNamespace + ) + const metaTable = `"${defaults.metaTable.namespace}"."${defaults.metaTable.tablename}"` await adapter.run({ sql: `INSERT INTO ${metaTable} (key, value) values ('key', 'value')`, diff --git a/clients/typescript/test/migrators/postgres/triggers.test.ts b/clients/typescript/test/migrators/postgres/triggers.test.ts index 7e1d72521d..8f6b1731d5 100644 --- a/clients/typescript/test/migrators/postgres/triggers.test.ts +++ b/clients/typescript/test/migrators/postgres/triggers.test.ts @@ -2,7 +2,10 @@ import { dedent } from 'ts-dedent' import testAny, { TestFn } from 'ava' import { generateTableTriggers } from '../../../src/migrators/triggers' import { satelliteDefaults } from '../../../src/satellite/config' -import { migrateDb, personTable } from '../../satellite/common' +import { + migrateDb, + personTable as getPersonTable, +} from '../../satellite/common' import { pgBuilder } from '../../../src/migrators/query-builder' import { makePgDatabase } from '../../support/node-postgres' import { Database, DatabaseAdapter } from '../../../src/drivers/node-postgres' @@ -13,8 +16,10 @@ type Context = { stopPG: () => Promise } const test = testAny as TestFn -const oplogTable = `"${satelliteDefaults.oplogTable.namespace}"."${satelliteDefaults.oplogTable.tablename}"` +const 
defaults = satelliteDefaults('public') +const oplogTable = `"${defaults.oplogTable.namespace}"."${defaults.oplogTable.tablename}"` +const personTable = getPersonTable('public') const personNamespace = personTable.namespace const personTableName = personTable.tableName const qualifiedPersonTable = `"${personNamespace}"."${personTableName}"` @@ -47,10 +52,10 @@ test('generateTableTriggers should create correct triggers for a table', (t) => t.assert( triggersSQL.includes( dedent` - CREATE TRIGGER insert_main_personTable_into_oplog - AFTER INSERT ON "main"."personTable" + CREATE TRIGGER insert_public_personTable_into_oplog + AFTER INSERT ON "public"."personTable" FOR EACH ROW - EXECUTE FUNCTION insert_main_personTable_into_oplog_function(); + EXECUTE FUNCTION insert_public_personTable_into_oplog_function(); ` ) ) @@ -58,20 +63,20 @@ test('generateTableTriggers should create correct triggers for a table', (t) => t.assert( triggersSQL.includes( dedent` - CREATE OR REPLACE FUNCTION insert_main_personTable_into_oplog_function() + CREATE OR REPLACE FUNCTION insert_public_personTable_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'personTable'; + SELECT flag INTO flag_value FROM "public"._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'personTable'; IF flag_value = 1 THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + INSERT INTO "public"._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) VALUES ( - 'main', + 'public', 'personTable', 'INSERT', json_strip_nulls(json_build_object('id', cast(new."id" as TEXT))), @@ -92,10 +97,10 @@ test('generateTableTriggers should create correct triggers for a table', (t) => t.assert( triggersSQL.includes( dedent` - CREATE TRIGGER update_main_personTable_into_oplog - AFTER UPDATE ON "main"."personTable" + CREATE TRIGGER update_public_personTable_into_oplog + AFTER UPDATE ON "public"."personTable" FOR EACH ROW - EXECUTE FUNCTION update_main_personTable_into_oplog_function(); + EXECUTE FUNCTION update_public_personTable_into_oplog_function(); ` ) ) @@ -103,20 +108,20 @@ test('generateTableTriggers should create correct triggers for a table', (t) => t.assert( triggersSQL.includes( dedent` - CREATE OR REPLACE FUNCTION update_main_personTable_into_oplog_function() + CREATE OR REPLACE FUNCTION update_public_personTable_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'personTable'; + SELECT flag INTO flag_value FROM "public"._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'personTable'; IF flag_value = 1 THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + INSERT INTO "public"._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) VALUES ( - 'main', + 'public', 'personTable', 'UPDATE', json_strip_nulls(json_build_object('id', cast(new."id" as TEXT))), @@ -137,10 +142,10 @@ test('generateTableTriggers should create correct triggers for a table', (t) => t.assert( 
triggersSQL.includes( dedent` - CREATE TRIGGER delete_main_personTable_into_oplog - AFTER DELETE ON "main"."personTable" + CREATE TRIGGER delete_public_personTable_into_oplog + AFTER DELETE ON "public"."personTable" FOR EACH ROW - EXECUTE FUNCTION delete_main_personTable_into_oplog_function(); + EXECUTE FUNCTION delete_public_personTable_into_oplog_function(); ` ) ) @@ -148,20 +153,20 @@ test('generateTableTriggers should create correct triggers for a table', (t) => t.assert( triggersSQL.includes( dedent` - CREATE OR REPLACE FUNCTION delete_main_personTable_into_oplog_function() + CREATE OR REPLACE FUNCTION delete_public_personTable_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'personTable'; + SELECT flag INTO flag_value FROM "public"._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'personTable'; IF flag_value = 1 THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + INSERT INTO "public"._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) VALUES ( - 'main', + 'public', 'personTable', 'DELETE', json_strip_nulls(json_build_object('id', cast(old."id" as TEXT))), @@ -196,7 +201,7 @@ test('oplog insertion trigger should insert row into oplog table', async (t) => }) t.is(oplogRows.length, 1) t.deepEqual(oplogRows[0], { - namespace: 'main', + namespace: 'public', tablename: personTableName, optype: 'INSERT', // `id` and `bmi` values are stored as strings @@ -233,7 +238,7 @@ test('oplog trigger should handle Infinity values correctly', async (t) => { }) t.is(oplogRows.length, 1) t.deepEqual(oplogRows[0], { - namespace: 'main', + namespace: 'public', tablename: tableName, optype: 'INSERT', // `id` and `bmi` values are stored as strings @@ -269,7 +274,7 @@ test('oplog trigger should separate null blobs from empty blobs', async (t) => { // Check that the oplog table contains an entry for the inserted row const { rows: oplogRows } = await db.exec({ - sql: `SELECT * FROM "${satelliteDefaults.oplogTable.namespace}"."${satelliteDefaults.oplogTable.tablename}"`, + sql: `SELECT * FROM "${defaults.oplogTable.namespace}"."${defaults.oplogTable.tablename}"`, }) t.is(oplogRows.length, 2) t.regex(oplogRows[0].newRow as string, /,\s*"blob":\s*null\s*,/) diff --git a/clients/typescript/test/migrators/sqlite/schema.test.ts b/clients/typescript/test/migrators/sqlite/schema.test.ts index ad549ac1c1..4e3cfdaed3 100644 --- a/clients/typescript/test/migrators/sqlite/schema.test.ts +++ b/clients/typescript/test/migrators/sqlite/schema.test.ts @@ -39,7 +39,10 @@ test('check schema keys are unique', async (t) => { const migrator = new BundleMigrator(adapter, migrations) await migrator.up() - const metaTable = `"${satelliteDefaults.metaTable.namespace}"."${satelliteDefaults.metaTable.tablename}"` + const defaults = satelliteDefaults( + migrator.electricQueryBuilder.defaultNamespace + ) + const metaTable = `"${defaults.metaTable.namespace}"."${defaults.metaTable.tablename}"` await adapter.run({ sql: `INSERT INTO ${metaTable} (key, value) values ('key', 'value')`, diff --git a/clients/typescript/test/migrators/sqlite/triggers.test.ts b/clients/typescript/test/migrators/sqlite/triggers.test.ts index 9f4f9f594e..0461b07eaa 100644 --- 
a/clients/typescript/test/migrators/sqlite/triggers.test.ts +++ b/clients/typescript/test/migrators/sqlite/triggers.test.ts @@ -4,13 +4,18 @@ import { Database } from 'better-sqlite3' import testAny, { TestFn } from 'ava' import { generateTableTriggers } from '../../../src/migrators/triggers' import { satelliteDefaults } from '../../../src/satellite/config' -import { migrateDb, personTable } from '../../satellite/common' +import { + migrateDb, + personTable as getPersonTable, +} from '../../satellite/common' import { sqliteBuilder } from '../../../src/migrators/query-builder' import { DatabaseAdapter } from '../../../src/drivers/better-sqlite3' type Context = { db: Database; migrateDb: () => Promise } const test = testAny as TestFn -const oplogTable = `"${satelliteDefaults.oplogTable.namespace}"."${satelliteDefaults.oplogTable.tablename}"` +const defaults = satelliteDefaults('main') +const oplogTable = `"${defaults.oplogTable.namespace}"."${defaults.oplogTable.tablename}"` +const personTable = getPersonTable('main') test.beforeEach(async (t) => { const db = new OriginalDatabase(':memory:') @@ -169,7 +174,7 @@ test('oplog trigger should separate null blobs from empty blobs', async (t) => { // Check that the oplog table contains an entry for the inserted row const oplogRows = db .prepare( - `SELECT * FROM "${satelliteDefaults.oplogTable.namespace}"."${satelliteDefaults.oplogTable.tablename}"` + `SELECT * FROM "${defaults.oplogTable.namespace}"."${defaults.oplogTable.tablename}"` ) .all() t.is(oplogRows.length, 2) diff --git a/clients/typescript/test/satellite/common.ts b/clients/typescript/test/satellite/common.ts index c08c729233..e00a6604ab 100644 --- a/clients/typescript/test/satellite/common.ts +++ b/clients/typescript/test/satellite/common.ts @@ -224,10 +224,11 @@ export const relations = { } satisfies RelationsCache // Speed up the intervals for testing. 
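With satelliteDefaults turned into a factory over the namespace, the shared test options below become a factory as well. Roughly, the composition looks like this (reduced field set and stand-in names; the real SatelliteOpts carries more options):

const exampleDefaults = (namespace: string) => ({
  metaTable: { namespace, tablename: '_electric_meta' },
  oplogTable: { namespace, tablename: '_electric_oplog' },
  pollingInterval: 2000,
  minSnapshotWindow: 40,
})

// Tests layer faster intervals on top of the namespace-scoped defaults:
const exampleTestOpts = (namespace: string) => ({
  ...exampleDefaults(namespace),
  minSnapshotWindow: 40,
  pollingInterval: 200,
})

exampleTestOpts('main')    // SQLite-backed suites
exampleTestOpts('public')  // Postgres-backed suites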
-export const opts = Object.assign({}, satelliteDefaults, { - minSnapshotWindow: 40, - pollingInterval: 200, -}) +export const opts = (namespace: string) => + Object.assign({}, satelliteDefaults(namespace), { + minSnapshotWindow: 40, + pollingInterval: 200, + }) type Opts = SatelliteOpts & { minSnapshotWindow: number @@ -249,6 +250,8 @@ export type ContextType = { timestamp: number authState: AuthState token: string + opts: Opts + namespace: string stop?: () => Promise } & Extra @@ -257,7 +260,8 @@ const makeContextInternal = async ( dbName: string, adapter: DatabaseAdapter, migrator: BundleMigratorBase, - options: Opts = opts + namespace: string, + options: Opts = opts(namespace) ) => { const notifier = new MockNotifier(dbName, new EventEmitter()) const client = new MockSatelliteClient() @@ -270,7 +274,7 @@ const makeContextInternal = async ( options ) - const tableInfo = initTableInfo() + const tableInfo = initTableInfo(namespace) const timestamp = new Date().getTime() const runMigrations = async () => { @@ -281,6 +285,7 @@ const makeContextInternal = async ( const token = insecureAuthToken({ sub: 'test-user' }) t.context = { + ...t.context, dbName, adapter, notifier, @@ -291,38 +296,43 @@ const makeContextInternal = async ( timestamp, authState, token, + namespace, + opts: options, } } export const makeContext = async ( t: ExecutionContext, - options: Opts = opts + namespace: string, + options: Opts = opts(namespace) ) => { await mkdir('.tmp', { recursive: true }) const dbName = `.tmp/test-${randomValue()}.db` const db = new SqliteDatabase(dbName) const adapter = new SqliteDatabaseAdapter(db) const migrator = new SqliteBundleMigrator(adapter, sqliteMigrations) - makeContextInternal(t, dbName, adapter, migrator, options) + makeContextInternal(t, dbName, adapter, migrator, namespace, options) } export const makePgContext = async ( t: ExecutionContext, port: number, - options: Opts = opts + namespace: string, + options: Opts = opts(namespace) ) => { const dbName = `test-${randomValue()}` const { db, stop } = await makePgDatabase(dbName, port) const adapter = new PgDatabaseAdapter(db) const migrator = new PgBundleMigrator(adapter, pgMigrations) - makeContextInternal(t, dbName, adapter, migrator, options) + makeContextInternal(t, dbName, adapter, migrator, namespace, options) t.context.stop = stop } export const mockElectricClient = async ( db: SqliteDB, registry: Registry | GlobalRegistry, - options: Opts = opts + namespace: string = 'main', + options: Opts = opts(namespace) ): Promise> => { const dbName = db.name const adapter = new SqliteDatabaseAdapter(db) @@ -406,8 +416,10 @@ export async function migrateDb( } } -export const personTable: Table = { - namespace: 'main', +export const personTable: (namespace: string) => Table = ( + namespace: string +) => ({ + namespace, tableName: 'personTable', columns: ['id', 'name', 'age', 'bmi', 'int8', 'blob'], primary: ['id'], @@ -420,4 +432,4 @@ export const personTable: Table = { int8: PgBasicType.PG_INT8, blob: PgBasicType.PG_BYTEA, }, -} +}) diff --git a/clients/typescript/test/satellite/merge.test.ts b/clients/typescript/test/satellite/merge.test.ts index 8c67d35ec5..8a716cc147 100644 --- a/clients/typescript/test/satellite/merge.test.ts +++ b/clients/typescript/test/satellite/merge.test.ts @@ -12,9 +12,9 @@ import { QualifiedTablename, } from '../../src/util' import Long from 'long' -import { relations, migrateDb, personTable } from './common' +import { relations, migrateDb, personTable as getPersonTable } from './common' import Database from 
'better-sqlite3' -import { satelliteDefaults } from '../../src/satellite/config' +import { SatelliteOpts, satelliteDefaults } from '../../src/satellite/config' import { QueryBuilder, pgBuilder, @@ -162,8 +162,8 @@ function _mergeTableTest( // we go through `fromTransaction` on purpose // in order to also test serialisation/deserialisation of the rows - const entry1: OplogEntry[] = fromTransaction(tx1, relations) - const entry2: OplogEntry[] = fromTransaction(tx2, relations) + const entry1: OplogEntry[] = fromTransaction(tx1, relations, 'main') + const entry2: OplogEntry[] = fromTransaction(tx2, relations, 'main') const merged = mergeEntries('local', entry1, 'remote', entry2, relations) @@ -180,11 +180,15 @@ function _mergeTableTest( type MaybePromise = T | Promise type SetupFn = ( t: ExecutionContext -) => MaybePromise<[DatabaseAdapterInterface, QueryBuilder]> +) => MaybePromise< + [DatabaseAdapterInterface, QueryBuilder, string, SatelliteOpts] +> const setupSqlite: SetupFn = (t: ExecutionContext) => { const db = new Database(':memory:') t.teardown(() => db.close()) - return [new SQLiteDatabaseAdapter(db), sqliteBuilder] + const namespace = 'main' + const defaults = satelliteDefaults(namespace) + return [new SQLiteDatabaseAdapter(db), sqliteBuilder, namespace, defaults] } let port = 4800 @@ -192,7 +196,9 @@ const setupPG: SetupFn = async (t: ExecutionContext) => { const dbName = `merge-test-${randomValue()}` const { db, stop } = await makePgDatabase(dbName, port++) t.teardown(async () => await stop()) - return [new PgDatabaseAdapter(db), pgBuilder] + const namespace = 'public' + const defaults = satelliteDefaults(namespace) + return [new PgDatabaseAdapter(db), pgBuilder, namespace, defaults] } ;( @@ -202,9 +208,10 @@ const setupPG: SetupFn = async (t: ExecutionContext) => { ] as const ).forEach(([dialect, setup]) => { test(`(${dialect}) merge works on oplog entries`, async (t) => { - const [adapter, builder] = await setup(t) + const [adapter, builder, namespace, defaults] = await setup(t) // Migrate the DB with the necessary tables and triggers + const personTable = getPersonTable(namespace) await migrateDb(adapter, personTable, builder) // Insert a row in the table @@ -216,7 +223,7 @@ const setupPG: SetupFn = async (t: ExecutionContext) => { await adapter.run({ sql: insertRowSQL }) // Fetch the oplog entry for the inserted row - const oplogTable = `"${satelliteDefaults.oplogTable.namespace}"."${satelliteDefaults.oplogTable.tablename}"` + const oplogTable = `"${defaults.oplogTable.namespace}"."${defaults.oplogTable.tablename}"` const oplogRows = await adapter.query({ sql: `SELECT * FROM ${oplogTable}`, }) @@ -253,7 +260,7 @@ const setupPG: SetupFn = async (t: ExecutionContext) => { 'local', [oplogEntry], 'remote', - fromTransaction(tx, relations), + fromTransaction(tx, relations, namespace), relations ) diff --git a/clients/typescript/test/satellite/postgres/process.migration.test.ts b/clients/typescript/test/satellite/postgres/process.migration.test.ts index 7f10839ebd..ced08b7955 100644 --- a/clients/typescript/test/satellite/postgres/process.migration.test.ts +++ b/clients/typescript/test/satellite/postgres/process.migration.test.ts @@ -12,7 +12,8 @@ const test = testAny as TestFn let port = 5000 test.beforeEach(async (t) => { - await makePgContext(t, port++) + const namespace = 'public' + await makePgContext(t, port++, namespace) t.context.getMatchingShadowEntries = getPgMatchingShadowEntries t.context.builder = pgBuilder await commonSetup(t) diff --git 
a/clients/typescript/test/satellite/postgres/process.tags.test.ts b/clients/typescript/test/satellite/postgres/process.tags.test.ts index 973e9166e6..f564da4b89 100644 --- a/clients/typescript/test/satellite/postgres/process.tags.test.ts +++ b/clients/typescript/test/satellite/postgres/process.tags.test.ts @@ -9,7 +9,8 @@ let port = 5100 const test = anyTest as TestFn test.beforeEach(async (t) => { - await makePgContext(t, port++) + const namespace = 'public' + await makePgContext(t, port++, namespace) t.context.getMatchingShadowEntries = getPgMatchingShadowEntries }) test.afterEach.always(cleanAndStopSatellite) diff --git a/clients/typescript/test/satellite/postgres/process.test.ts b/clients/typescript/test/satellite/postgres/process.test.ts index fd2a1be5f7..aeeca6b411 100644 --- a/clients/typescript/test/satellite/postgres/process.test.ts +++ b/clients/typescript/test/satellite/postgres/process.test.ts @@ -6,6 +6,7 @@ import { makePgContext, cleanAndStopSatellite } from '../common' import { pgBuilder } from '../../../src/migrators/query-builder' import { processTests, ContextType } from '../process.test' +import { QualifiedTablename } from '../../../src/util' let port = 5200 // Run all tests in this file serially @@ -14,9 +15,14 @@ let port = 5200 const test = anyTest.serial as TestFn test.serial = test // because the common test file uses `test.serial` for some tests (but for PG all tests are serial) test.beforeEach(async (t) => { - await makePgContext(t, port++) + const namespace = 'public' + await makePgContext(t, port++, namespace) t.context.builder = pgBuilder t.context.getMatchingShadowEntries = getPgMatchingShadowEntries + t.context.qualifiedParentTableName = new QualifiedTablename( + namespace, + 'parent' + ).toString() }) test.afterEach.always(cleanAndStopSatellite) diff --git a/clients/typescript/test/satellite/postgres/process.timing.test.ts b/clients/typescript/test/satellite/postgres/process.timing.test.ts index e6a8610f0c..385792add0 100644 --- a/clients/typescript/test/satellite/postgres/process.timing.test.ts +++ b/clients/typescript/test/satellite/postgres/process.timing.test.ts @@ -1,12 +1,13 @@ import anyTest, { TestFn } from 'ava' -import { processTimingTests, opts } from '../process.timing.test' +import { processTimingTests } from '../process.timing.test' import { makePgContext, cleanAndStopSatellite, ContextType } from '../common' let port = 4900 const test = anyTest as TestFn test.beforeEach(async (t) => { - await makePgContext(t, port++, opts) + const namespace = 'public' + await makePgContext(t, port++, namespace) }) test.afterEach.always(cleanAndStopSatellite) diff --git a/clients/typescript/test/satellite/process.migration.test.ts b/clients/typescript/test/satellite/process.migration.test.ts index 9ca83dfa93..ad7cecc3c4 100644 --- a/clients/typescript/test/satellite/process.migration.test.ts +++ b/clients/typescript/test/satellite/process.migration.test.ts @@ -72,10 +72,10 @@ const populateDB = async (t: ExecutionContext) => { const stmts: Statement[] = [] stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES (1, 'local', null);`, + sql: `INSERT INTO parent (id, value, other) VALUES (1, 'local', null);`, }) stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES (2, 'local', null);`, + sql: `INSERT INTO parent (id, value, other) VALUES (2, 'local', null);`, }) await adapter.runInTransaction(...stmts) } @@ -105,7 +105,7 @@ export const processMigrationTests = (test: TestFn) => { test.serial('setup populates DB', async (t) => { 
const adapter = t.context.adapter - const sql = 'SELECT * FROM main.parent' + const sql = `SELECT * FROM parent` const rows = await adapter.query({ sql }) t.deepEqual(rows, [ { @@ -145,12 +145,12 @@ export const processMigrationTests = (test: TestFn) => { pks: ['id'], }, migrationType: SatOpMigrate_Type.CREATE_TABLE, - sql: 'CREATE TABLE main."NewTable"(\ + sql: `CREATE TABLE "NewTable"(\ id TEXT NOT NULL,\ foo INTEGER,\ bar TEXT,\ PRIMARY KEY(id)\ - );', + );`, } const addColumn: SchemaChange = { @@ -182,7 +182,7 @@ export const processMigrationTests = (test: TestFn) => { pks: ['id'], }, migrationType: SatOpMigrate_Type.ALTER_ADD_COLUMN, - sql: 'ALTER TABLE main.parent ADD baz TEXT', + sql: 'ALTER TABLE parent ADD baz TEXT', } const addColumnRelation = { @@ -281,7 +281,7 @@ export const processMigrationTests = (test: TestFn) => { const fetchParentRows = async (adapter: DatabaseAdapter): Promise => { return adapter.query({ - sql: 'SELECT * FROM main.parent', + sql: 'SELECT * FROM parent', }) } @@ -510,7 +510,7 @@ export const processMigrationTests = (test: TestFn) => { // Check the row that was inserted in the new table const newTableRows = await adapter.query({ - sql: 'SELECT * FROM main."NewTable"', + sql: 'SELECT * FROM "NewTable"', }) t.is(newTableRows.length, 1) @@ -534,7 +534,7 @@ export const processMigrationTests = (test: TestFn) => { // Locally update row with id 1 await adapter.runInTransaction({ - sql: `UPDATE main.parent SET value = 'still local', other = 5 WHERE id = 1;`, + sql: `UPDATE parent SET value = 'still local', other = 5 WHERE id = 1;`, }) await satellite._performSnapshot() @@ -722,7 +722,7 @@ export const processMigrationTests = (test: TestFn) => { { migrationType: SatOpMigrate_Type.CREATE_TABLE, sql: ` - CREATE TABLE main."test_items" ( + CREATE TABLE "test_items" ( "id" TEXT NOT NULL, CONSTRAINT "test_items_pkey" PRIMARY KEY ("id") ); @@ -743,7 +743,7 @@ export const processMigrationTests = (test: TestFn) => { { migrationType: SatOpMigrate_Type.CREATE_TABLE, sql: ` - CREATE TABLE main."test_other_items" ( + CREATE TABLE "test_other_items" ( "id" TEXT NOT NULL, "item_id" TEXT, -- CONSTRAINT "test_other_items_item_id_fkey" FOREIGN KEY ("item_id") REFERENCES "test_items" ("id"), diff --git a/clients/typescript/test/satellite/process.tags.test.ts b/clients/typescript/test/satellite/process.tags.test.ts index 37a8058e48..e9d9bd356e 100644 --- a/clients/typescript/test/satellite/process.tags.test.ts +++ b/clients/typescript/test/satellite/process.tags.test.ts @@ -39,7 +39,7 @@ export const processTagsTests = (test: TestFn) => { const clientId = satellite._authState?.clientId ?? 
'test_client' await adapter.run({ - sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', null)`, + sql: `INSERT INTO parent(id, value, other) VALUES (1, 'local', null)`, }) const txDate1 = await satellite._performSnapshot() @@ -48,7 +48,7 @@ export const processTagsTests = (test: TestFn) => { t.is(shadow[0].tags, genEncodedTags(clientId, [txDate1])) await adapter.run({ - sql: `UPDATE main.parent SET value = 'local1', other = 3 WHERE id = 1`, + sql: `UPDATE parent SET value = 'local1', other = 3 WHERE id = 1`, }) const txDate2 = await satellite._performSnapshot() @@ -57,7 +57,7 @@ export const processTagsTests = (test: TestFn) => { t.is(shadow[0].tags, genEncodedTags(clientId, [txDate2])) await adapter.run({ - sql: `UPDATE main.parent SET value = 'local2', other = 4 WHERE id = 1`, + sql: `UPDATE parent SET value = 'local2', other = 4 WHERE id = 1`, }) const txDate3 = await satellite._performSnapshot() @@ -66,7 +66,7 @@ export const processTagsTests = (test: TestFn) => { t.is(shadow[0].tags, genEncodedTags(clientId, [txDate3])) await adapter.run({ - sql: `DELETE FROM main.parent WHERE id = 1`, + sql: `DELETE FROM parent WHERE id = 1`, }) const txDate4 = await satellite._performSnapshot() @@ -92,6 +92,7 @@ export const processTagsTests = (test: TestFn) => { tableInfo, authState, getMatchingShadowEntries, + namespace, } = t.context await runMigrations() await satellite._setAuthState(authState) @@ -100,7 +101,7 @@ export const processTagsTests = (test: TestFn) => { // Local INSERT const stmts1 = { - sql: `INSERT INTO main.parent (id, value, other) VALUES ('1', 'local', null)`, + sql: `INSERT INTO parent (id, value, other) VALUES ('1', 'local', null)`, } await adapter.runInTransaction(stmts1) const txDate1 = await satellite._performSnapshot() @@ -122,7 +123,7 @@ export const processTagsTests = (test: TestFn) => { // Local DELETE const stmts2 = { - sql: `DELETE FROM main.parent WHERE id='1'`, + sql: `DELETE FROM parent WHERE id='1'`, } await adapter.runInTransaction(stmts2) const txDate2 = await satellite._performSnapshot() @@ -143,7 +144,7 @@ export const processTagsTests = (test: TestFn) => { // Local INSERT const stmts3 = { - sql: `INSERT INTO main.parent (id, value, other) VALUES ('1', 'local', null)`, + sql: `INSERT INTO parent (id, value, other) VALUES ('1', 'local', null)`, } await adapter.runInTransaction(stmts3) const txDate3 = await satellite._performSnapshot() @@ -166,7 +167,7 @@ export const processTagsTests = (test: TestFn) => { // apply incomig operation (local operation ack) const ackEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, txDate1.getTime(), @@ -210,6 +211,7 @@ export const processTagsTests = (test: TestFn) => { tableInfo, authState, getMatchingShadowEntries, + namespace, } = t.context await runMigrations() await satellite._setAuthState(authState) @@ -218,14 +220,14 @@ export const processTagsTests = (test: TestFn) => { // For this key we will choose remote Tx, such that: Local TM > Remote TX stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ('1', 'local', null);`, + sql: `INSERT INTO parent (id, value, other) VALUES ('1', 'local', null);`, }) - stmts.push({ sql: `DELETE FROM main.parent WHERE id = 1` }) + stmts.push({ sql: `DELETE FROM parent WHERE id = 1` }) // For this key we will choose remote Tx, such that: Local TM < Remote TX stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ('2', 'local', null);`, + sql: `INSERT INTO parent (id, value, other) VALUES ('2', 'local', 
null);`, }) - stmts.push({ sql: `DELETE FROM main.parent WHERE id = 2` }) + stmts.push({ sql: `DELETE FROM parent WHERE id = 2` }) await adapter.runInTransaction(...stmts) const txDate1 = await satellite._performSnapshot() @@ -235,7 +237,7 @@ export const processTagsTests = (test: TestFn) => { const prevEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, prevTs, @@ -249,7 +251,7 @@ export const processTagsTests = (test: TestFn) => { ) const nextEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, nextTs, @@ -285,13 +287,13 @@ export const processTagsTests = (test: TestFn) => { const shadow = await getMatchingShadowEntries(adapter) const expectedShadow = [ { - namespace: 'main', + namespace, tablename: 'parent', primaryKey: '{"id":1}', tags: genEncodedTags('remote', [prevTs]), }, { - namespace: 'main', + namespace, tablename: 'parent', primaryKey: '{"id":2}', tags: genEncodedTags('remote', [nextTs]), @@ -299,7 +301,7 @@ export const processTagsTests = (test: TestFn) => { ] t.deepEqual(shadow, expectedShadow) - const userTable = await adapter.query({ sql: `SELECT * FROM main.parent;` }) + const userTable = await adapter.query({ sql: `SELECT * FROM parent;` }) // In both cases insert wins over delete, but // for id = 1 CR picks local data before delete, while @@ -319,6 +321,7 @@ export const processTagsTests = (test: TestFn) => { tableInfo, authState, getMatchingShadowEntries, + namespace, } = t.context await runMigrations() await satellite._setAuthState(authState) @@ -327,18 +330,18 @@ export const processTagsTests = (test: TestFn) => { // For this key we will choose remote Tx, such that: Local TM > Remote TX stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ('1', 'local', null);`, + sql: `INSERT INTO parent (id, value, other) VALUES ('1', 'local', null);`, }) stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ('2', 'local', null);`, + sql: `INSERT INTO parent (id, value, other) VALUES ('2', 'local', null);`, }) await adapter.runInTransaction(...stmts) const txDate1 = await satellite._performSnapshot() stmts = [] // For this key we will choose remote Tx, such that: Local TM < Remote TX - stmts.push({ sql: `DELETE FROM main.parent WHERE id = 1` }) - stmts.push({ sql: `DELETE FROM main.parent WHERE id = 2` }) + stmts.push({ sql: `DELETE FROM parent WHERE id = 1` }) + stmts.push({ sql: `DELETE FROM parent WHERE id = 2` }) await adapter.runInTransaction(...stmts) await satellite._performSnapshot() @@ -347,7 +350,7 @@ export const processTagsTests = (test: TestFn) => { const prevEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, prevTs, @@ -361,7 +364,7 @@ export const processTagsTests = (test: TestFn) => { ) const nextEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, nextTs, @@ -397,13 +400,13 @@ export const processTagsTests = (test: TestFn) => { const shadow = await getMatchingShadowEntries(adapter) const expectedShadow = [ { - namespace: 'main', + namespace, tablename: 'parent', primaryKey: '{"id":1}', tags: genEncodedTags('remote', [prevTs]), }, { - namespace: 'main', + namespace, tablename: 'parent', primaryKey: '{"id":2}', tags: genEncodedTags('remote', [nextTs]), @@ -411,7 +414,7 @@ export const processTagsTests = (test: TestFn) => { ] t.deepEqual(shadow, expectedShadow) - let userTable = await adapter.query({ sql: `SELECT * FROM main.parent;` }) + let userTable = await 
adapter.query({ sql: `SELECT * FROM parent;` }) // In both cases insert wins over delete, but // for id = 1 CR picks local data before delete, while @@ -431,24 +434,24 @@ export const processTagsTests = (test: TestFn) => { // Insert 4 items in separate snapshots await adapter.run({ - sql: `INSERT INTO main.parent (id, value) VALUES (1, 'val1')`, + sql: `INSERT INTO parent (id, value) VALUES (1, 'val1')`, }) const ts1 = await satellite._performSnapshot() await adapter.run({ - sql: `INSERT INTO main.parent (id, value) VALUES (2, 'val2')`, + sql: `INSERT INTO parent (id, value) VALUES (2, 'val2')`, }) const ts2 = await satellite._performSnapshot() await adapter.run({ - sql: `INSERT INTO main.parent (id, value) VALUES (3, 'val3')`, + sql: `INSERT INTO parent (id, value) VALUES (3, 'val3')`, }) const ts3 = await satellite._performSnapshot() await adapter.run({ - sql: `INSERT INTO main.parent (id, value) VALUES (4, 'val4')`, + sql: `INSERT INTO parent (id, value) VALUES (4, 'val4')`, }) const ts4 = await satellite._performSnapshot() // Now delete them all in a single snapshot - await adapter.run({ sql: `DELETE FROM main.parent` }) + await adapter.run({ sql: `DELETE FROM parent` }) const ts5 = await satellite._performSnapshot() // Now check that each delete clears the correct tag @@ -470,13 +473,13 @@ export const processTagsTests = (test: TestFn) => { await runMigrations() await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1,'val1')`, + sql: `INSERT INTO parent(id, value) VALUES (1,'val1')`, }) // Since no snapshot was made yet // the timestamp in the oplog is not yet set const insertEntry = await adapter.query({ - sql: `SELECT timestamp, "clearTags" FROM main._electric_oplog WHERE rowid = 1`, + sql: `SELECT timestamp, "clearTags" FROM _electric_oplog WHERE rowid = 1`, }) t.is(insertEntry[0].timestamp, null) t.deepEqual(JSON.parse(insertEntry[0].clearTags as string), []) @@ -488,7 +491,7 @@ export const processTagsTests = (test: TestFn) => { // Now the timestamp is set const insertEntryAfterSnapshot = await adapter.query({ - sql: `SELECT timestamp, "clearTags" FROM main._electric_oplog WHERE rowid = 1`, + sql: `SELECT timestamp, "clearTags" FROM _electric_oplog WHERE rowid = 1`, }) t.assert(insertEntryAfterSnapshot[0].timestamp != null) const insertTimestamp = parseDate( @@ -498,35 +501,35 @@ export const processTagsTests = (test: TestFn) => { // Now update the entry, then delete it, and then insert it again await adapter.run({ - sql: `UPDATE main.parent SET value = 'val2' WHERE id=1`, + sql: `UPDATE parent SET value = 'val2' WHERE id=1`, }) await adapter.run({ - sql: `DELETE FROM main.parent WHERE id=1`, + sql: `DELETE FROM parent WHERE id=1`, }) await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1,'val3')`, + sql: `INSERT INTO parent(id, value) VALUES (1,'val3')`, }) // Since no snapshot has been taken for these operations // their timestamp and clearTags should not be set const updateEntry = await adapter.query({ - sql: `SELECT timestamp, "clearTags" FROM main._electric_oplog WHERE rowid = 2`, + sql: `SELECT timestamp, "clearTags" FROM _electric_oplog WHERE rowid = 2`, }) t.is(updateEntry[0].timestamp, null) t.deepEqual(JSON.parse(updateEntry[0].clearTags as string), []) const deleteEntry = await adapter.query({ - sql: `SELECT timestamp, "clearTags" FROM main._electric_oplog WHERE rowid = 3`, + sql: `SELECT timestamp, "clearTags" FROM _electric_oplog WHERE rowid = 3`, }) t.is(deleteEntry[0].timestamp, null) 
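As elsewhere in this patch, the oplog rows these assertions inspect get their namespace from the caller rather than a hard-coded 'main'; fromTransaction now receives it as an explicit argument. A reduced sketch of that flow, with simplified stand-in types rather than the real DataTransaction/OplogEntry shapes:

type ExampleChange = { table: string; type: 'INSERT' | 'UPDATE' | 'DELETE' }
type ExampleOplogEntry = { namespace: string; tablename: string; optype: string }

const exampleFromTransaction = (
  changes: ExampleChange[],
  namespace: string
): ExampleOplogEntry[] =>
  changes.map((change) => ({
    namespace, // previously the literal 'main'
    tablename: change.table,
    optype: change.type,
  }))

// The satellite process passes its builder's default namespace at the call site:
exampleFromTransaction([{ table: 'parent', type: 'INSERT' }], 'public')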
t.deepEqual(JSON.parse(deleteEntry[0].clearTags as string), []) const reinsertEntry = await adapter.query({ - sql: `SELECT timestamp, "clearTags" FROM main._electric_oplog WHERE rowid = 4`, + sql: `SELECT timestamp, "clearTags" FROM _electric_oplog WHERE rowid = 4`, }) t.is(reinsertEntry[0].timestamp, null) @@ -539,7 +542,7 @@ export const processTagsTests = (test: TestFn) => { // The first operation (update) should override // the original insert (i.e. clearTags must contain the timestamp of the insert) const updateEntryAfterSnapshot = await adapter.query({ - sql: `SELECT timestamp, "clearTags" FROM main._electric_oplog WHERE rowid = 2`, + sql: `SELECT timestamp, "clearTags" FROM _electric_oplog WHERE rowid = 2`, }) const rawTimestampTx2 = updateEntryAfterSnapshot[0].timestamp @@ -554,7 +557,7 @@ export const processTagsTests = (test: TestFn) => { // The second operation (delete) should have the same timestamp // and should contain the tag of the TX in its clearTags const deleteEntryAfterSnapshot = await adapter.query({ - sql: `SELECT timestamp, "clearTags" FROM main._electric_oplog WHERE rowid = 3`, + sql: `SELECT timestamp, "clearTags" FROM _electric_oplog WHERE rowid = 3`, }) t.assert(deleteEntryAfterSnapshot[0].timestamp === rawTimestampTx2) @@ -566,7 +569,7 @@ export const processTagsTests = (test: TestFn) => { // The third operation (reinsert) should have the same timestamp // and should contain the tag of the TX in its clearTags const reinsertEntryAfterSnapshot = await adapter.query({ - sql: `SELECT timestamp, "clearTags" FROM main._electric_oplog WHERE rowid = 4`, + sql: `SELECT timestamp, "clearTags" FROM _electric_oplog WHERE rowid = 4`, }) t.assert(reinsertEntryAfterSnapshot[0].timestamp === rawTimestampTx2) @@ -584,6 +587,7 @@ export const processTagsTests = (test: TestFn) => { tableInfo, authState, getMatchingShadowEntries, + namespace, } = t.context await runMigrations() await satellite._setAuthState(authState) @@ -592,17 +596,17 @@ export const processTagsTests = (test: TestFn) => { // For this key we will choose remote Tx, such that: Local TM > Remote TX stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ('1', 'local', null);`, + sql: `INSERT INTO parent (id, value, other) VALUES ('1', 'local', null);`, }) stmts.push({ - sql: `UPDATE main.parent SET value = 'local', other = 999 WHERE id = 1`, + sql: `UPDATE parent SET value = 'local', other = 999 WHERE id = 1`, }) // For this key we will choose remote Tx, such that: Local TM < Remote TX stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ('2', 'local', null);`, + sql: `INSERT INTO parent (id, value, other) VALUES ('2', 'local', null);`, }) stmts.push({ - sql: `UPDATE main.parent SET value = 'local', other = 999 WHERE id = 1`, + sql: `UPDATE parent SET value = 'local', other = 999 WHERE id = 1`, }) await adapter.runInTransaction(...stmts) @@ -613,7 +617,7 @@ export const processTagsTests = (test: TestFn) => { const prevEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, prevTs, @@ -628,7 +632,7 @@ export const processTagsTests = (test: TestFn) => { const nextEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, nextTs, @@ -664,7 +668,7 @@ export const processTagsTests = (test: TestFn) => { let shadow = await getMatchingShadowEntries(adapter) const expectedShadow = [ { - namespace: 'main', + namespace, tablename: 'parent', primaryKey: '{"id":1}', tags: encodeTags([ @@ -673,7 +677,7 @@ export const 
processTagsTests = (test: TestFn) => { ]), }, { - namespace: 'main', + namespace, tablename: 'parent', primaryKey: '{"id":2}', tags: encodeTags([ @@ -693,7 +697,7 @@ export const processTagsTests = (test: TestFn) => { t.is(entries[2].clearTags, encodeTags([])) t.is(entries[3].clearTags, encodeTags([])) - let userTable = await adapter.query({ sql: `SELECT * FROM main.parent;` }) + let userTable = await adapter.query({ sql: `SELECT * FROM parent;` }) // In both cases insert wins over delete, but // for id = 1 CR picks local data before delete, while @@ -714,6 +718,7 @@ export const processTagsTests = (test: TestFn) => { tableInfo, authState, getMatchingShadowEntries, + namespace, } = t.context await runMigrations() await satellite._setAuthState(authState) @@ -723,18 +728,18 @@ export const processTagsTests = (test: TestFn) => { // For this key we will choose remote Tx, such that: Local TM > Remote TX stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ('1', 'local', null);`, + sql: `INSERT INTO parent (id, value, other) VALUES ('1', 'local', null);`, }) stmts.push({ - sql: `INSERT INTO main.parent (id, value, other) VALUES ('2', 'local', null);`, + sql: `INSERT INTO parent (id, value, other) VALUES ('2', 'local', null);`, }) await adapter.runInTransaction(...stmts) const txDate1 = await satellite._performSnapshot() stmts = [] // For this key we will choose remote Tx, such that: Local TM < Remote TX - stmts.push({ sql: `DELETE FROM main.parent WHERE id = 1` }) - stmts.push({ sql: `DELETE FROM main.parent WHERE id = 2` }) + stmts.push({ sql: `DELETE FROM parent WHERE id = 1` }) + stmts.push({ sql: `DELETE FROM parent WHERE id = 2` }) await adapter.runInTransaction(...stmts) await satellite._performSnapshot() @@ -794,7 +799,7 @@ export const processTagsTests = (test: TestFn) => { let shadow = await getMatchingShadowEntries(adapter) const expectedShadow = [ { - namespace: 'main', + namespace, tablename: 'parent', primaryKey: '{"id":2}', tags: genEncodedTags('remote', [txDate1]), @@ -802,7 +807,7 @@ export const processTagsTests = (test: TestFn) => { ] t.deepEqual(shadow, expectedShadow) - let userTable = await adapter.query({ sql: `SELECT * FROM main.parent;` }) + let userTable = await adapter.query({ sql: `SELECT * FROM parent;` }) const expectedUserTable = [{ id: 2, value: 'local', other: null }] t.deepEqual(expectedUserTable, userTable) }) @@ -815,6 +820,7 @@ export const processTagsTests = (test: TestFn) => { authState, adapter, getMatchingShadowEntries, + namespace, } = t.context await runMigrations() await satellite._setAuthState(authState) @@ -823,7 +829,7 @@ export const processTagsTests = (test: TestFn) => { const insertEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.update, txDate1, @@ -838,7 +844,7 @@ export const processTagsTests = (test: TestFn) => { const deleteDate = txDate1 + 1 const deleteEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.delete, deleteDate, @@ -864,7 +870,7 @@ export const processTagsTests = (test: TestFn) => { let shadow = await getMatchingShadowEntries(adapter) const expectedShadow = [ { - namespace: 'main', + namespace, tablename: 'parent', primaryKey: '{"id":1}', tags: genEncodedTags('remote', [txDate1]), diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index f0f4fcbb25..1ad2738e93 100644 --- a/clients/typescript/test/satellite/process.test.ts +++ b/clients/typescript/test/satellite/process.test.ts @@ 
-35,7 +35,7 @@ import { SatelliteError, SatelliteErrorCode, } from '../../src/util/types' -import { opts, relations, ContextType as CommonContextType } from './common' +import { relations, ContextType as CommonContextType } from './common' import { DEFAULT_LOG_POS, @@ -50,12 +50,16 @@ import { MockSubscriptionsManager } from '../../src/satellite/shapes/manager' import { AuthState, insecureAuthToken } from '../../src/auth' import { ConnectivityStateChangeNotification } from '../../src/notifiers' import { QueryBuilder } from '../../src/migrators/query-builder' +import { SatelliteOpts } from '../../src/satellite/config' export type ContextType = CommonContextType & { builder: QueryBuilder getMatchingShadowEntries: | typeof getSqliteMatchingShadowEntries | typeof getPgMatchingShadowEntries + opts: SatelliteOpts + namespace: string + qualifiedParentTableName: string } const parentRecord = { @@ -80,11 +84,6 @@ const startSatellite = async ( return { connectionPromise } } -const qualifiedParentTableName = new QualifiedTablename( - 'main', - 'parent' -).toString() - const dialectValue = ( sqliteValue: any, pgValue: any, @@ -113,10 +112,10 @@ export const processTests = (test: TestFn) => { }) test('load metadata', async (t) => { - const { adapter, runMigrations } = t.context + const { adapter, runMigrations, namespace } = t.context await runMigrations() - const meta = await loadSatelliteMetaTable(adapter) + const meta = await loadSatelliteMetaTable(adapter, namespace) t.deepEqual(meta, { compensations: dialectValue(1, '1', t), lsn: '', @@ -202,9 +201,9 @@ export const processTests = (test: TestFn) => { const { adapter, runMigrations } = t.context await runMigrations() - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) + await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('1'),('2')` }) await t.throwsAsync( - adapter.run({ sql: `UPDATE main.parent SET id='3' WHERE id = '1'` }), + adapter.run({ sql: `UPDATE parent SET id='3' WHERE id = '1'` }), { code: dialectValue('SQLITE_CONSTRAINT_TRIGGER', 'P0001', t), } @@ -213,11 +212,11 @@ export const processTests = (test: TestFn) => { test('snapshot works', async (t) => { const { satellite } = t.context - const { adapter, notifier, runMigrations, authState } = t.context + const { adapter, notifier, runMigrations, authState, namespace } = t.context await runMigrations() await satellite._setAuthState(authState) - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) + await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('1'),('2')` }) let snapshotTimestamp = await satellite._performSnapshot() @@ -225,7 +224,7 @@ export const processTests = (test: TestFn) => { let shadowTags = encodeTags([generateTag(clientId, snapshotTimestamp)]) var shadowRows = await adapter.query({ - sql: `SELECT tags FROM main._electric_shadow`, + sql: `SELECT tags FROM _electric_shadow`, }) t.is(shadowRows.length, 2) for (const row of shadowRows) { @@ -236,7 +235,7 @@ export const processTests = (test: TestFn) => { const { changes } = notifier.notifications[0] const expectedChange = { - qualifiedTablename: new QualifiedTablename('main', 'parent'), + qualifiedTablename: new QualifiedTablename(namespace, 'parent'), rowids: [1, 2], recordChanges: [ { primaryKey: { id: 1 }, type: 'INSERT' }, @@ -293,11 +292,18 @@ export const processTests = (test: TestFn) => { }) test('starting and stopping the process works', async (t) => { - const { adapter, notifier, runMigrations, satellite, authState, token } = - t.context + const { + adapter, + 
notifier, + runMigrations, + satellite, + authState, + token, + opts, + } = t.context await runMigrations() - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) + await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('1'),('2')` }) const conn = await startSatellite(satellite, authState, token) await conn.connectionPromise @@ -307,14 +313,14 @@ export const processTests = (test: TestFn) => { // connect, 1st txn t.is(notifier.notifications.length, 2) - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('3'),('4')` }) + await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('3'),('4')` }) await sleepAsync(opts.pollingInterval) // 2nd txm t.is(notifier.notifications.length, 3) await satellite.stop() - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('5'),('6')` }) + await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('5'),('6')` }) await sleepAsync(opts.pollingInterval) // no txn notified @@ -332,7 +338,7 @@ export const processTests = (test: TestFn) => { const { adapter, notifier, runMigrations } = t.context await runMigrations() - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) + await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('1'),('2')` }) t.is(notifier.notifications.length, 0) @@ -342,14 +348,15 @@ export const processTests = (test: TestFn) => { }) test('snapshot of INSERT with blob/Uint8Array', async (t) => { - const { adapter, runMigrations, satellite, authState, builder } = t.context + const { adapter, runMigrations, satellite, authState, builder, namespace } = + t.context await runMigrations() const blob = new Uint8Array([1, 2, 255, 244, 160, 1]) await adapter.run({ - sql: `INSERT INTO "main"."blobTable"(value) VALUES (${builder.makePositionalParam( + sql: `INSERT INTO "${namespace}"."blobTable"(value) VALUES (${builder.makePositionalParam( 1 )})`, args: [blob], @@ -368,7 +375,7 @@ export const processTests = (test: TestFn) => { relations ) const qualifiedBlobTable = new QualifiedTablename( - 'main', + namespace, 'blobTable' ).toString() const [_, keyChanges] = @@ -381,15 +388,21 @@ export const processTests = (test: TestFn) => { // If last operation is a DELETE, concurrent INSERT shall resurrect deleted // values as in 'INSERT wins over DELETE and restored deleted values' test('snapshot of INSERT after DELETE', async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context + const { + adapter, + runMigrations, + satellite, + authState, + qualifiedParentTableName, + } = t.context await runMigrations() await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1,'val1')`, + sql: `INSERT INTO parent(id, value) VALUES (1,'val1')`, }) - await adapter.run({ sql: `DELETE FROM main.parent WHERE id=1` }) - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES (1)` }) + await adapter.run({ sql: `DELETE FROM parent WHERE id=1` }) + await adapter.run({ sql: `INSERT INTO parent(id) VALUES (1)` }) await satellite._setAuthState(authState) await satellite._performSnapshot() @@ -409,12 +422,13 @@ export const processTests = (test: TestFn) => { }) test('snapshot of INSERT with bigint', async (t) => { - const { adapter, runMigrations, satellite, authState } = t.context + const { adapter, runMigrations, satellite, authState, namespace } = + t.context await runMigrations() await adapter.run({ - sql: `INSERT INTO main."bigIntTable"(value) VALUES (1)`, + sql: `INSERT INTO "bigIntTable"(value) VALUES (1)`, }) await satellite._setAuthState(authState) @@ -430,7 +444,7 @@ 
export const processTests = (test: TestFn) => { relations ) const qualifiedTableName = new QualifiedTablename( - 'main', + namespace, 'bigIntTable' ).toString() const [_, keyChanges] = merged[qualifiedTableName]['{"value":"1"}'] @@ -439,14 +453,21 @@ export const processTests = (test: TestFn) => { }) test('take snapshot and merge local wins', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = - t.context + const { + adapter, + runMigrations, + satellite, + tableInfo, + authState, + namespace, + qualifiedParentTableName, + } = t.context await runMigrations() const incomingTs = new Date().getTime() - 1 const incomingEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, incomingTs, @@ -457,7 +478,7 @@ export const processTests = (test: TestFn) => { } ) await adapter.run({ - sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, + sql: `INSERT INTO parent(id, value, other) VALUES (1, 'local', 1)`, }) await satellite._setAuthState(authState) @@ -476,7 +497,7 @@ export const processTests = (test: TestFn) => { const item = merged[qualifiedParentTableName]['{"id":1}'] t.deepEqual(item, { - namespace: 'main', + namespace, tablename: 'parent', primaryKeyCols: { id: 1 }, optype: OPTYPES.upsert, @@ -498,12 +519,19 @@ export const processTests = (test: TestFn) => { }) test('take snapshot and merge incoming wins', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = - t.context + const { + adapter, + runMigrations, + satellite, + tableInfo, + authState, + namespace, + qualifiedParentTableName, + } = t.context await runMigrations() await adapter.run({ - sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, + sql: `INSERT INTO parent(id, value, other) VALUES (1, 'local', 1)`, }) await satellite._setAuthState(authState) @@ -516,7 +544,7 @@ export const processTests = (test: TestFn) => { const incomingTs = localTimestamp + 1 const incomingEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, incomingTs, @@ -537,7 +565,7 @@ export const processTests = (test: TestFn) => { const item = merged[qualifiedParentTableName]['{"id":1}'] t.deepEqual(item, { - namespace: 'main', + namespace, tablename: 'parent', primaryKeyCols: { id: 1 }, optype: OPTYPES.upsert, @@ -559,15 +587,21 @@ export const processTests = (test: TestFn) => { }) test('merge incoming wins on persisted ops', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState } = - t.context + const { + adapter, + runMigrations, + satellite, + tableInfo, + authState, + namespace, + } = t.context await runMigrations() await satellite._setAuthState(authState) satellite.relations = relations // This operation is persisted await adapter.run({ - sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, + sql: `INSERT INTO parent(id, value, other) VALUES (1, 'local', 1)`, }) await satellite._performSnapshot() const [originalInsert] = await satellite._getEntries() @@ -580,7 +614,7 @@ export const processTests = (test: TestFn) => { // This operation is done offline await adapter.run({ - sql: `UPDATE main.parent SET value = 'new local' WHERE id = 1`, + sql: `UPDATE parent SET value = 'new local' WHERE id = 1`, }) await satellite._performSnapshot() const [offlineInsert] = await satellite._getEntries() @@ -590,7 +624,7 @@ export const processTests = (test: TestFn) => { const incomingTs = offlineTimestamp + 1 const firstIncomingEntry = 
generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.update, incomingTs, @@ -608,7 +642,7 @@ export const processTests = (test: TestFn) => { await satellite._applyTransaction(firstIncomingTx) const [{ value: value1 }] = await adapter.query({ - sql: 'SELECT value FROM main.parent WHERE id = 1', + sql: 'SELECT value FROM parent WHERE id = 1', }) t.is( value1, @@ -619,7 +653,7 @@ export const processTests = (test: TestFn) => { // And after the offline transaction was sent, the resolved no-op transaction comes in const secondIncomingEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.update, offlineTimestamp, @@ -640,7 +674,7 @@ export const processTests = (test: TestFn) => { await satellite._applyTransaction(secondIncomingTx) const [{ value: value2 }] = await adapter.query({ - sql: 'SELECT value FROM main.parent WHERE id = 1', + sql: 'SELECT value FROM parent WHERE id = 1', }) t.is( value2, @@ -657,10 +691,11 @@ export const processTests = (test: TestFn) => { tableInfo, authState, getMatchingShadowEntries, + namespace, } = t.context await runMigrations() await adapter.run({ - sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', null)`, + sql: `INSERT INTO parent(id, value, other) VALUES (1, 'local', null)`, }) await satellite._setAuthState(authState) @@ -671,7 +706,7 @@ export const processTests = (test: TestFn) => { const incomingTs = new Date().getTime() const incomingEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, incomingTs, @@ -696,7 +731,7 @@ export const processTests = (test: TestFn) => { await satellite._performSnapshot() - const sql = 'SELECT * from main.parent WHERE id=1' + const sql = 'SELECT * from parent WHERE id=1' const [row] = await adapter.query({ sql }) t.is(row.value, 'incoming') t.is(row.other, 1) @@ -724,13 +759,14 @@ export const processTests = (test: TestFn) => { tableInfo, authState, getMatchingShadowEntries, + namespace, } = t.context await runMigrations() const incomingTs = new Date() const incomingEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.delete, incomingTs.getTime(), @@ -747,7 +783,7 @@ export const processTests = (test: TestFn) => { await satellite._setAuthState(authState) await satellite._apply([incomingEntry], 'remote') - const sql = 'SELECT * from main.parent WHERE id=1' + const sql = 'SELECT * from parent WHERE id=1' const rows = await adapter.query({ sql }) const shadowEntries = await getMatchingShadowEntries(adapter) @@ -766,14 +802,20 @@ export const processTests = (test: TestFn) => { }) test('apply incoming with null on column with default', async (t) => { - const { runMigrations, satellite, adapter, tableInfo, authState } = - t.context + const { + runMigrations, + satellite, + adapter, + tableInfo, + authState, + namespace, + } = t.context await runMigrations() const incomingTs = new Date().getTime() const incomingEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, incomingTs, @@ -798,7 +840,7 @@ export const processTests = (test: TestFn) => { } await satellite._applyTransaction(incomingTx) - const sql = `SELECT * from main.parent WHERE value='incoming'` + const sql = `SELECT * from parent WHERE value='incoming'` const rows = await adapter.query({ sql }) t.is(rows[0].other, null) @@ -806,14 +848,20 @@ export const processTests = (test: TestFn) => { }) test('apply incoming with undefined on column with default', async (t) => { - const { runMigrations, 
satellite, adapter, tableInfo, authState } = - t.context + const { + runMigrations, + satellite, + adapter, + tableInfo, + authState, + namespace, + } = t.context await runMigrations() const incomingTs = new Date().getTime() const incomingEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, incomingTs, @@ -837,7 +885,7 @@ export const processTests = (test: TestFn) => { } await satellite._applyTransaction(incomingTx) - const sql = `SELECT * from main.parent WHERE value='incoming'` + const sql = `SELECT * from parent WHERE value='incoming'` const rows = await adapter.query({ sql }) t.is(rows[0].other, 0) @@ -845,7 +893,14 @@ export const processTests = (test: TestFn) => { }) test('INSERT wins over DELETE and restored deleted values', async (t) => { - const { runMigrations, satellite, tableInfo, authState } = t.context + const { + runMigrations, + satellite, + tableInfo, + authState, + namespace, + qualifiedParentTableName, + } = t.context await runMigrations() await satellite._setAuthState(authState) const clientId = satellite._authState!.clientId @@ -856,7 +911,7 @@ export const processTests = (test: TestFn) => { const incoming = [ generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, incomingTs, @@ -868,7 +923,7 @@ export const processTests = (test: TestFn) => { ), generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.delete, incomingTs, @@ -882,7 +937,7 @@ export const processTests = (test: TestFn) => { const local = [ generateLocalOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, localTs, @@ -899,7 +954,7 @@ export const processTests = (test: TestFn) => { const item = merged[qualifiedParentTableName]['{"id":1}'] t.deepEqual(item, { - namespace: 'main', + namespace, tablename: 'parent', primaryKeyCols: { id: 1 }, optype: OPTYPES.upsert, @@ -921,7 +976,14 @@ export const processTests = (test: TestFn) => { }) test('concurrent updates take all changed values', async (t) => { - const { runMigrations, satellite, tableInfo, authState } = t.context + const { + runMigrations, + satellite, + tableInfo, + authState, + namespace, + qualifiedParentTableName, + } = t.context await runMigrations() await satellite._setAuthState(authState) const clientId = satellite._authState!.clientId @@ -932,7 +994,7 @@ export const processTests = (test: TestFn) => { const incoming = [ generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.update, incomingTs, @@ -953,7 +1015,7 @@ export const processTests = (test: TestFn) => { const local = [ generateLocalOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.update, localTs, @@ -978,7 +1040,7 @@ export const processTests = (test: TestFn) => { // The local entry concurrently modified the value of the `other` column to 1. // The merged entries should have `value = 'remote'` and `other = 1`. 
t.deepEqual(item, { - namespace: 'main', + namespace, tablename: 'parent', primaryKeyCols: { id: 1 }, optype: OPTYPES.upsert, @@ -999,7 +1061,14 @@ export const processTests = (test: TestFn) => { }) test('merge incoming with empty local', async (t) => { - const { runMigrations, satellite, tableInfo, authState } = t.context + const { + runMigrations, + satellite, + tableInfo, + authState, + namespace, + qualifiedParentTableName, + } = t.context await runMigrations() await satellite._setAuthState(authState) const clientId = satellite._authState!.clientId @@ -1010,7 +1079,7 @@ export const processTests = (test: TestFn) => { const incoming = [ generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, incomingTs, @@ -1027,7 +1096,7 @@ export const processTests = (test: TestFn) => { const item = merged[qualifiedParentTableName]['{"id":1}'] t.deepEqual(item, { - namespace: 'main', + namespace, tablename: 'parent', primaryKeyCols: { id: 1 }, optype: OPTYPES.upsert, @@ -1050,11 +1119,11 @@ export const processTests = (test: TestFn) => { } await satellite._setMeta('compensations', 0) await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, + sql: `INSERT INTO parent(id, value) VALUES (1, '1')`, }) await t.throwsAsync( - adapter.run({ sql: `INSERT INTO main.child(id, parent) VALUES (1, 2)` }), + adapter.run({ sql: `INSERT INTO child(id, parent) VALUES (1, 2)` }), { code: dialectValue('SQLITE_CONSTRAINT_FOREIGNKEY', '23503', t), } @@ -1070,6 +1139,7 @@ export const processTests = (test: TestFn) => { timestamp, authState, builder, + namespace, } = t.context await runMigrations() @@ -1081,7 +1151,7 @@ export const processTests = (test: TestFn) => { const incoming = generateLocalOplogEntry( tableInfo, - 'main', + namespace, 'child', OPTYPES.insert, timestamp, @@ -1116,6 +1186,7 @@ export const processTests = (test: TestFn) => { timestamp, authState, builder, + namespace, } = t.context await runMigrations() @@ -1128,7 +1199,7 @@ export const processTests = (test: TestFn) => { const childInsertEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'child', OPTYPES.insert, timestamp, @@ -1141,7 +1212,7 @@ export const processTests = (test: TestFn) => { const parentInsertEntry = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, timestamp, @@ -1152,9 +1223,9 @@ export const processTests = (test: TestFn) => { ) await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, + sql: `INSERT INTO parent(id, value) VALUES (1, '1')`, }) - await adapter.run({ sql: `DELETE FROM main.parent WHERE id=1` }) + await adapter.run({ sql: `DELETE FROM parent WHERE id=1` }) await satellite._performSnapshot() @@ -1171,7 +1242,7 @@ export const processTests = (test: TestFn) => { await satellite._applyTransaction(insertChildAndParentTx) const rows = await adapter.query({ - sql: `SELECT * from main.parent WHERE id=1`, + sql: `SELECT * from parent WHERE id=1`, }) // Not only does the parent exist. 
@@ -1182,8 +1253,15 @@ export const processTests = (test: TestFn) => { }) test('compensations: using triggers with flag 0', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState, builder } = - t.context + const { + adapter, + runMigrations, + satellite, + tableInfo, + authState, + builder, + namespace, + } = t.context await runMigrations() if (builder.dialect === 'SQLite') { @@ -1192,21 +1270,21 @@ export const processTests = (test: TestFn) => { await satellite._setMeta('compensations', 0) await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, + sql: `INSERT INTO parent(id, value) VALUES (1, '1')`, }) await satellite._setAuthState(authState) const ts = await satellite._performSnapshot() await satellite._garbageCollectOplog(ts) await adapter.run({ - sql: `INSERT INTO main.child(id, parent) VALUES (1, 1)`, + sql: `INSERT INTO child(id, parent) VALUES (1, 1)`, }) await satellite._performSnapshot() const timestamp = new Date().getTime() const incoming = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.delete, timestamp, @@ -1232,8 +1310,15 @@ export const processTests = (test: TestFn) => { }) test('compensations: using triggers with flag 1', async (t) => { - const { adapter, runMigrations, satellite, tableInfo, authState, builder } = - t.context + const { + adapter, + runMigrations, + satellite, + tableInfo, + authState, + builder, + namespace, + } = t.context await runMigrations() if (builder.dialect === 'SQLite') { @@ -1242,14 +1327,14 @@ export const processTests = (test: TestFn) => { await satellite._setMeta('compensations', 1) await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1, '1')`, + sql: `INSERT INTO parent(id, value) VALUES (1, '1')`, }) await satellite._setAuthState(authState) const ts = await satellite._performSnapshot() await satellite._garbageCollectOplog(ts) await adapter.run({ - sql: `INSERT INTO main.child(id, parent) VALUES (1, 1)`, + sql: `INSERT INTO child(id, parent) VALUES (1, 1)`, }) await satellite._performSnapshot() @@ -1257,7 +1342,7 @@ export const processTests = (test: TestFn) => { const incoming = [ generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.delete, timestamp, @@ -1275,7 +1360,7 @@ export const processTests = (test: TestFn) => { }) test('get oplogEntries from transaction', async (t) => { - const { runMigrations, satellite } = t.context + const { runMigrations, satellite, namespace } = t.context await runMigrations() const relations = await satellite['_getLocalRelations']() @@ -1294,7 +1379,7 @@ export const processTests = (test: TestFn) => { } const expected: OplogEntry = { - namespace: 'main', + namespace, tablename: 'parent', optype: 'INSERT', newRow: '{"id":0}', @@ -1305,17 +1390,17 @@ export const processTests = (test: TestFn) => { clearTags: encodeTags([]), } - const opLog = fromTransaction(transaction, relations) + const opLog = fromTransaction(transaction, relations, namespace) t.deepEqual(opLog[0], expected) }) test('get transactions from opLogEntries', async (t) => { - const { runMigrations } = t.context + const { runMigrations, namespace } = t.context await runMigrations() const opLogEntries: OplogEntry[] = [ { - namespace: 'public', + namespace, tablename: 'parent', optype: 'INSERT', newRow: '{"id":0}', @@ -1326,7 +1411,7 @@ export const processTests = (test: TestFn) => { clearTags: encodeTags([]), }, { - namespace: 'public', + namespace, tablename: 'parent', optype: 'UPDATE', newRow: '{"id":1}', @@ -1337,7 +1422,7 @@ 
export const processTests = (test: TestFn) => { clearTags: encodeTags([]), }, { - namespace: 'public', + namespace, tablename: 'parent', optype: 'INSERT', newRow: '{"id":2}', @@ -1400,7 +1485,7 @@ export const processTests = (test: TestFn) => { await connectionPromise adapter.run({ - sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1)`, + sql: `INSERT INTO parent(id, value, other) VALUES (1, 'local', 1)`, }) await satellite._performSnapshot() @@ -1412,7 +1497,7 @@ export const processTests = (test: TestFn) => { satellite.disconnect() adapter.run({ - sql: `INSERT INTO main.parent(id, value, other) VALUES (2, 'local', 1)`, + sql: `INSERT INTO parent(id, value, other) VALUES (2, 'local', 1)`, }) await satellite._performSnapshot() @@ -1477,10 +1562,10 @@ export const processTests = (test: TestFn) => { await conn.connectionPromise adapter.run({ - sql: `INSERT INTO main.parent(id, value, other) VALUES (1, 'local', 1);`, + sql: `INSERT INTO parent(id, value, other) VALUES (1, 'local', 1);`, }) adapter.run({ - sql: `UPDATE main.parent SET value = 'local', other = 2 WHERE id = 1;`, + sql: `UPDATE parent SET value = 'local', other = 2 WHERE id = 1;`, }) // Before snapshot, we didn't send anything @@ -1541,11 +1626,10 @@ export const processTests = (test: TestFn) => { }) test('apply shape data and persist subscription', async (t) => { - const { client, satellite, adapter, notifier, token } = t.context + const { client, satellite, adapter, notifier, token, namespace } = t.context const { runMigrations, authState } = t.context await runMigrations() - const namespace = 'main' const tablename = 'parent' const qualified = new QualifiedTablename(namespace, tablename) @@ -1587,7 +1671,7 @@ export const processTests = (test: TestFn) => { t.is(row.length, 1) const shadowRows = await adapter.query({ - sql: `SELECT tags FROM main._electric_shadow`, + sql: `SELECT tags FROM _electric_shadow`, }) t.is(shadowRows.length, 1) @@ -1673,11 +1757,17 @@ export const processTests = (test: TestFn) => { }) test('applied shape data will be acted upon correctly', async (t) => { - const { client, satellite, adapter, runMigrations, authState, token } = - t.context + const { + client, + satellite, + adapter, + runMigrations, + authState, + token, + namespace, + } = t.context await runMigrations() - const namespace = 'main' const tablename = 'parent' const qualified = `"${namespace}"."${tablename}"` @@ -1704,11 +1794,11 @@ export const processTests = (test: TestFn) => { t.is(row.length, 1) const shadowRows = await adapter.query({ - sql: `SELECT * FROM main._electric_shadow`, + sql: `SELECT * FROM _electric_shadow`, }) t.is(shadowRows.length, 1) t.like(shadowRows[0], { - namespace: 'main', + namespace, tablename: 'parent', }) @@ -1716,7 +1806,7 @@ export const processTests = (test: TestFn) => { await satellite._performSnapshot() const oplogs = await adapter.query({ - sql: `SELECT * FROM main._electric_oplog`, + sql: `SELECT * FROM _electric_oplog`, }) t.not(oplogs[0].clearTags, '[]') } catch (e) { @@ -1759,7 +1849,7 @@ export const processTests = (test: TestFn) => { }) const [result] = await adapter.query({ - sql: 'SELECT * FROM main.parent WHERE id = 100', + sql: 'SELECT * FROM parent WHERE id = 100', }) t.deepEqual(result, { id: 100, value: 'new_value', other: null }) }) @@ -1801,18 +1891,24 @@ export const processTests = (test: TestFn) => { }) const results = await adapter.query({ - sql: 'SELECT * FROM main.parent', + sql: 'SELECT * FROM parent', }) t.deepEqual(results, []) }) test('a subscription that 
failed to apply because of FK constraint triggers GC', async (t) => { - const { client, satellite, adapter, runMigrations, authState, token } = - t.context + const { + client, + satellite, + adapter, + runMigrations, + authState, + token, + namespace, + } = t.context await runMigrations() const tablename = 'child' - const namespace = 'main' // relations must be present at subscription delivery client.setRelations(relations) @@ -1840,11 +1936,17 @@ export const processTests = (test: TestFn) => { }) test('a second successful subscription', async (t) => { - const { client, satellite, adapter, runMigrations, authState, token } = - t.context + const { + client, + satellite, + adapter, + runMigrations, + authState, + token, + namespace, + } = t.context await runMigrations() - const namespace = 'main' const tablename = 'child' // relations must be present at subscription delivery @@ -1874,7 +1976,7 @@ export const processTests = (test: TestFn) => { t.is(row.length, 1) const shadowRows = await adapter.query({ - sql: `SELECT tags FROM main._electric_shadow`, + sql: `SELECT tags FROM _electric_shadow`, }) t.is(shadowRows.length, 2) @@ -1887,8 +1989,15 @@ export const processTests = (test: TestFn) => { }) test('a single subscribe with multiple tables with FKs', async (t) => { - const { client, satellite, adapter, runMigrations, authState, token } = - t.context + const { + client, + satellite, + adapter, + runMigrations, + authState, + token, + namespace, + } = t.context await runMigrations() // relations must be present at subscription delivery @@ -1918,7 +2027,7 @@ export const processTests = (test: TestFn) => { setTimeout(async () => { try { const row = await adapter.query({ - sql: `SELECT id FROM "main"."child"`, + sql: `SELECT id FROM "${namespace}"."child"`, }) t.is(row.length, 1) @@ -1940,11 +2049,17 @@ export const processTests = (test: TestFn) => { test.serial( 'a shape delivery that triggers garbage collection', async (t) => { - const { client, satellite, adapter, runMigrations, authState, token } = - t.context + const { + client, + satellite, + adapter, + runMigrations, + authState, + token, + namespace, + } = t.context await runMigrations() - const namespace = 'main' const tablename = 'parent' const childTable = 'child' @@ -2003,12 +2118,18 @@ export const processTests = (test: TestFn) => { }) test('a subscription request failure does not clear the manager state', async (t) => { - const { client, satellite, adapter, runMigrations, authState, token } = - t.context + const { + client, + satellite, + adapter, + runMigrations, + authState, + token, + namespace, + } = t.context await runMigrations() // relations must be present at subscription delivery - const namespace = 'main' const tablename = 'parent' client.setRelations(relations) client.setRelationData(tablename, parentRecord) @@ -2052,7 +2173,7 @@ export const processTests = (test: TestFn) => { await runMigrations() // Add log entry while offline - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) + await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('1'),('2')` }) const conn = await startSatellite(satellite, authState, token) @@ -2085,15 +2206,15 @@ export const processTests = (test: TestFn) => { // manager does not violate the FKs when unsubscribing from all subscriptions try { await satellite.adapter.runInTransaction( - { sql: `CREATE TABLE main.users (id TEXT PRIMARY KEY, name TEXT)` }, + { sql: `CREATE TABLE users (id TEXT PRIMARY KEY, name TEXT)` }, { - sql: `CREATE TABLE main.posts (id TEXT PRIMARY 
KEY, title TEXT, author_id TEXT, FOREIGN KEY(author_id) REFERENCES ${builder.pgOnly( - 'main.' - )}users(id) ${builder.pgOnly('DEFERRABLE INITIALLY IMMEDIATE')})`, + sql: `CREATE TABLE posts (id TEXT PRIMARY KEY, title TEXT, author_id TEXT, FOREIGN KEY(author_id) REFERENCES users(id) ${builder.pgOnly( + 'DEFERRABLE INITIALLY IMMEDIATE' + )})`, }, - { sql: `INSERT INTO main.users (id, name) VALUES ('u1', 'user1')` }, + { sql: `INSERT INTO users (id, name) VALUES ('u1', 'user1')` }, { - sql: `INSERT INTO main.posts (id, title, author_id) VALUES ('p1', 'My first post', 'u1')`, + sql: `INSERT INTO posts (id, title, author_id) VALUES ('p1', 'My first post', 'u1')`, } ) } catch (e: any) { @@ -2105,12 +2226,12 @@ export const processTests = (test: TestFn) => { // Check that everything was deleted const users = await satellite.adapter.query({ - sql: 'SELECT * FROM main.users', + sql: 'SELECT * FROM users', }) t.assert(users.length === 0) const posts = await satellite.adapter.query({ - sql: 'SELECT * FROM main.posts', + sql: 'SELECT * FROM posts', }) t.assert(posts.length === 0) }) @@ -2119,7 +2240,7 @@ export const processTests = (test: TestFn) => { const { adapter, runMigrations, satellite, authState, token } = t.context await startSatellite(satellite, authState, token) await runMigrations() - await adapter.run({ sql: `INSERT INTO main.parent(id) VALUES ('1'),('2')` }) + await adapter.run({ sql: `INSERT INTO parent(id) VALUES ('1'),('2')` }) const ts = await satellite._performSnapshot() await satellite._garbageCollectOplog(ts) t.is((await satellite._getEntries(0)).length, 0) @@ -2141,10 +2262,10 @@ export const processTests = (test: TestFn) => { runMigrations, authState, token, + namespace, } = t.context await runMigrations() - const namespace = 'main' const tablename = 'parent' const qualified = `"${namespace}"."${tablename}"` @@ -2166,7 +2287,7 @@ export const processTests = (test: TestFn) => { const expectedTs = new Date().getTime() const incoming = generateRemoteOplogEntry( tableInfo, - 'main', + namespace, 'parent', OPTYPES.insert, expectedTs, @@ -2190,11 +2311,11 @@ export const processTests = (test: TestFn) => { t.is(row.length, 2) const shadowRows = await adapter.query({ - sql: `SELECT * FROM main._electric_shadow`, + sql: `SELECT * FROM _electric_shadow`, }) t.is(shadowRows.length, 2) t.like(shadowRows[0], { - namespace: 'main', + namespace, tablename: 'parent', }) @@ -2202,7 +2323,7 @@ export const processTests = (test: TestFn) => { const deleteTx = await satellite._performSnapshot() const oplogs = await adapter.query({ - sql: `SELECT * FROM main._electric_oplog`, + sql: `SELECT * FROM _electric_oplog`, }) t.is( oplogs[0].clearTags, @@ -2220,17 +2341,17 @@ export const processTests = (test: TestFn) => { await satellite._setAuthState(authState) await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (1,'val1')`, + sql: `INSERT INTO parent(id, value) VALUES (1,'val1')`, }) await adapter.run({ - sql: `INSERT INTO main.parent(id, value) VALUES (2,'val2')`, + sql: `INSERT INTO parent(id, value) VALUES (2,'val2')`, }) - await adapter.run({ sql: `DELETE FROM main.parent WHERE id=1` }) + await adapter.run({ sql: `DELETE FROM parent WHERE id=1` }) await satellite._performSnapshot() - await adapter.run({ sql: `DELETE FROM main.parent WHERE id=2` }) + await adapter.run({ sql: `DELETE FROM parent WHERE id=2` }) await satellite._performSnapshot() diff --git a/clients/typescript/test/satellite/process.timing.test.ts b/clients/typescript/test/satellite/process.timing.test.ts index 
60da87d3ed..2d0bc0e17d 100644 --- a/clients/typescript/test/satellite/process.timing.test.ts +++ b/clients/typescript/test/satellite/process.timing.test.ts @@ -13,14 +13,22 @@ import { satelliteDefaults } from '../../src/satellite/config' */ // Speed up the intervals for testing. -export const opts = Object.assign({}, satelliteDefaults, { - minSnapshotWindow: 80, - pollingInterval: 500, -}) +export const opts = (namespace: string) => + Object.assign({}, satelliteDefaults(namespace), { + minSnapshotWindow: 80, + pollingInterval: 500, + }) export const processTimingTests = (test: TestFn) => { test(`throttled snapshot respects window`, async (t) => { - const { adapter, notifier, runMigrations, satellite, authState } = t.context + const { + adapter, + notifier, + runMigrations, + satellite, + authState, + namespace, + } = t.context await runMigrations() await satellite._setAuthState(authState) @@ -29,13 +37,13 @@ export const processTimingTests = (test: TestFn) => { const numNotifications = notifier.notifications.length - const sql = `INSERT INTO main.parent(id) VALUES ('1'),('2')` + const sql = `INSERT INTO parent(id) VALUES ('1'),('2')` await adapter.run({ sql }) await satellite._throttledSnapshot() t.is(notifier.notifications.length, numNotifications) - await sleepAsync(opts.minSnapshotWindow + 50) + await sleepAsync(opts(namespace).minSnapshotWindow + 50) t.is(notifier.notifications.length, numNotifications + 1) }) diff --git a/clients/typescript/test/satellite/registry.test.ts b/clients/typescript/test/satellite/registry.test.ts index 241ba1094b..101383fee0 100644 --- a/clients/typescript/test/satellite/registry.test.ts +++ b/clients/typescript/test/satellite/registry.test.ts @@ -7,12 +7,13 @@ import { Notifier } from '../../src/notifiers/index' import { MockSatelliteProcess, MockRegistry } from '../../src/satellite/mock' import { SocketFactory } from '../../src/sockets' import { DbSchema } from '../../src/client/model' +import { sqliteBuilder } from '../../src/migrators/query-builder' const dbName = 'test.db' const dbDescription = {} as DbSchema const adapter = {} as DatabaseAdapter -const migrator = {} as Migrator +const migrator = { electricQueryBuilder: sqliteBuilder } as unknown as Migrator const notifier = {} as Notifier const socketFactory = {} as SocketFactory const config: InternalElectricConfig = { diff --git a/clients/typescript/test/satellite/serialization.test.ts b/clients/typescript/test/satellite/serialization.test.ts index 7865c94d0b..2f3f73dc04 100644 --- a/clients/typescript/test/satellite/serialization.test.ts +++ b/clients/typescript/test/satellite/serialization.test.ts @@ -10,7 +10,7 @@ import { DatabaseAdapter as SQLiteDatabaseAdapter } from '../../src/drivers/bett import { DatabaseAdapter as PgDatabaseAdapter } from '../../src/drivers/node-postgres/adapter' import { DatabaseAdapter as DatabaseAdapterInterface } from '../../src/electric/adapter' import { inferRelationsFromDb } from '../../src/util/relations' -import { satelliteDefaults } from '../../src/satellite/config' +import { SatelliteOpts } from '../../src/satellite/config' import { QueryBuilder, pgBuilder, @@ -18,6 +18,7 @@ import { } from '../../src/migrators/query-builder' import { makePgDatabase } from '../support/node-postgres' import { randomValue } from '../../src/util/random' +import { opts } from './common' test('serialize/deserialize row data', async (t) => { const rel: Relation = { @@ -276,11 +277,12 @@ test('Null mask uses bits as if they were a list', async (t) => { type MaybePromise = T | Promise 
type SetupFn = ( t: ExecutionContext -) => MaybePromise<[DatabaseAdapterInterface, QueryBuilder]> +) => MaybePromise<[DatabaseAdapterInterface, QueryBuilder, SatelliteOpts]> const setupSqlite: SetupFn = (t: ExecutionContext) => { const db = new Database(':memory:') t.teardown(() => db.close()) - return [new SQLiteDatabaseAdapter(db), sqliteBuilder] + const namespace = 'main' + return [new SQLiteDatabaseAdapter(db), sqliteBuilder, opts(namespace)] } let port = 4800 @@ -288,7 +290,8 @@ const setupPG: SetupFn = async (t: ExecutionContext) => { const dbName = `serialization-test-${randomValue()}` const { db, stop } = await makePgDatabase(dbName, port++) t.teardown(async () => await stop()) - return [new PgDatabaseAdapter(db), pgBuilder] + const namespace = 'public' + return [new PgDatabaseAdapter(db), pgBuilder, opts(namespace)] } ;( @@ -298,7 +301,7 @@ const setupPG: SetupFn = async (t: ExecutionContext) => { ] as const ).forEach(([dialect, setup]) => { test(`(${dialect}) Prioritize PG types in the schema before inferred SQLite types`, async (t) => { - const [adapter, builder] = await setup(t) + const [adapter, builder, defaults] = await setup(t) await adapter.run({ sql: 'CREATE TABLE bools (id INTEGER PRIMARY KEY, b INTEGER)', @@ -306,7 +309,7 @@ const setupPG: SetupFn = async (t: ExecutionContext) => { const sqliteInferredRelations = await inferRelationsFromDb( adapter, - satelliteDefaults, + defaults, builder ) const boolsInferredRelation = sqliteInferredRelations['bools'] @@ -364,11 +367,11 @@ const setupPG: SetupFn = async (t: ExecutionContext) => { }) test(`(${dialect}) Use incoming Relation types if not found in the schema`, async (t) => { - const [adapter, builder] = await setup(t) + const [adapter, builder, defaults] = await setup(t) const inferredRelations = await inferRelationsFromDb( adapter, - satelliteDefaults, + defaults, builder ) // Empty database diff --git a/clients/typescript/test/satellite/sqlite/process.migration.test.ts b/clients/typescript/test/satellite/sqlite/process.migration.test.ts index 505d75bddd..060d3eef0e 100644 --- a/clients/typescript/test/satellite/sqlite/process.migration.test.ts +++ b/clients/typescript/test/satellite/sqlite/process.migration.test.ts @@ -11,7 +11,8 @@ import { const test = testAny as TestFn test.beforeEach(async (t) => { - await makeContext(t) + const namespace = 'main' + await makeContext(t, namespace) t.context.getMatchingShadowEntries = getSQLiteMatchingShadowEntries t.context.builder = sqliteBuilder await commonSetup(t) diff --git a/clients/typescript/test/satellite/sqlite/process.tags.test.ts b/clients/typescript/test/satellite/sqlite/process.tags.test.ts index 31738feb7f..dd9d5753ab 100644 --- a/clients/typescript/test/satellite/sqlite/process.tags.test.ts +++ b/clients/typescript/test/satellite/sqlite/process.tags.test.ts @@ -7,7 +7,8 @@ import { getMatchingShadowEntries } from '../../support/satellite-helpers' const test = anyTest as TestFn test.beforeEach(async (t) => { - await makeContext(t) + const namespace = 'main' + await makeContext(t, namespace) t.context.getMatchingShadowEntries = getMatchingShadowEntries }) test.afterEach.always(cleanAndStopSatellite) diff --git a/clients/typescript/test/satellite/sqlite/process.test.ts b/clients/typescript/test/satellite/sqlite/process.test.ts index ec73fc687a..5c43bbf515 100644 --- a/clients/typescript/test/satellite/sqlite/process.test.ts +++ b/clients/typescript/test/satellite/sqlite/process.test.ts @@ -6,12 +6,18 @@ import { makeContext, cleanAndStopSatellite } from '../common' 
import { sqliteBuilder } from '../../../src/migrators/query-builder' import { processTests, ContextType } from '../process.test' +import { QualifiedTablename } from '../../../src/util' const test = anyTest as TestFn test.beforeEach(async (t) => { - await makeContext(t) + const namespace = 'main' + await makeContext(t, namespace) t.context.builder = sqliteBuilder t.context.getMatchingShadowEntries = getSQLiteMatchingShadowEntries + t.context.qualifiedParentTableName = new QualifiedTablename( + namespace, + 'parent' + ).toString() }) test.afterEach.always(cleanAndStopSatellite) diff --git a/clients/typescript/test/satellite/sqlite/process.timing.test.ts b/clients/typescript/test/satellite/sqlite/process.timing.test.ts index 43caa89d68..135346b8ed 100644 --- a/clients/typescript/test/satellite/sqlite/process.timing.test.ts +++ b/clients/typescript/test/satellite/sqlite/process.timing.test.ts @@ -1,9 +1,9 @@ import anyTest, { TestFn } from 'ava' -import { processTimingTests, opts } from '../process.timing.test' +import { processTimingTests } from '../process.timing.test' import { makeContext, clean, ContextType } from '../common' const test = anyTest as TestFn -test.beforeEach(async (t) => makeContext(t, opts)) +test.beforeEach(async (t) => makeContext(t, 'main')) test.afterEach.always(clean) processTimingTests(test) diff --git a/clients/typescript/test/support/migrations/pg-migrations.js b/clients/typescript/test/support/migrations/pg-migrations.js index e2b018058b..de8e54425c 100644 --- a/clients/typescript/test/support/migrations/pg-migrations.js +++ b/clients/typescript/test/support/migrations/pg-migrations.js @@ -9,29 +9,29 @@ export default [ { statements: [ - 'DROP TABLE IF EXISTS main._electric_trigger_settings;', - 'CREATE TABLE main._electric_trigger_settings(namespace TEXT, tablename TEXT, flag INTEGER, PRIMARY KEY (namespace, tablename));', + 'DROP TABLE IF EXISTS public._electric_trigger_settings;', + 'CREATE TABLE public._electric_trigger_settings(namespace TEXT, tablename TEXT, flag INTEGER, PRIMARY KEY (namespace, tablename));', ], version: '1', }, { statements: [ - 'CREATE TABLE IF NOT EXISTS main.items (\n value TEXT PRIMARY KEY NOT NULL\n);', - 'CREATE TABLE IF NOT EXISTS main."bigIntTable" (\n value BIGINT PRIMARY KEY NOT NULL\n);', - 'CREATE TABLE IF NOT EXISTS main.parent (\n id INTEGER PRIMARY KEY NOT NULL,\n value TEXT,\n other INTEGER DEFAULT 0\n);', - 'CREATE TABLE IF NOT EXISTS main.child (\n id INTEGER PRIMARY KEY NOT NULL,\n parent INTEGER NOT NULL,\n FOREIGN KEY(parent) REFERENCES main.parent(id) DEFERRABLE INITIALLY IMMEDIATE\n);', - 'CREATE TABLE "main"."blobTable" (value bytea NOT NULL, CONSTRAINT "blobTable_pkey" PRIMARY KEY (value)\n);', - 'DROP TABLE IF EXISTS main._electric_trigger_settings;', - 'CREATE TABLE main._electric_trigger_settings(namespace TEXT, tablename TEXT, flag INTEGER, PRIMARY KEY (namespace, tablename));', - "INSERT INTO main._electric_trigger_settings(namespace, tablename,flag) VALUES ('main', 'child', 1);", - "INSERT INTO main._electric_trigger_settings(namespace, tablename,flag) VALUES ('main', 'items', 1);", - "INSERT INTO main._electric_trigger_settings(namespace, tablename,flag) VALUES ('main', 'parent', 1);", - "INSERT INTO main._electric_trigger_settings(namespace, tablename,flag) VALUES ('main', 'bigIntTable', 1);", - "INSERT INTO main._electric_trigger_settings(namespace, tablename,flag) VALUES ('main', 'blobTable', 1);", - - 'DROP TRIGGER IF EXISTS update_ensure_main_child_primarykey ON main.child;', - ` - CREATE OR REPLACE 
FUNCTION update_ensure_main_child_primarykey_function() + 'CREATE TABLE IF NOT EXISTS public.items (\n value TEXT PRIMARY KEY NOT NULL\n);', + 'CREATE TABLE IF NOT EXISTS public."bigIntTable" (\n value BIGINT PRIMARY KEY NOT NULL\n);', + 'CREATE TABLE IF NOT EXISTS public.parent (\n id INTEGER PRIMARY KEY NOT NULL,\n value TEXT,\n other INTEGER DEFAULT 0\n);', + 'CREATE TABLE IF NOT EXISTS public.child (\n id INTEGER PRIMARY KEY NOT NULL,\n parent INTEGER NOT NULL,\n FOREIGN KEY(parent) REFERENCES public.parent(id) DEFERRABLE INITIALLY IMMEDIATE\n);', + 'CREATE TABLE "public"."blobTable" (value bytea NOT NULL, CONSTRAINT "blobTable_pkey" PRIMARY KEY (value)\n);', + 'DROP TABLE IF EXISTS public._electric_trigger_settings;', + 'CREATE TABLE public._electric_trigger_settings(namespace TEXT, tablename TEXT, flag INTEGER, PRIMARY KEY (namespace, tablename));', + "INSERT INTO public._electric_trigger_settings(namespace, tablename,flag) VALUES ('public', 'child', 1);", + "INSERT INTO public._electric_trigger_settings(namespace, tablename,flag) VALUES ('public', 'items', 1);", + "INSERT INTO public._electric_trigger_settings(namespace, tablename,flag) VALUES ('public', 'parent', 1);", + "INSERT INTO public._electric_trigger_settings(namespace, tablename,flag) VALUES ('public', 'bigIntTable', 1);", + "INSERT INTO public._electric_trigger_settings(namespace, tablename,flag) VALUES ('public', 'blobTable', 1);", + + 'DROP TRIGGER IF EXISTS update_ensure_public_child_primarykey ON public.child;', + ` + CREATE OR REPLACE FUNCTION update_ensure_public_child_primarykey_function() RETURNS TRIGGER AS $$ BEGIN IF old.id != new.id THEN @@ -41,28 +41,28 @@ export default [ END; $$ LANGUAGE plpgsql;`, ` - CREATE TRIGGER update_ensure_main_child_primarykey - BEFORE UPDATE ON main.child + CREATE TRIGGER update_ensure_public_child_primarykey + BEFORE UPDATE ON public.child FOR EACH ROW - EXECUTE FUNCTION update_ensure_main_child_primarykey_function(); + EXECUTE FUNCTION update_ensure_public_child_primarykey_function(); `, - 'DROP TRIGGER IF EXISTS insert_main_child_into_oplog ON main.child', + 'DROP TRIGGER IF EXISTS insert_public_child_into_oplog ON public.child', ` - CREATE OR REPLACE FUNCTION insert_main_child_into_oplog_function() + CREATE OR REPLACE FUNCTION insert_public_child_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'child'; + SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'child'; IF flag_value = 1 THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) - VALUES ('main', 'child', 'INSERT', json_strip_nulls(json_build_object('id', NEW.id)), jsonb_build_object('id', NEW.id, 'parent', NEW.parent), NULL, NULL); + INSERT INTO public._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ('public', 'child', 'INSERT', json_strip_nulls(json_build_object('id', NEW.id)), jsonb_build_object('id', NEW.id, 'parent', NEW.parent), NULL, NULL); END IF; RETURN NEW; @@ -72,27 +72,27 @@ export default [ `, ` - CREATE TRIGGER insert_main_child_into_oplog - AFTER INSERT ON main.child + CREATE TRIGGER insert_public_child_into_oplog + AFTER INSERT ON public.child FOR EACH ROW - EXECUTE FUNCTION 
insert_main_child_into_oplog_function(); + EXECUTE FUNCTION insert_public_child_into_oplog_function(); `, - 'DROP TRIGGER IF EXISTS update_main_child_into_oplog ON main.child;', + 'DROP TRIGGER IF EXISTS update_public_child_into_oplog ON public.child;', ` - CREATE OR REPLACE FUNCTION update_main_child_into_oplog_function() + CREATE OR REPLACE FUNCTION update_public_child_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'child'; + SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'child'; IF flag_value = 1 THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) - VALUES ('main', 'child', 'UPDATE', json_strip_nulls(json_build_object('id', NEW.id)), jsonb_build_object('id', NEW.id, 'parent', NEW.parent), jsonb_build_object('id', OLD.id, 'parent', OLD.parent), NULL); + INSERT INTO public._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ('public', 'child', 'UPDATE', json_strip_nulls(json_build_object('id', NEW.id)), jsonb_build_object('id', NEW.id, 'parent', NEW.parent), jsonb_build_object('id', OLD.id, 'parent', OLD.parent), NULL); END IF; RETURN NEW; @@ -101,27 +101,27 @@ export default [ $$ LANGUAGE plpgsql; `, ` - CREATE TRIGGER update_main_child_into_oplog - AFTER UPDATE ON main.child + CREATE TRIGGER update_public_child_into_oplog + AFTER UPDATE ON public.child FOR EACH ROW - EXECUTE FUNCTION update_main_child_into_oplog_function(); + EXECUTE FUNCTION update_public_child_into_oplog_function(); `, - 'DROP TRIGGER IF EXISTS delete_main_child_into_oplog ON main.child;', + 'DROP TRIGGER IF EXISTS delete_public_child_into_oplog ON public.child;', ` - CREATE OR REPLACE FUNCTION delete_main_child_into_oplog_function() + CREATE OR REPLACE FUNCTION delete_public_child_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'child'; + SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'child'; IF flag_value = 1 THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) - VALUES ('main', 'child', 'DELETE', json_strip_nulls(json_build_object('id', OLD.id)), NULL, jsonb_build_object('id', OLD.id, 'parent', OLD.parent), NULL); + INSERT INTO public._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ('public', 'child', 'DELETE', json_strip_nulls(json_build_object('id', OLD.id)), NULL, jsonb_build_object('id', OLD.id, 'parent', OLD.parent), NULL); END IF; RETURN NEW; @@ -130,30 +130,30 @@ export default [ $$ LANGUAGE plpgsql; `, ` - CREATE TRIGGER delete_main_child_into_oplog - AFTER DELETE ON main.child + CREATE TRIGGER delete_public_child_into_oplog + AFTER DELETE ON public.child FOR EACH ROW - EXECUTE FUNCTION delete_main_child_into_oplog_function(); + EXECUTE FUNCTION delete_public_child_into_oplog_function(); `, - 'DROP TRIGGER IF EXISTS compensation_insert_main_child_parent_into_oplog ON main.child;', + 'DROP TRIGGER IF EXISTS 
compensation_insert_public_child_parent_into_oplog ON public.child;', ` - CREATE OR REPLACE FUNCTION compensation_insert_main_child_parent_into_oplog_function() + CREATE OR REPLACE FUNCTION compensation_insert_public_child_parent_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; meta_value TEXT; BEGIN - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent'; + SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'parent'; - SELECT value INTO meta_value FROM main._electric_meta WHERE key = 'compensations'; + SELECT value INTO meta_value FROM public._electric_meta WHERE key = 'compensations'; IF flag_value = 1 AND meta_value = '1' THEN - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) - SELECT 'main', 'parent', 'INSERT', json_strip_nulls(json_build_object('id', id)), + INSERT INTO public._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + SELECT 'public', 'parent', 'INSERT', json_strip_nulls(json_build_object('id', id)), jsonb_build_object('id', id, 'value', value, 'other', other), NULL, NULL - FROM main.parent WHERE id = NEW."parent"; + FROM public.parent WHERE id = NEW."parent"; END IF; RETURN NEW; @@ -162,15 +162,15 @@ export default [ $$ LANGUAGE plpgsql; `, ` - CREATE TRIGGER compensation_insert_main_child_parent_into_oplog - AFTER INSERT ON main.child + CREATE TRIGGER compensation_insert_public_child_parent_into_oplog + AFTER INSERT ON public.child FOR EACH ROW - EXECUTE FUNCTION compensation_insert_main_child_parent_into_oplog_function(); + EXECUTE FUNCTION compensation_insert_public_child_parent_into_oplog_function(); `, - 'DROP TRIGGER IF EXISTS compensation_update_main_child_parent_into_oplog ON main.parent;', + 'DROP TRIGGER IF EXISTS compensation_update_public_child_parent_into_oplog ON public.parent;', ` - CREATE OR REPLACE FUNCTION compensation_update_main_child_parent_into_oplog_function() + CREATE OR REPLACE FUNCTION compensation_update_public_child_parent_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE @@ -178,17 +178,17 @@ export default [ meta_value TEXT; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent'; + SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'parent'; -- Get the 'compensations' value from _electric_meta - SELECT value INTO meta_value FROM main._electric_meta WHERE key = 'compensations'; + SELECT value INTO meta_value FROM public._electric_meta WHERE key = 'compensations'; IF flag_value = 1 AND meta_value = '1' THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) - SELECT 'main', 'parent', 'UPDATE', json_strip_nulls(json_build_object('id', id)), + INSERT INTO public._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + SELECT 'public', 'parent', 'UPDATE', json_strip_nulls(json_build_object('id', id)), jsonb_build_object('id', id, 'value', value, 'other', other), NULL, NULL - FROM main.parent WHERE id = NEW."parent"; + FROM public.parent WHERE id = NEW."parent"; END IF; RETURN NEW; @@ -197,15 +197,15 @@ export default [ $$ LANGUAGE plpgsql; `, ` - CREATE TRIGGER 
compensation_update_main_child_parent_into_oplog - AFTER UPDATE ON main.child + CREATE TRIGGER compensation_update_public_child_parent_into_oplog + AFTER UPDATE ON public.child FOR EACH ROW - EXECUTE FUNCTION compensation_update_main_child_parent_into_oplog_function(); + EXECUTE FUNCTION compensation_update_public_child_parent_into_oplog_function(); `, - 'DROP TRIGGER IF EXISTS update_ensure_main_items_primarykey ON main.items;', + 'DROP TRIGGER IF EXISTS update_ensure_public_items_primarykey ON public.items;', ` - CREATE OR REPLACE FUNCTION update_ensure_main_items_primarykey_function() + CREATE OR REPLACE FUNCTION update_ensure_public_items_primarykey_function() RETURNS TRIGGER AS $$ BEGIN IF old.value != new.value THEN @@ -215,27 +215,27 @@ export default [ END; $$ LANGUAGE plpgsql;`, ` - CREATE TRIGGER update_ensure_main_items_primarykey - BEFORE UPDATE ON main.items + CREATE TRIGGER update_ensure_public_items_primarykey + BEFORE UPDATE ON public.items FOR EACH ROW - EXECUTE FUNCTION update_ensure_main_items_primarykey_function(); + EXECUTE FUNCTION update_ensure_public_items_primarykey_function(); `, - 'DROP TRIGGER IF EXISTS insert_main_items_into_oplog ON main.items;', + 'DROP TRIGGER IF EXISTS insert_public_items_into_oplog ON public.items;', ` - CREATE OR REPLACE FUNCTION insert_main_items_into_oplog_function() + CREATE OR REPLACE FUNCTION insert_public_items_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'items'; + SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'items'; IF flag_value = 1 THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) - VALUES ('main', 'items', 'INSERT', json_strip_nulls(json_build_object('value', NEW.value)), jsonb_build_object('value', NEW.value), NULL, NULL); + INSERT INTO public._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ('public', 'items', 'INSERT', json_strip_nulls(json_build_object('value', NEW.value)), jsonb_build_object('value', NEW.value), NULL, NULL); END IF; RETURN NEW; @@ -246,27 +246,27 @@ export default [ ` -- Attach the trigger function to the table - CREATE TRIGGER insert_main_items_into_oplog - AFTER INSERT ON main.items + CREATE TRIGGER insert_public_items_into_oplog + AFTER INSERT ON public.items FOR EACH ROW - EXECUTE FUNCTION insert_main_items_into_oplog_function(); + EXECUTE FUNCTION insert_public_items_into_oplog_function(); `, - 'DROP TRIGGER IF EXISTS update_main_items_into_oplog ON main.items;', + 'DROP TRIGGER IF EXISTS update_public_items_into_oplog ON public.items;', ` - CREATE OR REPLACE FUNCTION update_main_items_into_oplog_function() + CREATE OR REPLACE FUNCTION update_public_items_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'items'; + SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'items'; IF flag_value = 1 THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) - 
VALUES ('main', 'items', 'UPDATE', json_strip_nulls(json_build_object('value', NEW.value)), jsonb_build_object('value', NEW.value), jsonb_build_object('value', OLD.value), NULL); + INSERT INTO public._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ('public', 'items', 'UPDATE', json_strip_nulls(json_build_object('value', NEW.value)), jsonb_build_object('value', NEW.value), jsonb_build_object('value', OLD.value), NULL); END IF; RETURN NEW; @@ -276,27 +276,27 @@ export default [ ` -- Attach the trigger function to the table - CREATE TRIGGER update_main_items_into_oplog - AFTER UPDATE ON main.items + CREATE TRIGGER update_public_items_into_oplog + AFTER UPDATE ON public.items FOR EACH ROW - EXECUTE FUNCTION update_main_items_into_oplog_function(); + EXECUTE FUNCTION update_public_items_into_oplog_function(); `, - 'DROP TRIGGER IF EXISTS delete_main_items_into_oplog ON main.items;', + 'DROP TRIGGER IF EXISTS delete_public_items_into_oplog ON public.items;', ` - CREATE OR REPLACE FUNCTION delete_main_items_into_oplog_function() + CREATE OR REPLACE FUNCTION delete_public_items_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'items'; + SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'items'; IF flag_value = 1 THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) - VALUES ('main', 'items', 'DELETE', json_strip_nulls(json_build_object('value', OLD.value)), NULL, jsonb_build_object('value', OLD.value), NULL); + INSERT INTO public._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ('public', 'items', 'DELETE', json_strip_nulls(json_build_object('value', OLD.value)), NULL, jsonb_build_object('value', OLD.value), NULL); END IF; RETURN OLD; @@ -305,15 +305,15 @@ export default [ $$ LANGUAGE plpgsql;`, ` -- Attach the trigger function to the table - CREATE TRIGGER delete_main_items_into_oplog - AFTER DELETE ON main.items + CREATE TRIGGER delete_public_items_into_oplog + AFTER DELETE ON public.items FOR EACH ROW - EXECUTE FUNCTION delete_main_items_into_oplog_function(); + EXECUTE FUNCTION delete_public_items_into_oplog_function(); `, - 'DROP TRIGGER IF EXISTS update_ensure_main_parent_primarykey ON main.parent;', + 'DROP TRIGGER IF EXISTS update_ensure_public_parent_primarykey ON public.parent;', ` - CREATE OR REPLACE FUNCTION update_ensure_main_parent_primarykey_function() + CREATE OR REPLACE FUNCTION update_ensure_public_parent_primarykey_function() RETURNS TRIGGER AS $$ BEGIN IF OLD.id != NEW.id THEN @@ -326,28 +326,28 @@ export default [ ` -- Attach the trigger function to the table - CREATE TRIGGER update_ensure_main_parent_primarykey - BEFORE UPDATE ON main.parent + CREATE TRIGGER update_ensure_public_parent_primarykey + BEFORE UPDATE ON public.parent FOR EACH ROW - EXECUTE FUNCTION update_ensure_main_parent_primarykey_function(); + EXECUTE FUNCTION update_ensure_public_parent_primarykey_function(); `, - 'DROP TRIGGER IF EXISTS insert_main_parent_into_oplog ON main.parent;', + 'DROP TRIGGER IF EXISTS insert_public_parent_into_oplog ON public.parent;', ` - CREATE OR REPLACE FUNCTION insert_main_parent_into_oplog_function() + CREATE OR REPLACE FUNCTION 
insert_public_parent_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent'; + SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'parent'; IF flag_value = 1 THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + INSERT INTO public._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) VALUES ( - 'main', + 'public', 'parent', 'INSERT', json_strip_nulls(json_build_object('id', NEW.id)), @@ -365,29 +365,29 @@ export default [ ` -- Attach the trigger function to the table - CREATE TRIGGER insert_main_parent_into_oplog - AFTER INSERT ON main.parent + CREATE TRIGGER insert_public_parent_into_oplog + AFTER INSERT ON public.parent FOR EACH ROW - EXECUTE FUNCTION insert_main_parent_into_oplog_function(); + EXECUTE FUNCTION insert_public_parent_into_oplog_function(); `, - 'DROP TRIGGER IF EXISTS update_main_parent_into_oplog ON main.parent;', + 'DROP TRIGGER IF EXISTS update_public_parent_into_oplog ON public.parent;', ` - CREATE OR REPLACE FUNCTION update_main_parent_into_oplog_function() + CREATE OR REPLACE FUNCTION update_public_parent_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent'; + SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'parent'; IF flag_value = 1 THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + INSERT INTO public._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) VALUES ( - 'main', + 'public', 'parent', 'UPDATE', json_strip_nulls(json_build_object('id', NEW.id)), @@ -405,29 +405,29 @@ export default [ ` -- Attach the trigger function to the table - CREATE TRIGGER update_main_parent_into_oplog - AFTER UPDATE ON main.parent + CREATE TRIGGER update_public_parent_into_oplog + AFTER UPDATE ON public.parent FOR EACH ROW - EXECUTE FUNCTION update_main_parent_into_oplog_function(); + EXECUTE FUNCTION update_public_parent_into_oplog_function(); `, - 'DROP TRIGGER IF EXISTS delete_main_parent_into_oplog ON main.parent;', + 'DROP TRIGGER IF EXISTS delete_public_parent_into_oplog ON public.parent;', ` - CREATE OR REPLACE FUNCTION delete_main_parent_into_oplog_function() + CREATE OR REPLACE FUNCTION delete_public_parent_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent'; + SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'parent'; IF flag_value = 1 THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + INSERT INTO public._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) VALUES ( - 'main', + 'public', 'parent', 
'DELETE', json_strip_nulls(json_build_object('id', OLD.id)), @@ -445,25 +445,25 @@ export default [ ` -- Attach the trigger function to the table - CREATE TRIGGER delete_main_parent_into_oplog - AFTER DELETE ON main.parent + CREATE TRIGGER delete_public_parent_into_oplog + AFTER DELETE ON public.parent FOR EACH ROW - EXECUTE FUNCTION delete_main_parent_into_oplog_function(); + EXECUTE FUNCTION delete_public_parent_into_oplog_function(); `, ` -- Toggles for turning the triggers on and off - INSERT INTO "main"."_electric_trigger_settings" ("namespace", "tablename", "flag") - VALUES ('main', 'bigIntTable', 1) + INSERT INTO "public"."_electric_trigger_settings" ("namespace", "tablename", "flag") + VALUES ('public', 'bigIntTable', 1) ON CONFLICT DO NOTHING; `, ` /* Triggers for table bigIntTable */ -- ensures primary key is immutable - DROP TRIGGER IF EXISTS update_ensure_main_bigIntTable_primarykey ON "main"."bigIntTable"; + DROP TRIGGER IF EXISTS update_ensure_public_bigIntTable_primarykey ON "public"."bigIntTable"; `, ` - CREATE OR REPLACE FUNCTION update_ensure_main_bigIntTable_primarykey_function() + CREATE OR REPLACE FUNCTION update_ensure_public_bigIntTable_primarykey_function() RETURNS TRIGGER AS $$ BEGIN IF OLD."value" IS DISTINCT FROM NEW."value" THEN @@ -474,30 +474,30 @@ export default [ $$ LANGUAGE plpgsql; `, ` - CREATE TRIGGER update_ensure_main_bigIntTable_primarykey - BEFORE UPDATE ON "main"."bigIntTable" + CREATE TRIGGER update_ensure_public_bigIntTable_primarykey + BEFORE UPDATE ON "public"."bigIntTable" FOR EACH ROW - EXECUTE FUNCTION update_ensure_main_bigIntTable_primarykey_function(); + EXECUTE FUNCTION update_ensure_public_bigIntTable_primarykey_function(); `, ` -- Triggers that add INSERT, UPDATE, DELETE operation to the oplog table - DROP TRIGGER IF EXISTS insert_main_bigIntTable_into_oplog ON "main"."bigIntTable"; + DROP TRIGGER IF EXISTS insert_public_bigIntTable_into_oplog ON "public"."bigIntTable"; `, ` - CREATE OR REPLACE FUNCTION insert_main_bigIntTable_into_oplog_function() + CREATE OR REPLACE FUNCTION insert_public_bigIntTable_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'bigIntTable'; + SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'bigIntTable'; IF flag_value = 1 THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + INSERT INTO public._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) VALUES ( - 'main', + 'public', 'bigIntTable', 'INSERT', json_strip_nulls(json_build_object('value', cast(new."value" as TEXT))), @@ -513,27 +513,27 @@ export default [ $$ LANGUAGE plpgsql; `, ` - CREATE TRIGGER insert_main_bigIntTable_into_oplog - AFTER INSERT ON "main"."bigIntTable" + CREATE TRIGGER insert_public_bigIntTable_into_oplog + AFTER INSERT ON "public"."bigIntTable" FOR EACH ROW - EXECUTE FUNCTION insert_main_bigIntTable_into_oplog_function(); + EXECUTE FUNCTION insert_public_bigIntTable_into_oplog_function(); `, - 'DROP TRIGGER IF EXISTS update_main_bigIntTable_into_oplog ON "main"."bigIntTable";', + 'DROP TRIGGER IF EXISTS update_public_bigIntTable_into_oplog ON "public"."bigIntTable";', ` - CREATE OR REPLACE FUNCTION update_main_bigIntTable_into_oplog_function() + CREATE 
OR REPLACE FUNCTION update_public_bigIntTable_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'bigIntTable'; + SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'bigIntTable'; IF flag_value = 1 THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + INSERT INTO public._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) VALUES ( - 'main', + 'public', 'bigIntTable', 'UPDATE', json_strip_nulls(json_build_object('value', cast(new."value" as TEXT))), @@ -549,27 +549,27 @@ export default [ $$ LANGUAGE plpgsql; `, ` - CREATE TRIGGER update_main_bigIntTable_into_oplog - AFTER UPDATE ON "main"."bigIntTable" + CREATE TRIGGER update_public_bigIntTable_into_oplog + AFTER UPDATE ON "public"."bigIntTable" FOR EACH ROW - EXECUTE FUNCTION update_main_bigIntTable_into_oplog_function(); + EXECUTE FUNCTION update_public_bigIntTable_into_oplog_function(); `, - 'DROP TRIGGER IF EXISTS delete_main_bigIntTable_into_oplog ON "main"."bigIntTable";', + 'DROP TRIGGER IF EXISTS delete_public_bigIntTable_into_oplog ON "public"."bigIntTable";', ` - CREATE OR REPLACE FUNCTION delete_main_bigIntTable_into_oplog_function() + CREATE OR REPLACE FUNCTION delete_public_bigIntTable_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'bigIntTable'; + SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'bigIntTable'; IF flag_value = 1 THEN -- Insert into _electric_oplog - INSERT INTO main._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + INSERT INTO public._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) VALUES ( - 'main', + 'public', 'bigIntTable', 'DELETE', json_strip_nulls(json_build_object('value', cast(old."value" as TEXT))), @@ -585,23 +585,23 @@ export default [ $$ LANGUAGE plpgsql; `, ` - CREATE TRIGGER delete_main_bigIntTable_into_oplog - AFTER DELETE ON "main"."bigIntTable" + CREATE TRIGGER delete_public_bigIntTable_into_oplog + AFTER DELETE ON "public"."bigIntTable" FOR EACH ROW - EXECUTE FUNCTION delete_main_bigIntTable_into_oplog_function(); - `, - 'DROP TRIGGER IF EXISTS update_ensure_main_blobTable_primarykey ON "main"."blobTable";', - 'CREATE OR REPLACE FUNCTION update_ensure_main_blobTable_primarykey_function()\nRETURNS TRIGGER AS $$\nBEGIN\n IF OLD."value" IS DISTINCT FROM NEW."value" THEN\n RAISE EXCEPTION \'Cannot change the value of column value as it belongs to the primary key\';\n END IF;\n RETURN NEW;\nEND;\n$$ LANGUAGE plpgsql;', - 'CREATE TRIGGER update_ensure_main_blobTable_primarykey\n BEFORE UPDATE ON "main"."blobTable"\n FOR EACH ROW\n EXECUTE FUNCTION update_ensure_main_blobTable_primarykey_function();', - 'DROP TRIGGER IF EXISTS insert_main_blobTable_into_oplog ON "main"."blobTable";', - "CREATE OR REPLACE FUNCTION insert_main_blobTable_into_oplog_function()\n RETURNS TRIGGER AS $$\n BEGIN\n DECLARE\n flag_value INTEGER;\n BEGIN\n -- Get the flag value from 
_electric_trigger_settings\n SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'blobTable';\n\n IF flag_value = 1 THEN\n -- Insert into _electric_oplog\n INSERT INTO main._electric_oplog (namespace, tablename, optype, \"primaryKey\", \"newRow\", \"oldRow\", timestamp)\n VALUES (\n 'main',\n 'blobTable',\n 'INSERT',\n json_strip_nulls(json_build_object('value', CASE WHEN new.\"value\" IS NOT NULL THEN encode(new.\"value\"::bytea, 'hex') ELSE NULL END)),\n jsonb_build_object('value', CASE WHEN new.\"value\" IS NOT NULL THEN encode(new.\"value\"::bytea, 'hex') ELSE NULL END),\n NULL,\n NULL\n );\n END IF;\n\n RETURN NEW;\n END;\n END;\n $$ LANGUAGE plpgsql;", - 'CREATE TRIGGER insert_main_blobTable_into_oplog\n AFTER INSERT ON "main"."blobTable"\n FOR EACH ROW\n EXECUTE FUNCTION insert_main_blobTable_into_oplog_function();', - 'DROP TRIGGER IF EXISTS update_main_blobTable_into_oplog ON "main"."blobTable";', - "CREATE OR REPLACE FUNCTION update_main_blobTable_into_oplog_function()\n RETURNS TRIGGER AS $$\n BEGIN\n DECLARE\n flag_value INTEGER;\n BEGIN\n -- Get the flag value from _electric_trigger_settings\n SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'blobTable';\n\n IF flag_value = 1 THEN\n -- Insert into _electric_oplog\n INSERT INTO main._electric_oplog (namespace, tablename, optype, \"primaryKey\", \"newRow\", \"oldRow\", timestamp)\n VALUES (\n 'main',\n 'blobTable',\n 'UPDATE',\n json_strip_nulls(json_build_object('value', CASE WHEN new.\"value\" IS NOT NULL THEN encode(new.\"value\"::bytea, 'hex') ELSE NULL END)),\n jsonb_build_object('value', CASE WHEN new.\"value\" IS NOT NULL THEN encode(new.\"value\"::bytea, 'hex') ELSE NULL END),\n jsonb_build_object('value', CASE WHEN old.\"value\" IS NOT NULL THEN encode(old.\"value\"::bytea, 'hex') ELSE NULL END),\n NULL\n );\n END IF;\n\n RETURN NEW;\n END;\n END;\n $$ LANGUAGE plpgsql;", - 'CREATE TRIGGER update_main_blobTable_into_oplog\n AFTER UPDATE ON "main"."blobTable"\n FOR EACH ROW\n EXECUTE FUNCTION update_main_blobTable_into_oplog_function();', - 'DROP TRIGGER IF EXISTS delete_main_blobTable_into_oplog ON "main"."blobTable";', - "CREATE OR REPLACE FUNCTION delete_main_blobTable_into_oplog_function()\n RETURNS TRIGGER AS $$\n BEGIN\n DECLARE\n flag_value INTEGER;\n BEGIN\n -- Get the flag value from _electric_trigger_settings\n SELECT flag INTO flag_value FROM main._electric_trigger_settings WHERE namespace = 'main' AND tablename = 'blobTable';\n\n IF flag_value = 1 THEN\n -- Insert into _electric_oplog\n INSERT INTO main._electric_oplog (namespace, tablename, optype, \"primaryKey\", \"newRow\", \"oldRow\", timestamp)\n VALUES (\n 'main',\n 'blobTable',\n 'DELETE',\n json_strip_nulls(json_build_object('value', CASE WHEN old.\"value\" IS NOT NULL THEN encode(old.\"value\"::bytea, 'hex') ELSE NULL END)),\n NULL,\n jsonb_build_object('value', CASE WHEN old.\"value\" IS NOT NULL THEN encode(old.\"value\"::bytea, 'hex') ELSE NULL END),\n NULL\n );\n END IF;\n\n RETURN NEW;\n END;\n END;\n $$ LANGUAGE plpgsql;", - 'CREATE TRIGGER delete_main_blobTable_into_oplog\n AFTER DELETE ON "main"."blobTable"\n FOR EACH ROW\n EXECUTE FUNCTION delete_main_blobTable_into_oplog_function();', + EXECUTE FUNCTION delete_public_bigIntTable_into_oplog_function(); + `, + 'DROP TRIGGER IF EXISTS update_ensure_public_blobTable_primarykey ON "public"."blobTable";', + 'CREATE OR REPLACE FUNCTION 
update_ensure_public_blobTable_primarykey_function()\nRETURNS TRIGGER AS $$\nBEGIN\n IF OLD."value" IS DISTINCT FROM NEW."value" THEN\n RAISE EXCEPTION \'Cannot change the value of column value as it belongs to the primary key\';\n END IF;\n RETURN NEW;\nEND;\n$$ LANGUAGE plpgsql;', + 'CREATE TRIGGER update_ensure_public_blobTable_primarykey\n BEFORE UPDATE ON "public"."blobTable"\n FOR EACH ROW\n EXECUTE FUNCTION update_ensure_public_blobTable_primarykey_function();', + 'DROP TRIGGER IF EXISTS insert_public_blobTable_into_oplog ON "public"."blobTable";', + "CREATE OR REPLACE FUNCTION insert_public_blobTable_into_oplog_function()\n RETURNS TRIGGER AS $$\n BEGIN\n DECLARE\n flag_value INTEGER;\n BEGIN\n -- Get the flag value from _electric_trigger_settings\n SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'blobTable';\n\n IF flag_value = 1 THEN\n -- Insert into _electric_oplog\n INSERT INTO public._electric_oplog (namespace, tablename, optype, \"primaryKey\", \"newRow\", \"oldRow\", timestamp)\n VALUES (\n 'public',\n 'blobTable',\n 'INSERT',\n json_strip_nulls(json_build_object('value', CASE WHEN new.\"value\" IS NOT NULL THEN encode(new.\"value\"::bytea, 'hex') ELSE NULL END)),\n jsonb_build_object('value', CASE WHEN new.\"value\" IS NOT NULL THEN encode(new.\"value\"::bytea, 'hex') ELSE NULL END),\n NULL,\n NULL\n );\n END IF;\n\n RETURN NEW;\n END;\n END;\n $$ LANGUAGE plpgsql;", + 'CREATE TRIGGER insert_public_blobTable_into_oplog\n AFTER INSERT ON "public"."blobTable"\n FOR EACH ROW\n EXECUTE FUNCTION insert_public_blobTable_into_oplog_function();', + 'DROP TRIGGER IF EXISTS update_public_blobTable_into_oplog ON "public"."blobTable";', + "CREATE OR REPLACE FUNCTION update_public_blobTable_into_oplog_function()\n RETURNS TRIGGER AS $$\n BEGIN\n DECLARE\n flag_value INTEGER;\n BEGIN\n -- Get the flag value from _electric_trigger_settings\n SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'blobTable';\n\n IF flag_value = 1 THEN\n -- Insert into _electric_oplog\n INSERT INTO public._electric_oplog (namespace, tablename, optype, \"primaryKey\", \"newRow\", \"oldRow\", timestamp)\n VALUES (\n 'public',\n 'blobTable',\n 'UPDATE',\n json_strip_nulls(json_build_object('value', CASE WHEN new.\"value\" IS NOT NULL THEN encode(new.\"value\"::bytea, 'hex') ELSE NULL END)),\n jsonb_build_object('value', CASE WHEN new.\"value\" IS NOT NULL THEN encode(new.\"value\"::bytea, 'hex') ELSE NULL END),\n jsonb_build_object('value', CASE WHEN old.\"value\" IS NOT NULL THEN encode(old.\"value\"::bytea, 'hex') ELSE NULL END),\n NULL\n );\n END IF;\n\n RETURN NEW;\n END;\n END;\n $$ LANGUAGE plpgsql;", + 'CREATE TRIGGER update_public_blobTable_into_oplog\n AFTER UPDATE ON "public"."blobTable"\n FOR EACH ROW\n EXECUTE FUNCTION update_public_blobTable_into_oplog_function();', + 'DROP TRIGGER IF EXISTS delete_public_blobTable_into_oplog ON "public"."blobTable";', + "CREATE OR REPLACE FUNCTION delete_public_blobTable_into_oplog_function()\n RETURNS TRIGGER AS $$\n BEGIN\n DECLARE\n flag_value INTEGER;\n BEGIN\n -- Get the flag value from _electric_trigger_settings\n SELECT flag INTO flag_value FROM public._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'blobTable';\n\n IF flag_value = 1 THEN\n -- Insert into _electric_oplog\n INSERT INTO public._electric_oplog (namespace, tablename, optype, \"primaryKey\", \"newRow\", \"oldRow\", timestamp)\n VALUES (\n 'public',\n 
'blobTable',\n 'DELETE',\n json_strip_nulls(json_build_object('value', CASE WHEN old.\"value\" IS NOT NULL THEN encode(old.\"value\"::bytea, 'hex') ELSE NULL END)),\n NULL,\n jsonb_build_object('value', CASE WHEN old.\"value\" IS NOT NULL THEN encode(old.\"value\"::bytea, 'hex') ELSE NULL END),\n NULL\n );\n END IF;\n\n RETURN NEW;\n END;\n END;\n $$ LANGUAGE plpgsql;", + 'CREATE TRIGGER delete_public_blobTable_into_oplog\n AFTER DELETE ON "public"."blobTable"\n FOR EACH ROW\n EXECUTE FUNCTION delete_public_blobTable_into_oplog_function();', ], version: '2', }, diff --git a/clients/typescript/test/support/satellite-helpers.ts b/clients/typescript/test/support/satellite-helpers.ts index 26dcf3b5db..dbf22231dc 100644 --- a/clients/typescript/test/support/satellite-helpers.ts +++ b/clients/typescript/test/support/satellite-helpers.ts @@ -25,17 +25,17 @@ interface TableSchema { columns: string[] } -export const initTableInfo = (): TableInfo => { +export const initTableInfo = (namespace: string): TableInfo => { return { - 'main.parent': { + [`${namespace}.parent`]: { primaryKey: ['id'], columns: ['id', 'value', 'other'], }, - 'main.child': { + [`${namespace}.child`]: { primaryKey: ['id'], columns: ['id', 'parent'], }, - 'main.Items': { + [`${namespace}.Items`]: { primaryKey: ['value'], columns: ['value', 'other'], }, @@ -44,10 +44,11 @@ export const initTableInfo = (): TableInfo => { export const loadSatelliteMetaTable = async ( db: DatabaseAdapter, + namespace: string, metaTableName = '_electric_meta' ): Promise => { const rows = await db.query({ - sql: `SELECT key, value FROM main.${metaTableName}`, + sql: `SELECT key, value FROM "${namespace}"."${metaTableName}"`, }) const entries = rows.map((x) => [x.key, x.value]) @@ -201,7 +202,8 @@ export async function getMatchingShadowEntries( adapter: DatabaseAdapter, oplog?: OplogEntry, builder: QueryBuilder = sqliteBuilder, - shadowTable = 'main._electric_shadow' + namespace: string = builder.defaultNamespace, + shadowTable = `"${namespace}"._electric_shadow` ): Promise { let query: Statement let selectTags = `SELECT namespace, tablename, "primaryKey", tags FROM ${shadowTable}` @@ -225,7 +227,14 @@ export async function getMatchingShadowEntries( export async function getPgMatchingShadowEntries( adapter: DatabaseAdapter, oplog?: OplogEntry, - shadowTable = 'main._electric_shadow' + namespace: string = 'public', + shadowTable = `"${namespace}"._electric_shadow` ): Promise { - return getMatchingShadowEntries(adapter, oplog, pgBuilder, shadowTable) + return getMatchingShadowEntries( + adapter, + oplog, + pgBuilder, + namespace, + shadowTable + ) } From 13e60986530fd99f2c46475b1b2c6a13856a5f0c Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 10 Apr 2024 12:10:40 +0200 Subject: [PATCH 054/156] Ported unit tests for SQL query building also to Postgres dialect --- .../typescript/src/client/model/builder.ts | 51 +- .../test/client/model/builder.test.ts | 533 ------------- .../typescript/test/client/model/builder.ts | 698 ++++++++++++++++++ .../client/model/postgres/builder.test.ts | 4 + .../test/client/model/sqlite/builder.test.ts | 4 + 5 files changed, 737 insertions(+), 553 deletions(-) delete mode 100644 clients/typescript/test/client/model/builder.test.ts create mode 100644 clients/typescript/test/client/model/builder.ts create mode 100644 clients/typescript/test/client/model/postgres/builder.test.ts create mode 100644 clients/typescript/test/client/model/sqlite/builder.test.ts diff --git a/clients/typescript/src/client/model/builder.ts 
b/clients/typescript/src/client/model/builder.ts index 333dbd1235..fb2fc888ad 100644 --- a/clients/typescript/src/client/model/builder.ts +++ b/clients/typescript/src/client/model/builder.ts @@ -51,11 +51,12 @@ export class Builder { public dialect: Dialect ) { this._fullyQualifiedTableName = `"${this._tableName}"` + squelPostgres.cls.DefaultQueryBuilderOptions.nameQuoteCharacter = '"' + squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteFieldNames = true + squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteAliasNames = true if (dialect === 'Postgres') { - squelPostgres.cls.DefaultQueryBuilderOptions.nameQuoteCharacter = '"' //squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteTableNames = true - squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteFieldNames = true - squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteAliasNames = true + // need to register it, otherwise squel complains that the Date type is not registered // as Squel does not support it out-of-the-box but our Postgres drivers do support it. squelPostgres.registerValueHandler(Date, (date) => date) @@ -127,7 +128,9 @@ export class Builder { i: DeleteManyInput, idRequired = false ): QueryBuilder { - const deleteQuery = squel.delete().from(this._fullyQualifiedTableName) + const deleteQuery = squelPostgres + .delete() + .from(this._fullyQualifiedTableName) const whereObject = i.where // safe because the schema for `where` adds an empty object as default which is provided if the `where` field is absent const fields = this.getFields(whereObject, idRequired) return addFilters(fields, whereObject, deleteQuery) @@ -284,7 +287,10 @@ export class Builder { ) const squelOrder = order === 'asc' // squel expects 'true' for ascending order, 'false' for descending order - return query.order(field, squelOrder) + // have to quote the field name ourselves + // because Squel does not seem to auto quote + // field names in order by statements + return query.order(`"${field}"`, squelOrder) }, q) } @@ -360,7 +366,7 @@ export function makeFilter( prefixFieldsWith = '' ): Array<{ sql: string; args?: unknown[] }> { if (fieldValue === null) - return [{ sql: `${prefixFieldsWith}${fieldName} IS NULL` }] + return [{ sql: `${prefixFieldsWith}"${fieldName}" IS NULL` }] else if (fieldName === 'AND' || fieldName === 'OR' || fieldName === 'NOT') { return [ makeBooleanFilter( @@ -428,7 +434,9 @@ export function makeFilter( } // needed because `WHERE field = NULL` is not valid SQL else - return [{ sql: `${prefixFieldsWith}${fieldName} = ?`, args: [fieldValue] }] + return [ + { sql: `${prefixFieldsWith}"${fieldName}" = ?`, args: [fieldValue] }, + ] } function joinStatements( @@ -484,21 +492,21 @@ function makeEqualsFilter( fieldName: string, value: unknown | undefined ): { sql: string; args?: unknown[] } { - return { sql: `${fieldName} = ?`, args: [value] } + return { sql: `"${fieldName}" = ?`, args: [value] } } function makeInFilter( fieldName: string, values: unknown[] | undefined ): { sql: string; args?: unknown[] } { - return { sql: `${fieldName} IN ?`, args: [values] } + return { sql: `"${fieldName}" IN ?`, args: [values] } } function makeNotInFilter( fieldName: string, values: unknown[] | undefined ): { sql: string; args?: unknown[] } { - return { sql: `${fieldName} NOT IN ?`, args: [values] } + return { sql: `"${fieldName}" NOT IN ?`, args: [values] } } function makeNotFilter( @@ -507,9 +515,9 @@ function makeNotFilter( ): { sql: string; args?: unknown[] } { if (value === null) { // needed because `WHERE field != NULL` is not valid SQL - 
return { sql: `${fieldName} IS NOT NULL` } + return { sql: `"${fieldName}" IS NOT NULL` } } else { - return { sql: `${fieldName} != ?`, args: [value] } + return { sql: `"${fieldName}" != ?`, args: [value] } } } @@ -517,28 +525,28 @@ function makeLtFilter( fieldName: string, value: unknown ): { sql: string; args?: unknown[] } { - return { sql: `${fieldName} < ?`, args: [value] } + return { sql: `"${fieldName}" < ?`, args: [value] } } function makeLteFilter( fieldName: string, value: unknown ): { sql: string; args?: unknown[] } { - return { sql: `${fieldName} <= ?`, args: [value] } + return { sql: `"${fieldName}" <= ?`, args: [value] } } function makeGtFilter( fieldName: string, value: unknown ): { sql: string; args?: unknown[] } { - return { sql: `${fieldName} > ?`, args: [value] } + return { sql: `"${fieldName}" > ?`, args: [value] } } function makeGteFilter( fieldName: string, value: unknown ): { sql: string; args?: unknown[] } { - return { sql: `${fieldName} >= ?`, args: [value] } + return { sql: `"${fieldName}" >= ?`, args: [value] } } function makeStartsWithFilter( @@ -547,7 +555,7 @@ function makeStartsWithFilter( ): { sql: string; args?: unknown[] } { if (typeof value !== 'string') throw new Error('startsWith filter must be a string') - return { sql: `${fieldName} LIKE ?`, args: [`${escapeLike(value)}%`] } + return { sql: `"${fieldName}" LIKE ?`, args: [`${escapeLike(value)}%`] } } function makeEndsWithFilter( @@ -556,7 +564,7 @@ function makeEndsWithFilter( ): { sql: string; args?: unknown[] } { if (typeof value !== 'string') throw new Error('endsWith filter must be a string') - return { sql: `${fieldName} LIKE ?`, args: [`%${escapeLike(value)}`] } + return { sql: `"${fieldName}" LIKE ?`, args: [`%${escapeLike(value)}`] } } function makeContainsFilter( @@ -565,7 +573,7 @@ function makeContainsFilter( ): { sql: string; args?: unknown[] } { if (typeof value !== 'string') throw new Error('contains filter must be a string') - return { sql: `${fieldName} LIKE ?`, args: [`%${escapeLike(value)}%`] } + return { sql: `"${fieldName}" LIKE ?`, args: [`%${escapeLike(value)}%`] } } function escapeLike(value: string): string { @@ -584,7 +592,10 @@ function addLimit(i: AnyFindInput, q: PostgresSelect): PostgresSelect { function addDistinct(i: AnyFindInput, q: PostgresSelect): PostgresSelect { if (typeof i.distinct === 'undefined') return q - return q.distinct(...i.distinct) + // have to quote the fields ourselves + // because Squel does not seem to auto quote + // field names in order by statements + return q.distinct(...i.distinct.map((f) => `"${f}"`)) } /** diff --git a/clients/typescript/test/client/model/builder.test.ts b/clients/typescript/test/client/model/builder.test.ts deleted file mode 100644 index 9c260abb7b..0000000000 --- a/clients/typescript/test/client/model/builder.test.ts +++ /dev/null @@ -1,533 +0,0 @@ -import test from 'ava' -import { Builder } from '../../../src/client/model/builder' -import { ShapeManagerMock } from '../../../src/client/model/shapes' -import { ZodError } from 'zod' -import { schema } from '../generated' - -const shapeManager = new ShapeManagerMock() -const postTableDescription = schema.getTableDescription('Post') - -const tbl = new Builder( - 'Post', - ['id', 'title', 'contents', 'nbr'], - shapeManager, - postTableDescription, - 'SQLite' -) - -// Sync all shapes such that we don't get warnings on every query -shapeManager.sync({ tablename: 'Post' }) - -const post1 = { - id: 'i1', - title: 't1', - contents: 'c1', - nbr: 18, -} - -const post2 = { - id: 'i2', - 
title: 't2', - contents: 'c2', - nbr: 21, -} - -/* - * The tests below check that the generated queries are correct. - * The query builder does not validate the input, it assumes that the input it gets was already validated. - * Input validation is currently done by the `Table` itself before building the query. - */ - -test('null values are inserted as NULL', (t) => { - const query = tbl - .create({ - data: { - id: 'i1', - title: 't1', - contents: 'c1', - nbr: null, - }, - }) - .toString() - - t.is( - query, - `INSERT INTO "Post" (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', NULL) RETURNING id, title, contents, nbr` - ) -}) - -// Test that we can make a create query -test('create query', (t) => { - const query = tbl - .create({ - data: post1, - }) - .toString() - - t.is( - query, - `INSERT INTO "Post" (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18) RETURNING id, title, contents, nbr` - ) -}) - -test('createMany query', (t) => { - const query = tbl - .createMany({ - data: [post1, post2], - }) - .toString() - - t.is( - query, - `INSERT INTO "Post" (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18), ('i2', 't2', 'c2', 21)` - ) - - const query2 = tbl - .createMany({ - data: [post1, post2], - skipDuplicates: true, - }) - .toString() - - t.is( - query2, - `INSERT INTO "Post" (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18), ('i2', 't2', 'c2', 21) ON CONFLICT DO NOTHING` - ) -}) - -test('findUnique query', async (t) => { - const query = tbl - .findUnique({ - where: { - id: 'i2', - nbr: 21, - }, - }) - .toString() - - t.is( - query, - `SELECT id, nbr, title, contents FROM "Post" WHERE (id = 'i2') AND (nbr = 21) LIMIT 2` - ) -}) - -test('findUnique query with selection', (t) => { - const query = tbl - .findUnique({ - where: { - id: 'i2', - nbr: 21, - }, - select: { - title: true, - contents: false, - }, - }) - .toString() - - t.is( - query, - `SELECT id, nbr, title FROM "Post" WHERE (id = 'i2') AND (nbr = 21) LIMIT 2` - ) -}) - -test('findUnique query with selection of NULL value', (t) => { - const query = tbl - .findUnique({ - where: { - id: 'i2', - nbr: null, - }, - select: { - title: true, - contents: false, - }, - }) - .toString() - - t.is( - query, - `SELECT id, nbr, title FROM "Post" WHERE (id = 'i2') AND (nbr IS NULL) LIMIT 2` - ) -}) - -test('findUnique query with selection of non-NULL value', (t) => { - const query = tbl - .findUnique({ - where: { - id: 'i2', - nbr: { not: null }, - }, - select: { - title: true, - contents: false, - }, - }) - .toString() - - t.is( - query, - `SELECT id, nbr, title FROM "Post" WHERE (id = 'i2') AND (nbr IS NOT NULL) LIMIT 2` - ) -}) - -test('findUnique query with selection of row that does not equal a value', (t) => { - const query = tbl - .findUnique({ - where: { - id: 'i2', - nbr: { not: 5 }, - }, - select: { - title: true, - contents: false, - }, - }) - .toString() - - t.is( - query, - `SELECT id, nbr, title FROM "Post" WHERE (id = 'i2') AND (nbr != 5) LIMIT 2` - ) -}) - -test('findUnique query supports several filters', (t) => { - const query = tbl - .findUnique({ - where: { - id: 'i2', - nbr: { not: 5, in: [1, 2, 3] }, - }, - }) - .toString() - - t.is( - query, - `SELECT id, nbr, title, contents FROM "Post" WHERE (id = 'i2') AND (nbr IN (1, 2, 3)) AND (nbr != 5) LIMIT 2` - ) -}) - -test('findUnique query with no filters throws an error', (t) => { - const error = t.throws( - () => { - tbl.findUnique({ - where: { - id: 'i2', - nbr: 21, - foo: {}, - }, - }) - }, - { instanceOf: ZodError } - ) - - t.deepEqual((error as 
ZodError).issues, [ - { - code: 'custom', - message: 'Please provide at least one filter.', - path: [], - }, - ]) -}) - -test('findMany allows results to be ordered on one field', (t) => { - const query = tbl - .findMany({ - // `where` argument must not be provided when using the actual API because it is added as default by the validator - // but since we directly use the query builder we need to provide it - where: {}, - orderBy: { - id: 'asc', - }, - }) - .toString() - - t.is(query, 'SELECT id, title, contents, nbr FROM "Post" ORDER BY id ASC') -}) - -test('findMany allows results to be ordered on several fields', (t) => { - const query = tbl - .findMany({ - // `where` argument must not be provided when using the actual API because it is added as default by the validator - // but since we directly use the query builder we need to provide it - where: {}, - orderBy: [ - { - id: 'asc', - }, - { - title: 'desc', - }, - ], - }) - .toString() - - t.is( - query, - 'SELECT id, title, contents, nbr FROM "Post" ORDER BY id ASC, title DESC' - ) -}) - -test('findMany supports pagination', (t) => { - const query = tbl - .findMany({ - // `where` argument must not be provided when using the actual API because it is added as default by the validator - // but since we directly use the query builder we need to provide it - where: {}, - take: 1, - skip: 1, - }) - .toString() - - t.is(query, 'SELECT id, title, contents, nbr FROM "Post" LIMIT 1 OFFSET 1') -}) - -test('findMany supports distinct results', (t) => { - const query = tbl - .findMany({ - // `where` argument must not be provided when using the actual API because it is added as default by the validator - // but since we directly use the query builder we need to provide it - where: {}, - distinct: ['nbr'], - }) - .toString() - - t.is(query, 'SELECT DISTINCT ON (nbr) id, title, contents, nbr FROM "Post"') -}) - -test('findMany supports IN filters in where argument', (t) => { - const query = tbl - .findMany({ - where: { - nbr: { - in: [1, 5, 18], - }, - }, - }) - .toString() - - t.is( - query, - 'SELECT nbr, id, title, contents FROM "Post" WHERE (nbr IN (1, 5, 18))' - ) -}) - -test('findMany supports NOT IN filters in where argument', (t) => { - const query = tbl - .findMany({ - where: { - nbr: { - notIn: [1, 5, 18], - }, - }, - }) - .toString() - - t.is( - query, - 'SELECT nbr, id, title, contents FROM "Post" WHERE (nbr NOT IN (1, 5, 18))' - ) -}) - -test('findMany supports lt, lte, gt, gte filters in where argument', (t) => { - const query = tbl - .findMany({ - where: { - nbr: { - lt: 11, - lte: 10, - gt: 4, - gte: 5, - }, - }, - }) - .toString() - - t.is( - query, - 'SELECT nbr, id, title, contents FROM "Post" WHERE (nbr < 11) AND (nbr <= 10) AND (nbr > 4) AND (nbr >= 5)' - ) -}) - -test('findMany supports startsWith filter in where argument', (t) => { - const query = tbl - .findMany({ - where: { - title: { - startsWith: 'foo', - }, - }, - }) - .toString() - - t.is( - query, - `SELECT title, id, contents, nbr FROM "Post" WHERE (title LIKE 'foo%')` - ) -}) - -test('findMany supports endsWith filter in where argument', (t) => { - const query = tbl - .findMany({ - where: { - title: { - endsWith: 'foo', - }, - }, - }) - .toString() - - t.is( - query, - `SELECT title, id, contents, nbr FROM "Post" WHERE (title LIKE '%foo')` - ) -}) - -test('findMany supports contains filter in where argument', (t) => { - const query = tbl - .findMany({ - where: { - title: { - contains: 'foo', - }, - }, - }) - .toString() - - t.is( - query, - `SELECT title, id, contents, 
nbr FROM "Post" WHERE (title LIKE '%foo%')` - ) -}) - -test('findMany supports boolean filters in where argument', (t) => { - const query = tbl - .findMany({ - where: { - OR: [ - { - title: { - contains: 'foo', - }, - }, - { - title: 'bar', - }, - ], - AND: [ - { - contents: 'content', - }, - { - nbr: 6, - }, - ], - NOT: [ - { - title: 'foobar', - }, - { - title: 'barfoo', - }, - ], - nbr: 5, - }, - }) - .toString() - - t.is( - query, - `SELECT nbr, id, title, contents FROM "Post" WHERE (title LIKE '%foo%' OR title = 'bar') AND (contents = 'content' AND nbr = 6) AND ((NOT title = 'foobar') AND (NOT title = 'barfoo')) AND (nbr = 5)` - ) -}) - -test('findMany supports single AND filter and single NOT filter in where argument', (t) => { - const query = tbl - .findMany({ - where: { - OR: [ - { - title: { - contains: 'foo', - }, - }, - { - title: 'bar', - }, - ], - AND: { - contents: 'content', - }, - NOT: { - title: 'foobar', - }, - nbr: 5, - }, - }) - .toString() - - t.is( - query, - `SELECT nbr, id, title, contents FROM "Post" WHERE (title LIKE '%foo%' OR title = 'bar') AND (contents = 'content') AND (NOT title = 'foobar') AND (nbr = 5)` - ) -}) - -test('update query', (t) => { - const query = tbl - .update({ - data: { title: 'Foo', contents: 'Bar' }, - where: { id: '1' }, - }) - .toString() - - t.is( - query, - `UPDATE "Post" SET title = 'Foo', contents = 'Bar' WHERE (id = '1') RETURNING id, title, contents, nbr` - ) -}) - -test('updateMany query', (t) => { - const query1 = tbl - .updateMany({ - data: { title: 'Foo', contents: 'Bar' }, - // `where` argument must not be provided when using the actual API because it is added as default by the validator - // but since we directly use the query builder we need to provide it - where: {}, - }) - .toString() - - const sql = `UPDATE "Post" SET title = 'Foo', contents = 'Bar' RETURNING id, title, contents, nbr` - - t.is(query1, sql) -}) - -test('delete query', (t) => { - const query = tbl - .delete({ - where: { id: 'Foo', title: 'Bar' }, - }) - .toString() - - t.is(query, `DELETE FROM "Post" WHERE (id = 'Foo') AND (title = 'Bar')`) -}) - -test('deleteMany query', (t) => { - const query1 = tbl - .deleteMany({ - where: { id: 'Foo', title: 'Bar' }, - }) - .toString() - - t.is(query1, `DELETE FROM "Post" WHERE (id = 'Foo') AND (title = 'Bar')`) - - const query2 = tbl - .deleteMany({ - // `where` argument is not required when using the actual API because it is added as default by the validator - // but since we directly use the query builder we need to provide it - where: {}, - }) - .toString() - - const sql = 'DELETE FROM "Post"' - t.is(query2, sql) -}) diff --git a/clients/typescript/test/client/model/builder.ts b/clients/typescript/test/client/model/builder.ts new file mode 100644 index 0000000000..3b64602361 --- /dev/null +++ b/clients/typescript/test/client/model/builder.ts @@ -0,0 +1,698 @@ +import anyTest, { TestFn } from 'ava' +import { Builder } from '../../../src/client/model/builder' +import { ZodError } from 'zod' +import { Dialect } from '../../../src/migrators/query-builder/builder' +import { ShapeManagerMock } from '../../../src/client/model/shapes' +import { schema } from '../generated' +import { pgBuilder, sqliteBuilder } from '../../../src/migrators/query-builder' + +export type ContextType = { + tbl: Builder +} + +const post1 = { + id: 'i1', + title: 't1', + contents: 'c1', + nbr: 18, +} + +const post2 = { + id: 'i2', + title: 't2', + contents: 'c2', + nbr: 21, +} + +const test = anyTest as TestFn + +const shapeManager = new 
ShapeManagerMock() +const postTableDescription = schema.getTableDescription('Post') +// Sync all shapes such that we don't get warnings on every query +shapeManager.sync({ tablename: 'Post' }) + +function makeContext(dialect: Dialect) { + test.beforeEach(async (t) => { + const tbl = new Builder( + 'Post', + ['id', 'title', 'contents', 'nbr'], + shapeManager, + postTableDescription, + dialect + ) + + t.context = { tbl } + }) +} + +/* + * The tests below check that the generated queries are correct. + * The query builder does not validate the input, it assumes that the input it gets was already validated. + * Input validation is currently done by the `Table` itself before building the query. + */ + +export const builderTests = (dialect: Dialect) => { + makeContext(dialect) + const builder = dialect === 'SQLite' ? sqliteBuilder : pgBuilder + const makePositionalParam = builder.makePositionalParam.bind(builder) + + test('null values are inserted as NULL', (t) => { + const { tbl } = t.context + const stmt = tbl + .create({ + data: { + id: 'i1', + title: 't1', + contents: 'c1', + nbr: null, + }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `INSERT INTO "Post" ("id", "title", "contents", "nbr") VALUES (${makePositionalParam( + 1 + )}, ${makePositionalParam(2)}, ${makePositionalParam( + 3 + )}, ${makePositionalParam( + 4 + )}) RETURNING "id", "title", "contents", "nbr"`, + values: ['i1', 't1', 'c1', null], + }) + }) + + // Test that we can make a create query + test('create query', (t) => { + const { tbl } = t.context + const stmt = tbl + .create({ + data: post1, + }) + .toParam() + + t.deepEqual(stmt, { + text: `INSERT INTO "Post" ("id", "title", "contents", "nbr") VALUES (${makePositionalParam( + 1 + )}, ${makePositionalParam(2)}, ${makePositionalParam( + 3 + )}, ${makePositionalParam( + 4 + )}) RETURNING "id", "title", "contents", "nbr"`, + values: ['i1', 't1', 'c1', 18], + }) + }) + + test('createMany query', (t) => { + const { tbl } = t.context + const stmt1 = tbl + .createMany({ + data: [post1, post2], + }) + .toParam() + + t.deepEqual(stmt1, { + text: `INSERT INTO "Post" ("id", "title", "contents", "nbr") VALUES (${makePositionalParam( + 1 + )}, ${makePositionalParam(2)}, ${makePositionalParam( + 3 + )}, ${makePositionalParam(4)}), (${makePositionalParam( + 5 + )}, ${makePositionalParam(6)}, ${makePositionalParam( + 7 + )}, ${makePositionalParam(8)})`, + values: ['i1', 't1', 'c1', 18, 'i2', 't2', 'c2', 21], + }) + + const stmt2 = tbl + .createMany({ + data: [post1, post2], + skipDuplicates: true, + }) + .toParam() + + t.deepEqual(stmt2, { + text: `INSERT INTO "Post" ("id", "title", "contents", "nbr") VALUES (${makePositionalParam( + 1 + )}, ${makePositionalParam(2)}, ${makePositionalParam( + 3 + )}, ${makePositionalParam(4)}), (${makePositionalParam( + 5 + )}, ${makePositionalParam(6)}, ${makePositionalParam( + 7 + )}, ${makePositionalParam(8)}) ON CONFLICT DO NOTHING`, + values: ['i1', 't1', 'c1', 18, 'i2', 't2', 'c2', 21], + }) + }) + + test('findUnique query', async (t) => { + const { tbl } = t.context + const stmt = tbl + .findUnique({ + where: { + id: 'i2', + nbr: 21, + }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT "id", "nbr", "title", "contents" FROM "Post" WHERE ("id" = ${makePositionalParam( + 1 + )}) AND ("nbr" = ${makePositionalParam(2)}) LIMIT ${makePositionalParam( + 3 + )}`, + values: ['i2', 21, 2], + }) + }) + + test('findUnique query with selection', (t) => { + const { tbl } = t.context + const stmt = tbl + .findUnique({ + where: { + id: 'i2', + nbr: 21, 
+ }, + select: { + title: true, + contents: false, + }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT "id", "nbr", "title" FROM "Post" WHERE ("id" = ${makePositionalParam( + 1 + )}) AND ("nbr" = ${makePositionalParam(2)}) LIMIT ${makePositionalParam( + 3 + )}`, + values: ['i2', 21, 2], + }) + }) + + test('findUnique query with selection of NULL value', (t) => { + const { tbl } = t.context + const stmt = tbl + .findUnique({ + where: { + id: 'i2', + nbr: null, + }, + select: { + title: true, + contents: false, + }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT "id", "nbr", "title" FROM "Post" WHERE ("id" = ${makePositionalParam( + 1 + )}) AND ("nbr" IS NULL) LIMIT ${makePositionalParam(2)}`, + values: ['i2', 2], + }) + }) + + test('findUnique query with selection of non-NULL value', (t) => { + const { tbl } = t.context + const stmt = tbl + .findUnique({ + where: { + id: 'i2', + nbr: { not: null }, + }, + select: { + title: true, + contents: false, + }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT "id", "nbr", "title" FROM "Post" WHERE ("id" = ${makePositionalParam( + 1 + )}) AND ("nbr" IS NOT NULL) LIMIT ${makePositionalParam(2)}`, + values: ['i2', 2], + }) + }) + + test('findUnique query with selection of row that does not equal a value', (t) => { + const { tbl } = t.context + const stmt = tbl + .findUnique({ + where: { + id: 'i2', + nbr: { not: 5 }, + }, + select: { + title: true, + contents: false, + }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT "id", "nbr", "title" FROM "Post" WHERE ("id" = ${makePositionalParam( + 1 + )}) AND ("nbr" != ${makePositionalParam(2)}) LIMIT ${makePositionalParam( + 3 + )}`, + values: ['i2', 5, 2], + }) + }) + + test('findUnique query supports several filters', (t) => { + const { tbl } = t.context + const stmt = tbl + .findUnique({ + where: { + id: 'i2', + nbr: { not: 5, in: [1, 2, 3] }, + }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT "id", "nbr", "title", "contents" FROM "Post" WHERE ("id" = ${makePositionalParam( + 1 + )}) AND ("nbr" IN (${makePositionalParam(2)}, ${makePositionalParam( + 3 + )}, ${makePositionalParam(4)})) AND ("nbr" != ${makePositionalParam( + 5 + )}) LIMIT ${makePositionalParam(6)}`, + values: ['i2', 1, 2, 3, 5, 2], + }) + }) + + test('findUnique query with no filters throws an error', (t) => { + const { tbl } = t.context + const error = t.throws( + () => { + tbl.findUnique({ + where: { + id: 'i2', + nbr: 21, + foo: {}, + }, + }) + }, + { instanceOf: ZodError } + ) + + t.deepEqual((error as ZodError).issues, [ + { + code: 'custom', + message: 'Please provide at least one filter.', + path: [], + }, + ]) + }) + + test('findMany allows results to be ordered on one field', (t) => { + const { tbl } = t.context + const stmt = tbl + .findMany({ + // `where` argument must not be provided when using the actual API because it is added as default by the validator + // but since we directly use the query builder we need to provide it + where: {}, + orderBy: { + id: 'asc', + }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT "id", "title", "contents", "nbr" FROM "Post" ORDER BY "id" ASC`, + values: [], + }) + }) + + test('findMany allows results to be ordered on several fields', (t) => { + const { tbl } = t.context + const stmt = tbl + .findMany({ + // `where` argument must not be provided when using the actual API because it is added as default by the validator + // but since we directly use the query builder we need to provide it + where: {}, + orderBy: [ + { + id: 'asc', + 
}, + { + title: 'desc', + }, + ], + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT "id", "title", "contents", "nbr" FROM "Post" ORDER BY "id" ASC, "title" DESC`, + values: [], + }) + }) + + test('findMany supports pagination', (t) => { + const { tbl } = t.context + const stmt = tbl + .findMany({ + // `where` argument must not be provided when using the actual API because it is added as default by the validator + // but since we directly use the query builder we need to provide it + where: {}, + take: 1, + skip: 1, + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT "id", "title", "contents", "nbr" FROM "Post" LIMIT ${makePositionalParam( + 1 + )} OFFSET ${makePositionalParam(2)}`, + values: [1, 1], + }) + }) + + test('findMany supports distinct results', (t) => { + const { tbl } = t.context + const stmt = tbl + .findMany({ + // `where` argument must not be provided when using the actual API because it is added as default by the validator + // but since we directly use the query builder we need to provide it + where: {}, + distinct: ['nbr'], + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT DISTINCT ON ("nbr") "id", "title", "contents", "nbr" FROM "Post"`, + values: [], + }) + }) + + test('findMany supports IN filters in where argument', (t) => { + const { tbl } = t.context + const stmt = tbl + .findMany({ + where: { + nbr: { + in: [1, 5, 18], + }, + }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT "nbr", "id", "title", "contents" FROM "Post" WHERE ("nbr" IN (${makePositionalParam( + 1 + )}, ${makePositionalParam(2)}, ${makePositionalParam(3)}))`, + values: [1, 5, 18], + }) + }) + + test('findMany supports NOT IN filters in where argument', (t) => { + const { tbl } = t.context + const stmt = tbl + .findMany({ + where: { + nbr: { + notIn: [1, 5, 18], + }, + }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT "nbr", "id", "title", "contents" FROM "Post" WHERE ("nbr" NOT IN (${makePositionalParam( + 1 + )}, ${makePositionalParam(2)}, ${makePositionalParam(3)}))`, + values: [1, 5, 18], + }) + }) + + test('findMany supports lt, lte, gt, gte filters in where argument', (t) => { + const { tbl } = t.context + const stmt = tbl + .findMany({ + where: { + nbr: { + lt: 11, + lte: 10, + gt: 4, + gte: 5, + }, + }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT "nbr", "id", "title", "contents" FROM "Post" WHERE ("nbr" < ${makePositionalParam( + 1 + )}) AND ("nbr" <= ${makePositionalParam( + 2 + )}) AND ("nbr" > ${makePositionalParam( + 3 + )}) AND ("nbr" >= ${makePositionalParam(4)})`, + values: [11, 10, 4, 5], + }) + }) + + test('findMany supports startsWith filter in where argument', (t) => { + const { tbl } = t.context + const stmt = tbl + .findMany({ + where: { + title: { + startsWith: 'foo', + }, + }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT "title", "id", "contents", "nbr" FROM "Post" WHERE ("title" LIKE ${makePositionalParam( + 1 + )})`, + values: ['foo%'], + }) + }) + + test('findMany supports endsWith filter in where argument', (t) => { + const { tbl } = t.context + const stmt = tbl + .findMany({ + where: { + title: { + endsWith: 'foo', + }, + }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT "title", "id", "contents", "nbr" FROM "Post" WHERE ("title" LIKE ${makePositionalParam( + 1 + )})`, + values: ['%foo'], + }) + }) + + test('findMany supports contains filter in where argument', (t) => { + const { tbl } = t.context + const stmt = tbl + .findMany({ + where: { + title: { + contains: 'foo', + }, + }, + }) + 
.toParam() + + t.deepEqual(stmt, { + text: `SELECT "title", "id", "contents", "nbr" FROM "Post" WHERE ("title" LIKE ${makePositionalParam( + 1 + )})`, + values: ['%foo%'], + }) + }) + + test('findMany supports boolean filters in where argument', (t) => { + const { tbl } = t.context + const stmt = tbl + .findMany({ + where: { + OR: [ + { + title: { + contains: 'foo', + }, + }, + { + title: 'bar', + }, + ], + AND: [ + { + contents: 'content', + }, + { + nbr: 6, + }, + ], + NOT: [ + { + title: 'foobar', + }, + { + title: 'barfoo', + }, + ], + nbr: 5, + }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT "nbr", "id", "title", "contents" FROM "Post" WHERE ("title" LIKE ${makePositionalParam( + 1 + )} OR "title" = ${makePositionalParam( + 2 + )}) AND ("contents" = ${makePositionalParam( + 3 + )} AND "nbr" = ${makePositionalParam( + 4 + )}) AND ((NOT "title" = ${makePositionalParam( + 5 + )}) AND (NOT "title" = ${makePositionalParam( + 6 + )})) AND ("nbr" = ${makePositionalParam(7)})`, + values: ['%foo%', 'bar', 'content', 6, 'foobar', 'barfoo', 5], + }) + }) + + test('findMany supports single AND filter and single NOT filter in where argument', (t) => { + const { tbl } = t.context + const stmt = tbl + .findMany({ + where: { + OR: [ + { + title: { + contains: 'foo', + }, + }, + { + title: 'bar', + }, + ], + AND: { + contents: 'content', + }, + NOT: { + title: 'foobar', + }, + nbr: 5, + }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `SELECT "nbr", "id", "title", "contents" FROM "Post" WHERE ("title" LIKE ${makePositionalParam( + 1 + )} OR "title" = ${makePositionalParam( + 2 + )}) AND ("contents" = ${makePositionalParam( + 3 + )}) AND (NOT "title" = ${makePositionalParam( + 4 + )}) AND ("nbr" = ${makePositionalParam(5)})`, + values: ['%foo%', 'bar', 'content', 'foobar', 5], + }) + }) + + test('update query', (t) => { + const { tbl } = t.context + const stmt = tbl + .update({ + data: { title: 'Foo', contents: 'Bar' }, + where: { id: '1' }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `UPDATE "Post" SET "title" = ${makePositionalParam( + 1 + )}, "contents" = ${makePositionalParam( + 2 + )} WHERE ("id" = ${makePositionalParam( + 3 + )}) RETURNING "id", "title", "contents", "nbr"`, + values: ['Foo', 'Bar', '1'], + }) + }) + + test('updateMany query', (t) => { + const { tbl } = t.context + const stmt = tbl + .updateMany({ + data: { title: 'Foo', contents: 'Bar' }, + // `where` argument must not be provided when using the actual API because it is added as default by the validator + // but since we directly use the query builder we need to provide it + where: {}, + }) + .toParam() + + t.deepEqual(stmt, { + text: `UPDATE "Post" SET "title" = ${makePositionalParam( + 1 + )}, "contents" = ${makePositionalParam( + 2 + )} RETURNING "id", "title", "contents", "nbr"`, + values: ['Foo', 'Bar'], + }) + }) + + test('delete query', (t) => { + const { tbl } = t.context + const stmt = tbl + .delete({ + where: { id: 'Foo', title: 'Bar' }, + }) + .toParam() + + t.deepEqual(stmt, { + text: `DELETE FROM "Post" WHERE ("id" = ${makePositionalParam( + 1 + )}) AND ("title" = ${makePositionalParam(2)})`, + values: ['Foo', 'Bar'], + }) + }) + + test('deleteMany query', (t) => { + const { tbl } = t.context + const stmt1 = tbl + .deleteMany({ + where: { id: 'Foo', title: 'Bar' }, + }) + .toParam() + + t.deepEqual(stmt1, { + text: `DELETE FROM "Post" WHERE ("id" = ${makePositionalParam( + 1 + )}) AND ("title" = ${makePositionalParam(2)})`, + values: ['Foo', 'Bar'], + }) + + const stmt2 = tbl + .deleteMany({ + 
// `where` argument is not required when using the actual API because it is added as default by the validator + // but since we directly use the query builder we need to provide it + where: {}, + }) + .toParam() + + t.deepEqual(stmt2, { + text: `DELETE FROM "Post"`, + values: [], + }) + }) +} diff --git a/clients/typescript/test/client/model/postgres/builder.test.ts b/clients/typescript/test/client/model/postgres/builder.test.ts new file mode 100644 index 0000000000..f44c534195 --- /dev/null +++ b/clients/typescript/test/client/model/postgres/builder.test.ts @@ -0,0 +1,4 @@ +import { builderTests } from '../builder' + +const dialect = 'Postgres' +builderTests(dialect) diff --git a/clients/typescript/test/client/model/sqlite/builder.test.ts b/clients/typescript/test/client/model/sqlite/builder.test.ts new file mode 100644 index 0000000000..4e54cea206 --- /dev/null +++ b/clients/typescript/test/client/model/sqlite/builder.test.ts @@ -0,0 +1,4 @@ +import { builderTests } from '../builder' + +const dialect = 'SQLite' +builderTests(dialect) From 0b66b425d8f480b7fd7e118826712d6b57141188 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 10 Apr 2024 15:26:14 +0200 Subject: [PATCH 055/156] Fixes to e2e tests --- e2e/satellite_client/src/client.ts | 12 ++++++++---- .../03.01_node_satellite_loads_local_migrations.lux | 2 ++ .../03.06_node_satellite_does_sync_on_subscribe.lux | 2 +- ...tellite_can_resume_subscriptions_on_reconnect.lux | 2 +- 4 files changed, 12 insertions(+), 6 deletions(-) diff --git a/e2e/satellite_client/src/client.ts b/e2e/satellite_client/src/client.ts index faba0799d1..1387590c4f 100644 --- a/e2e/satellite_client/src/client.ts +++ b/e2e/satellite_client/src/client.ts @@ -62,7 +62,11 @@ export const electrify_db = async ( debug: true, } console.log(`(in electrify_db) config: ${JSON.stringify(config)}`) - schema.migrations = migrations + if (process.env.DIALECT === 'Postgres') { + schema.pgMigrations = migrations + } else { + schema.migrations = migrations + } const result = await electrify(db, schema, config) const token = await mockSecureAuthToken(exp) @@ -295,7 +299,7 @@ export const write_float = (electric: Electric, id: string, f4: number, f8: numb export const get_json_raw = async (electric: Electric, id: string) => { const res = await electric.db.rawQuery({ - sql: `SELECT js FROM jsons WHERE id = ?;`, + sql: `SELECT js FROM jsons WHERE id = ${builder.makePositionalParam(1)};`, args: [id] }) as unknown as Array<{ js: string }> return res[0]?.js @@ -303,7 +307,7 @@ export const get_json_raw = async (electric: Electric, id: string) => { export const get_jsonb_raw = async (electric: Electric, id: string) => { const res = await electric.db.rawQuery({ - sql: `SELECT jsb FROM jsons WHERE id = ${builder.paramSign};`, + sql: `SELECT jsb FROM jsons WHERE id = ${builder.makePositionalParam(1)};`, args: [id] }) as unknown as Array<{ jsb: string }> return res[0]?.jsb @@ -406,7 +410,7 @@ export const insert_extended_into = async (electric: Electric, table: string, va } const columns = Object.keys(values) const columnNames = columns.join(", ") - const placeHolders = Array(columns.length).fill("?") + const placeHolders = columns.map((_, i) => builder.makePositionalParam(i+1)) const args = Object.values(values) await electric.db.unsafeExec({ diff --git a/e2e/tests/03.01_node_satellite_loads_local_migrations.lux b/e2e/tests/03.01_node_satellite_loads_local_migrations.lux index b06deb4c07..80df1e95db 100644 --- a/e2e/tests/03.01_node_satellite_loads_local_migrations.lux +++ 
b/e2e/tests/03.01_node_satellite_loads_local_migrations.lux @@ -20,6 +20,8 @@ ] """] +[invoke setup] + [invoke setup_client_with_migrations 1 "electric_1" 5133 $migrations "false"] [shell satellite_1] diff --git a/e2e/tests/03.06_node_satellite_does_sync_on_subscribe.lux b/e2e/tests/03.06_node_satellite_does_sync_on_subscribe.lux index 6d62e74102..56bd46082d 100644 --- a/e2e/tests/03.06_node_satellite_does_sync_on_subscribe.lux +++ b/e2e/tests/03.06_node_satellite_does_sync_on_subscribe.lux @@ -40,7 +40,7 @@ [shell pg_1] !\x - [invoke wait-for "SELECT COUNT(*) FROM public.items;" "count \| 1" 10 ${psql}] + [invoke wait-for "SELECT COUNT(*) FROM public.items;" "count \| 2" 10 ${psql}] [cleanup] [invoke teardown] diff --git a/e2e/tests/03.08_node_satellite_can_resume_subscriptions_on_reconnect.lux b/e2e/tests/03.08_node_satellite_can_resume_subscriptions_on_reconnect.lux index d4bcb3eca5..dae3bc2230 100644 --- a/e2e/tests/03.08_node_satellite_can_resume_subscriptions_on_reconnect.lux +++ b/e2e/tests/03.08_node_satellite_can_resume_subscriptions_on_reconnect.lux @@ -40,7 +40,7 @@ [shell pg_1] !\x - [invoke wait-for "SELECT COUNT(*) FROM public.items;" "count \| 1" 10 ${psql}] + [invoke wait-for "SELECT COUNT(*) FROM public.items;" "count \| 2" 10 ${psql}] [shell satellite_1] [progress stopping client] From b4a014bd6ef1051d6e6f4d1f29889dd85960555c Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 11 Apr 2024 11:21:00 +0200 Subject: [PATCH 056/156] Proper quoting of table names and field names --- .../typescript/src/client/model/builder.ts | 105 +++-- .../test/client/model/shapes.test.ts | 5 +- .../satellite/postgres/serialization.test.ts | 28 ++ .../test/satellite/serialization.test.ts | 414 ------------------ .../test/satellite/serialization.ts | 404 +++++++++++++++++ .../satellite/sqlite/serialization.test.ts | 25 ++ 6 files changed, 517 insertions(+), 464 deletions(-) create mode 100644 clients/typescript/test/satellite/postgres/serialization.test.ts delete mode 100644 clients/typescript/test/satellite/serialization.test.ts create mode 100644 clients/typescript/test/satellite/serialization.ts create mode 100644 clients/typescript/test/satellite/sqlite/serialization.test.ts diff --git a/clients/typescript/src/client/model/builder.ts b/clients/typescript/src/client/model/builder.ts index fb2fc888ad..9e3fb3db75 100644 --- a/clients/typescript/src/client/model/builder.ts +++ b/clients/typescript/src/client/model/builder.ts @@ -53,7 +53,6 @@ export class Builder { this._fullyQualifiedTableName = `"${this._tableName}"` squelPostgres.cls.DefaultQueryBuilderOptions.nameQuoteCharacter = '"' squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteFieldNames = true - squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteAliasNames = true if (dialect === 'Postgres') { //squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteTableNames = true @@ -185,7 +184,12 @@ export class Builder { if (!this.shapeManager.hasBeenSubscribed(this._tableName)) Log.debug('Reading from unsynced table ' + this._tableName) - const query = squelPostgres.select().from(this._fullyQualifiedTableName) // specify from which table to select + // don't autoquote field names in the selection + // because if a field is a BigInt we cast it + // and squel would add quotes around the entire cast + const query = squelPostgres + .select({ autoQuoteFieldNames: false }) + .from(this._fullyQualifiedTableName) // specify from which table to select // only select the fields provided in `i.select` and the ones in `i.where` const 
addFieldSelectionP = this.addFieldSelection.bind( this, @@ -259,9 +263,9 @@ export class Builder { private castBigIntToText(field: string) { const pgType = this._tableDescription.fields.get(field) if (pgType === PgBasicType.PG_INT8 && this.dialect === 'SQLite') { - return `cast(${field} as TEXT) AS ${field}` + return `cast("${field}" as TEXT) AS "${field}"` } - return field + return `"${field}"` } private addOrderBy(i: AnyFindInput, q: PostgresSelect): PostgresSelect { @@ -315,24 +319,10 @@ export class Builder { // the DAL will convert the string into a BigInt in the `fromSqlite` function from `../conversions/sqlite.ts`. const pgType = this._tableDescription.fields.get(field) if (pgType === PgBasicType.PG_INT8 && this.dialect === 'SQLite') { - //squelPostgres.function('cast(?)', `"${field}" as TEXT`) - // FIXME: squel adds quotes around the entire cast... - // tried to override squel's internal _formatFieldName to special case this field but it still quoted it - const f = `cast("${field}" as TEXT) AS "${field}"` - const res = query.returning(f) //, field) - /* - const returningBlock = query.blocks[query.blocks.length - 1] - const originalFormatter = returningBlock._formatFieldName.bind(returningBlock) - returningBlock._formatFieldName = (field, opts) => { - console.log(`formatting field name: ${field}`) - if (field === f) { - console.log(`returning field: ${field}`) - return field - } - else return originalFormatter(field, opts) - } - */ - return res + // make a raw string and quote the field name ourselves + // because otherwise Squel would add quotes around the entire cast + const f = squelPostgres.rstr(`cast("${field}" as TEXT) AS "${field}"`) + return query.returning(f) } return query.returning(field) }, query) @@ -423,8 +413,9 @@ export function makeFilter( const [filter, handler] = entry if (filter in obj) { const sql = handler( - prefixFieldsWith + fieldName, - obj[filter as keyof typeof obj] + fieldName, + obj[filter as keyof typeof obj], + prefixFieldsWith ) filters.push(sql) } @@ -490,90 +481,110 @@ function makeBooleanFilter( function makeEqualsFilter( fieldName: string, - value: unknown | undefined + value: unknown | undefined, + prefixFieldsWith: string ): { sql: string; args?: unknown[] } { - return { sql: `"${fieldName}" = ?`, args: [value] } + return { sql: `${prefixFieldsWith}"${fieldName}" = ?`, args: [value] } } function makeInFilter( fieldName: string, - values: unknown[] | undefined + values: unknown[] | undefined, + prefixFieldsWith: string ): { sql: string; args?: unknown[] } { - return { sql: `"${fieldName}" IN ?`, args: [values] } + return { sql: `${prefixFieldsWith}"${fieldName}" IN ?`, args: [values] } } function makeNotInFilter( fieldName: string, - values: unknown[] | undefined + values: unknown[] | undefined, + prefixFieldsWith: string ): { sql: string; args?: unknown[] } { - return { sql: `"${fieldName}" NOT IN ?`, args: [values] } + return { sql: `${prefixFieldsWith}"${fieldName}" NOT IN ?`, args: [values] } } function makeNotFilter( fieldName: string, - value: unknown + value: unknown, + prefixFieldsWith: string ): { sql: string; args?: unknown[] } { if (value === null) { // needed because `WHERE field != NULL` is not valid SQL - return { sql: `"${fieldName}" IS NOT NULL` } + return { sql: `${prefixFieldsWith}"${fieldName}" IS NOT NULL` } } else { - return { sql: `"${fieldName}" != ?`, args: [value] } + return { sql: `${prefixFieldsWith}"${fieldName}" != ?`, args: [value] } } } function makeLtFilter( fieldName: string, - value: unknown + value: unknown, + 
prefixFieldsWith: string ): { sql: string; args?: unknown[] } { - return { sql: `"${fieldName}" < ?`, args: [value] } + return { sql: `${prefixFieldsWith}"${fieldName}" < ?`, args: [value] } } function makeLteFilter( fieldName: string, - value: unknown + value: unknown, + prefixFieldsWith: string ): { sql: string; args?: unknown[] } { - return { sql: `"${fieldName}" <= ?`, args: [value] } + return { sql: `${prefixFieldsWith}"${fieldName}" <= ?`, args: [value] } } function makeGtFilter( fieldName: string, - value: unknown + value: unknown, + prefixFieldsWith: string ): { sql: string; args?: unknown[] } { - return { sql: `"${fieldName}" > ?`, args: [value] } + return { sql: `${prefixFieldsWith}"${fieldName}" > ?`, args: [value] } } function makeGteFilter( fieldName: string, - value: unknown + value: unknown, + prefixFieldsWith: string ): { sql: string; args?: unknown[] } { - return { sql: `"${fieldName}" >= ?`, args: [value] } + return { sql: `${prefixFieldsWith}"${fieldName}" >= ?`, args: [value] } } function makeStartsWithFilter( fieldName: string, - value: unknown + value: unknown, + prefixFieldsWith: string ): { sql: string; args?: unknown[] } { if (typeof value !== 'string') throw new Error('startsWith filter must be a string') - return { sql: `"${fieldName}" LIKE ?`, args: [`${escapeLike(value)}%`] } + return { + sql: `${prefixFieldsWith}"${fieldName}" LIKE ?`, + args: [`${escapeLike(value)}%`], + } } function makeEndsWithFilter( fieldName: string, - value: unknown + value: unknown, + prefixFieldsWith: string ): { sql: string; args?: unknown[] } { if (typeof value !== 'string') throw new Error('endsWith filter must be a string') - return { sql: `"${fieldName}" LIKE ?`, args: [`%${escapeLike(value)}`] } + return { + sql: `${prefixFieldsWith}"${fieldName}" LIKE ?`, + args: [`%${escapeLike(value)}`], + } } function makeContainsFilter( fieldName: string, - value: unknown + value: unknown, + prefixFieldsWith: string ): { sql: string; args?: unknown[] } { if (typeof value !== 'string') throw new Error('contains filter must be a string') - return { sql: `"${fieldName}" LIKE ?`, args: [`%${escapeLike(value)}%`] } + return { + sql: `${prefixFieldsWith}"${fieldName}" LIKE ?`, + args: [`%${escapeLike(value)}%`], + } } function escapeLike(value: string): string { diff --git a/clients/typescript/test/client/model/shapes.test.ts b/clients/typescript/test/client/model/shapes.test.ts index fe1c6c194e..278e12df74 100644 --- a/clients/typescript/test/client/model/shapes.test.ts +++ b/clients/typescript/test/client/model/shapes.test.ts @@ -368,13 +368,12 @@ test.serial('nested shape is constructed', async (t) => { const shape = Post.computeShape(input) t.deepEqual(shape, { tablename: 'Post', - where: - "(this.id IN (3, 'test') OR this.test LIKE '\\%hello%') AND ((NOT this.id = 1) AND (NOT this.id = 2)) AND (this.nbr = 6 AND this.nbr = 7) AND (this.title = 'foo') AND (this.contents = 'important''')", + where: `(this."id" IN (3, 'test') OR this."test" LIKE '\\%hello%') AND ((NOT this."id" = 1) AND (NOT this."id" = 2)) AND (this."nbr" = 6 AND this."nbr" = 7) AND (this."title" = 'foo') AND (this."contents" = 'important''')`, include: [ { foreignKey: ['authorId'], select: { - where: "this.value < '2024-01-01T00:00:00.000Z'", + where: `this."value" < '2024-01-01T00:00:00.000Z'`, tablename: 'User', include: [ { diff --git a/clients/typescript/test/satellite/postgres/serialization.test.ts b/clients/typescript/test/satellite/postgres/serialization.test.ts new file mode 100644 index 0000000000..40451d4b22 --- 
/dev/null +++ b/clients/typescript/test/satellite/postgres/serialization.test.ts @@ -0,0 +1,28 @@ +import anyTest, { ExecutionContext, TestFn } from 'ava' +import { makePgDatabase } from '../../support/node-postgres' +import { randomValue } from '../../../src/util/random' +import { opts } from '../common' +import { ContextType, SetupFn, serializationTests } from '../serialization' +import { pgTypeDecoder, pgTypeEncoder } from '../../../src/util/common' +import { DatabaseAdapter as PgDatabaseAdapter } from '../../../src/drivers/node-postgres/adapter' +import { pgBuilder } from '../../../src/migrators/query-builder' + +const test = anyTest as TestFn + +let port = 4800 +const setupPG: SetupFn = async (t: ExecutionContext) => { + const dbName = `serialization-test-${randomValue()}` + const { db, stop } = await makePgDatabase(dbName, port++) + t.teardown(async () => await stop()) + const namespace = 'public' + return [new PgDatabaseAdapter(db), pgBuilder, opts(namespace)] +} + +test.beforeEach(async (t) => { + t.context.dialect = 'Postgres' + t.context.encoder = pgTypeEncoder + t.context.decoder = pgTypeDecoder + t.context.setup = setupPG +}) + +serializationTests(test) diff --git a/clients/typescript/test/satellite/serialization.test.ts b/clients/typescript/test/satellite/serialization.test.ts deleted file mode 100644 index 2f3f73dc04..0000000000 --- a/clients/typescript/test/satellite/serialization.test.ts +++ /dev/null @@ -1,414 +0,0 @@ -import { SatRelation_RelationType } from '../../src/_generated/protocol/satellite' -import { serializeRow, deserializeRow } from '../../src/satellite/client' -import test, { ExecutionContext } from 'ava' -import { Relation, Record } from '../../src/util/types' -import { DbSchema, TableSchema } from '../../src/client/model/schema' -import { PgBasicType } from '../../src/client/conversions/types' -import { HKT } from '../../src/client/util/hkt' -import Database from 'better-sqlite3' -import { DatabaseAdapter as SQLiteDatabaseAdapter } from '../../src/drivers/better-sqlite3' -import { DatabaseAdapter as PgDatabaseAdapter } from '../../src/drivers/node-postgres/adapter' -import { DatabaseAdapter as DatabaseAdapterInterface } from '../../src/electric/adapter' -import { inferRelationsFromDb } from '../../src/util/relations' -import { SatelliteOpts } from '../../src/satellite/config' -import { - QueryBuilder, - pgBuilder, - sqliteBuilder, -} from '../../src/migrators/query-builder' -import { makePgDatabase } from '../support/node-postgres' -import { randomValue } from '../../src/util/random' -import { opts } from './common' - -test('serialize/deserialize row data', async (t) => { - const rel: Relation = { - id: 1, - schema: 'schema', - table: 'table', - tableType: SatRelation_RelationType.TABLE, - columns: [ - { name: 'name1', type: 'TEXT', isNullable: true }, - { name: 'name2', type: 'TEXT', isNullable: true }, - { name: 'name3', type: 'TEXT', isNullable: true }, - { name: 'blob1', type: 'BYTEA', isNullable: true }, - { name: 'blob2', type: 'BYTEA', isNullable: true }, - { name: 'blob3', type: 'BYTEA', isNullable: true }, - { name: 'int1', type: 'INTEGER', isNullable: true }, - { name: 'int2', type: 'INTEGER', isNullable: true }, - { name: 'bigint1', type: 'INT8', isNullable: true }, - { name: 'bigint2', type: 'INT8', isNullable: true }, - { name: 'float1', type: 'REAL', isNullable: true }, - { name: 'float2', type: 'FLOAT4', isNullable: true }, - { name: 'float3', type: 'FLOAT8', isNullable: true }, - { name: 'bool1', type: 'BOOL', isNullable: true }, - { name: 
'bool2', type: 'BOOL', isNullable: true }, - { name: 'bool3', type: 'BOOL', isNullable: true }, - // bundled migrations contain type 'TEXT' for enums - { name: 'enum1', type: 'TEXT', isNullable: true }, - { name: 'enum2', type: 'TEXT', isNullable: true }, - ], - } - - const dbDescription = new DbSchema( - { - table: { - fields: new Map([ - ['name1', PgBasicType.PG_TEXT], - ['name2', PgBasicType.PG_TEXT], - ['name3', PgBasicType.PG_TEXT], - ['blob1', PgBasicType.PG_BYTEA], - ['blob2', PgBasicType.PG_BYTEA], - ['blob3', PgBasicType.PG_BYTEA], - ['int1', PgBasicType.PG_INTEGER], - ['int2', PgBasicType.PG_INTEGER], - ['bigint1', PgBasicType.PG_INT8], - ['bigint2', PgBasicType.PG_INT8], - ['float1', PgBasicType.PG_REAL], - ['float2', PgBasicType.PG_FLOAT4], - ['float3', PgBasicType.PG_FLOAT8], - ['bool1', PgBasicType.PG_BOOL], - ['bool2', PgBasicType.PG_BOOL], - ['bool3', PgBasicType.PG_BOOL], - // enum types are transformed to text type by our generator - ['enum1', PgBasicType.PG_TEXT], - ['enum2', PgBasicType.PG_TEXT], - ]), - relations: [], - } as unknown as TableSchema< - any, - any, - any, - any, - any, - any, - any, - any, - any, - HKT - >, - }, - [], - [] - ) - - const record: Record = { - name1: 'Hello', - name2: 'World!', - name3: null, - blob1: new Uint8Array([1, 15, 255, 145]), - blob2: new Uint8Array([]), - blob3: null, - int1: 1, - int2: -30, - bigint1: '31447483647', - bigint2: null, - float1: 1.0, - float2: -30.3, - float3: 5e234, - bool1: 1, - bool2: 0, - bool3: null, - enum1: 'red', - enum2: null, - } - - const recordKeys = Object.keys(record) - - const s_row = serializeRow(record, rel, dbDescription) - t.deepEqual( - s_row.values.map((bytes, idx) => - recordKeys[idx].startsWith('blob') - ? 'blob' - : new TextDecoder().decode(bytes) - ), - [ - 'Hello', - 'World!', - '', - 'blob', - 'blob', - 'blob', - '1', - '-30', - '31447483647', - '', - '1', - '-30.3', - '5e+234', - 't', - 'f', - '', - 'red', - '', - ] - ) - - const d_row = deserializeRow(s_row, rel, dbDescription) - t.deepEqual(d_row, record) - - // Test edge cases for floats such as NaN, Infinity, -Infinity - const record2: Record = { - name1: 'Edge cases for Floats', - name2: null, - name3: null, - blob1: new Uint8Array([0, 1, 255, 245]), - blob2: new Uint8Array([]), - blob3: null, - int1: null, - int2: null, - bigint1: null, - bigint2: null, - float1: NaN, - float2: Infinity, - float3: -Infinity, - bool1: null, - bool2: null, - bool3: null, - enum1: 'red', - enum2: null, - } - const recordKeys2 = Object.keys(record2) - - const s_row2 = serializeRow(record2, rel, dbDescription) - t.deepEqual( - s_row2.values.map((bytes, idx) => - recordKeys2[idx].startsWith('blob') - ? 
'blob' - : new TextDecoder().decode(bytes) - ), - [ - 'Edge cases for Floats', - '', - '', - 'blob', - 'blob', - 'blob', - '', - '', - '', - '', - 'NaN', - 'Infinity', - '-Infinity', - '', - '', - '', - 'red', - '', - ] - ) - - const d_row2 = deserializeRow(s_row2, rel, dbDescription) - t.deepEqual(d_row2, { - ...record2, - float1: 'NaN', // SQLite does not support NaN so we deserialise it into the string 'NaN' - }) -}) - -test('Null mask uses bits as if they were a list', async (t) => { - const rel: Relation = { - id: 1, - schema: 'schema', - table: 'table', - tableType: SatRelation_RelationType.TABLE, - columns: [ - { name: 'bit0', type: 'TEXT', isNullable: true }, - { name: 'bit1', type: 'TEXT', isNullable: true }, - { name: 'bit2', type: 'TEXT', isNullable: true }, - { name: 'bit3', type: 'TEXT', isNullable: true }, - { name: 'bit4', type: 'TEXT', isNullable: true }, - { name: 'bit5', type: 'TEXT', isNullable: true }, - { name: 'bit6', type: 'TEXT', isNullable: true }, - { name: 'bit7', type: 'TEXT', isNullable: true }, - { name: 'bit8', type: 'TEXT', isNullable: true }, - ], - } - - const dbDescription = new DbSchema( - { - table: { - fields: new Map([ - ['bit0', PgBasicType.PG_TEXT], - ['bit1', PgBasicType.PG_TEXT], - ['bit2', PgBasicType.PG_TEXT], - ['bit3', PgBasicType.PG_TEXT], - ['bit4', PgBasicType.PG_TEXT], - ['bit5', PgBasicType.PG_TEXT], - ['bit6', PgBasicType.PG_TEXT], - ['bit7', PgBasicType.PG_TEXT], - ['bit8', PgBasicType.PG_TEXT], - ]), - relations: [], - } as unknown as TableSchema< - any, - any, - any, - any, - any, - any, - any, - any, - any, - HKT - >, - }, - [], - [] - ) - - const record: Record = { - bit0: null, - bit1: null, - bit2: 'Filled', - bit3: null, - bit4: 'Filled', - bit5: 'Filled', - bit6: 'Filled', - bit7: 'Filled', - bit8: null, - } - const s_row = serializeRow(record, rel, dbDescription) - - const mask = [...s_row.nullsBitmask].map((x) => x.toString(2)).join('') - - t.is(mask, '1101000010000000') -}) - -type MaybePromise = T | Promise -type SetupFn = ( - t: ExecutionContext -) => MaybePromise<[DatabaseAdapterInterface, QueryBuilder, SatelliteOpts]> -const setupSqlite: SetupFn = (t: ExecutionContext) => { - const db = new Database(':memory:') - t.teardown(() => db.close()) - const namespace = 'main' - return [new SQLiteDatabaseAdapter(db), sqliteBuilder, opts(namespace)] -} - -let port = 4800 -const setupPG: SetupFn = async (t: ExecutionContext) => { - const dbName = `serialization-test-${randomValue()}` - const { db, stop } = await makePgDatabase(dbName, port++) - t.teardown(async () => await stop()) - const namespace = 'public' - return [new PgDatabaseAdapter(db), pgBuilder, opts(namespace)] -} - -;( - [ - ['SQLite', setupSqlite], - ['Postgres', setupPG], - ] as const -).forEach(([dialect, setup]) => { - test(`(${dialect}) Prioritize PG types in the schema before inferred SQLite types`, async (t) => { - const [adapter, builder, defaults] = await setup(t) - - await adapter.run({ - sql: 'CREATE TABLE bools (id INTEGER PRIMARY KEY, b INTEGER)', - }) - - const sqliteInferredRelations = await inferRelationsFromDb( - adapter, - defaults, - builder - ) - const boolsInferredRelation = sqliteInferredRelations['bools'] - - // Inferred types only support SQLite types, so the bool column is INTEGER - const boolColumn = boolsInferredRelation.columns[1] - t.is(boolColumn.name, 'b') - t.is(boolColumn.type, 'INTEGER') - - // Db schema holds the correct Postgres types - const boolsDbDescription = new DbSchema( - { - bools: { - fields: new Map([ - ['id', 
PgBasicType.PG_INTEGER], - ['b', PgBasicType.PG_BOOL], - ]), - relations: [], - } as unknown as TableSchema< - any, - any, - any, - any, - any, - any, - any, - any, - any, - HKT - >, - }, - [], - [] - ) - - const satOpRow = serializeRow( - { id: 5, b: 1 }, - boolsInferredRelation, - boolsDbDescription - ) - - // Encoded values ["5", "t"] - t.deepEqual(satOpRow.values, [ - new Uint8Array(['5'.charCodeAt(0)]), - new Uint8Array(['t'.charCodeAt(0)]), - ]) - - const deserializedRow = deserializeRow( - satOpRow, - boolsInferredRelation, - boolsDbDescription - ) - - t.deepEqual(deserializedRow, { id: 5, b: 1 }) - }) - - test(`(${dialect}) Use incoming Relation types if not found in the schema`, async (t) => { - const [adapter, builder, defaults] = await setup(t) - - const inferredRelations = await inferRelationsFromDb( - adapter, - defaults, - builder - ) - // Empty database - t.is(Object.keys(inferredRelations).length, 0) - - // Empty Db schema - const testDbDescription = new DbSchema({}, [], []) - - const newTableRelation: Relation = { - id: 1, - schema: 'schema', - table: 'new_table', - tableType: SatRelation_RelationType.TABLE, - columns: [ - { name: 'value', type: 'INTEGER', isNullable: true }, - { name: 'color', type: 'COLOR', isNullable: true }, // at runtime, incoming SatRelation messages contain the name of the enum type - ], - } - - const row = { - value: 6, - color: 'red', - } - - const satOpRow = serializeRow(row, newTableRelation, testDbDescription) - - t.deepEqual( - satOpRow.values.map((bytes) => new TextDecoder().decode(bytes)), - ['6', 'red'] - ) - - const deserializedRow = deserializeRow( - satOpRow, - newTableRelation, - testDbDescription - ) - - t.deepEqual(deserializedRow, row) - }) -}) diff --git a/clients/typescript/test/satellite/serialization.ts b/clients/typescript/test/satellite/serialization.ts new file mode 100644 index 0000000000..23f74917a8 --- /dev/null +++ b/clients/typescript/test/satellite/serialization.ts @@ -0,0 +1,404 @@ +import { SatRelation_RelationType } from '../../src/_generated/protocol/satellite' +import { serializeRow, deserializeRow } from '../../src/satellite/client' +import { TestFn, ExecutionContext } from 'ava' +import { Relation, Record } from '../../src/util/types' +import { DbSchema, TableSchema } from '../../src/client/model/schema' +import { PgBasicType } from '../../src/client/conversions/types' +import { HKT } from '../../src/client/util/hkt' +import { DatabaseAdapter as DatabaseAdapterInterface } from '../../src/electric/adapter' +import { inferRelationsFromDb } from '../../src/util/relations' +import { SatelliteOpts } from '../../src/satellite/config' +import { QueryBuilder } from '../../src/migrators/query-builder' +import { TypeDecoder, TypeEncoder } from '../../src/util' + +export type ContextType = { + dialect: 'SQLite' | 'Postgres' + encoder: TypeEncoder + decoder: TypeDecoder + setup: SetupFn +} + +type MaybePromise = T | Promise +export type SetupFn = ( + t: ExecutionContext +) => MaybePromise<[DatabaseAdapterInterface, QueryBuilder, SatelliteOpts]> + +export const serializationTests = (test: TestFn) => { + test('serialize/deserialize row data', async (t) => { + const { encoder, decoder, dialect } = t.context + const rel: Relation = { + id: 1, + schema: 'schema', + table: 'table', + tableType: SatRelation_RelationType.TABLE, + columns: [ + { name: 'name1', type: 'TEXT', isNullable: true }, + { name: 'name2', type: 'TEXT', isNullable: true }, + { name: 'name3', type: 'TEXT', isNullable: true }, + { name: 'blob1', type: 'BYTEA', 
isNullable: true }, + { name: 'blob2', type: 'BYTEA', isNullable: true }, + { name: 'blob3', type: 'BYTEA', isNullable: true }, + { name: 'int1', type: 'INTEGER', isNullable: true }, + { name: 'int2', type: 'INTEGER', isNullable: true }, + { name: 'bigint1', type: 'INT8', isNullable: true }, + { name: 'bigint2', type: 'INT8', isNullable: true }, + { name: 'float1', type: 'REAL', isNullable: true }, + { name: 'float2', type: 'FLOAT4', isNullable: true }, + { name: 'float3', type: 'FLOAT8', isNullable: true }, + { name: 'bool1', type: 'BOOL', isNullable: true }, + { name: 'bool2', type: 'BOOL', isNullable: true }, + { name: 'bool3', type: 'BOOL', isNullable: true }, + // bundled migrations contain type 'TEXT' for enums + { name: 'enum1', type: 'TEXT', isNullable: true }, + { name: 'enum2', type: 'TEXT', isNullable: true }, + ], + } + + const dbDescription = new DbSchema( + { + table: { + fields: new Map([ + ['name1', PgBasicType.PG_TEXT], + ['name2', PgBasicType.PG_TEXT], + ['name3', PgBasicType.PG_TEXT], + ['blob1', PgBasicType.PG_BYTEA], + ['blob2', PgBasicType.PG_BYTEA], + ['blob3', PgBasicType.PG_BYTEA], + ['int1', PgBasicType.PG_INTEGER], + ['int2', PgBasicType.PG_INTEGER], + ['bigint1', PgBasicType.PG_INT8], + ['bigint2', PgBasicType.PG_INT8], + ['float1', PgBasicType.PG_REAL], + ['float2', PgBasicType.PG_FLOAT4], + ['float3', PgBasicType.PG_FLOAT8], + ['bool1', PgBasicType.PG_BOOL], + ['bool2', PgBasicType.PG_BOOL], + ['bool3', PgBasicType.PG_BOOL], + // enum types are transformed to text type by our generator + ['enum1', PgBasicType.PG_TEXT], + ['enum2', PgBasicType.PG_TEXT], + ]), + relations: [], + } as unknown as TableSchema< + any, + any, + any, + any, + any, + any, + any, + any, + any, + HKT + >, + }, + [], + [] + ) + + const record: Record = { + name1: 'Hello', + name2: 'World!', + name3: null, + blob1: new Uint8Array([1, 15, 255, 145]), + blob2: new Uint8Array([]), + blob3: null, + int1: 1, + int2: -30, + bigint1: '31447483647', + bigint2: null, + float1: 1.0, + float2: -30.3, + float3: 5e234, + bool1: dialect === 'SQLite' ? 1 : true, + bool2: dialect === 'SQLite' ? 0 : false, + bool3: null, + enum1: 'red', + enum2: null, + } + + const recordKeys = Object.keys(record) + + const s_row = serializeRow(record, rel, dbDescription, encoder) + t.deepEqual( + s_row.values.map((bytes, idx) => + recordKeys[idx].startsWith('blob') + ? 'blob' + : new TextDecoder().decode(bytes) + ), + [ + 'Hello', + 'World!', + '', + 'blob', + 'blob', + 'blob', + '1', + '-30', + '31447483647', + '', + '1', + '-30.3', + '5e+234', + 't', + 'f', + '', + 'red', + '', + ] + ) + + const d_row = deserializeRow(s_row, rel, dbDescription, decoder) + t.deepEqual(d_row, record) + + // Test edge cases for floats such as NaN, Infinity, -Infinity + const record2: Record = { + name1: 'Edge cases for Floats', + name2: null, + name3: null, + blob1: new Uint8Array([0, 1, 255, 245]), + blob2: new Uint8Array([]), + blob3: null, + int1: null, + int2: null, + bigint1: null, + bigint2: null, + float1: NaN, + float2: Infinity, + float3: -Infinity, + bool1: null, + bool2: null, + bool3: null, + enum1: 'red', + enum2: null, + } + const recordKeys2 = Object.keys(record2) + + const s_row2 = serializeRow(record2, rel, dbDescription, encoder) + t.deepEqual( + s_row2.values.map((bytes, idx) => + recordKeys2[idx].startsWith('blob') + ? 
'blob' + : new TextDecoder().decode(bytes) + ), + [ + 'Edge cases for Floats', + '', + '', + 'blob', + 'blob', + 'blob', + '', + '', + '', + '', + 'NaN', + 'Infinity', + '-Infinity', + '', + '', + '', + 'red', + '', + ] + ) + + const d_row2 = deserializeRow(s_row2, rel, dbDescription, decoder) + t.deepEqual(d_row2, { + ...record2, + float1: 'NaN', // SQLite does not support NaN so we deserialise it into the string 'NaN' + }) + }) + + test('Null mask uses bits as if they were a list', async (t) => { + const { encoder } = t.context + const rel: Relation = { + id: 1, + schema: 'schema', + table: 'table', + tableType: SatRelation_RelationType.TABLE, + columns: [ + { name: 'bit0', type: 'TEXT', isNullable: true }, + { name: 'bit1', type: 'TEXT', isNullable: true }, + { name: 'bit2', type: 'TEXT', isNullable: true }, + { name: 'bit3', type: 'TEXT', isNullable: true }, + { name: 'bit4', type: 'TEXT', isNullable: true }, + { name: 'bit5', type: 'TEXT', isNullable: true }, + { name: 'bit6', type: 'TEXT', isNullable: true }, + { name: 'bit7', type: 'TEXT', isNullable: true }, + { name: 'bit8', type: 'TEXT', isNullable: true }, + ], + } + + const dbDescription = new DbSchema( + { + table: { + fields: new Map([ + ['bit0', PgBasicType.PG_TEXT], + ['bit1', PgBasicType.PG_TEXT], + ['bit2', PgBasicType.PG_TEXT], + ['bit3', PgBasicType.PG_TEXT], + ['bit4', PgBasicType.PG_TEXT], + ['bit5', PgBasicType.PG_TEXT], + ['bit6', PgBasicType.PG_TEXT], + ['bit7', PgBasicType.PG_TEXT], + ['bit8', PgBasicType.PG_TEXT], + ]), + relations: [], + } as unknown as TableSchema< + any, + any, + any, + any, + any, + any, + any, + any, + any, + HKT + >, + }, + [], + [] + ) + + const record: Record = { + bit0: null, + bit1: null, + bit2: 'Filled', + bit3: null, + bit4: 'Filled', + bit5: 'Filled', + bit6: 'Filled', + bit7: 'Filled', + bit8: null, + } + const s_row = serializeRow(record, rel, dbDescription, encoder) + + const mask = [...s_row.nullsBitmask].map((x) => x.toString(2)).join('') + + t.is(mask, '1101000010000000') + }) + + test(`Prioritize PG types in the schema before inferred SQLite types`, async (t) => { + const { encoder, decoder, dialect } = t.context + const [adapter, builder, defaults] = await t.context.setup(t) + + await adapter.run({ + sql: 'CREATE TABLE bools (id INTEGER PRIMARY KEY, b INTEGER)', + }) + + const sqliteInferredRelations = await inferRelationsFromDb( + adapter, + defaults, + builder + ) + const boolsInferredRelation = sqliteInferredRelations['bools'] + + // Inferred types only support SQLite types, so the bool column is INTEGER + const boolColumn = boolsInferredRelation.columns[1] + t.is(boolColumn.name, 'b') + t.is(boolColumn.type, 'INTEGER') + + // Db schema holds the correct Postgres types + const boolsDbDescription = new DbSchema( + { + bools: { + fields: new Map([ + ['id', PgBasicType.PG_INTEGER], + ['b', PgBasicType.PG_BOOL], + ]), + relations: [], + } as unknown as TableSchema< + any, + any, + any, + any, + any, + any, + any, + any, + any, + HKT + >, + }, + [], + [] + ) + + const satOpRow = serializeRow( + { id: 5, b: dialect === 'SQLite' ? 1 : true }, + boolsInferredRelation, + boolsDbDescription, + encoder + ) + + // Encoded values ["5", "t"] + t.deepEqual(satOpRow.values, [ + new Uint8Array(['5'.charCodeAt(0)]), + new Uint8Array(['t'.charCodeAt(0)]), + ]) + + const deserializedRow = deserializeRow( + satOpRow, + boolsInferredRelation, + boolsDbDescription, + decoder + ) + + t.deepEqual(deserializedRow, { id: 5, b: dialect === 'SQLite' ? 
1 : true }) + }) + + test(`Use incoming Relation types if not found in the schema`, async (t) => { + const { encoder, decoder } = t.context + const [adapter, builder, defaults] = await t.context.setup(t) + + const inferredRelations = await inferRelationsFromDb( + adapter, + defaults, + builder + ) + // Empty database + t.is(Object.keys(inferredRelations).length, 0) + + // Empty Db schema + const testDbDescription = new DbSchema({}, [], []) + + const newTableRelation: Relation = { + id: 1, + schema: 'schema', + table: 'new_table', + tableType: SatRelation_RelationType.TABLE, + columns: [ + { name: 'value', type: 'INTEGER', isNullable: true }, + { name: 'color', type: 'COLOR', isNullable: true }, // at runtime, incoming SatRelation messages contain the name of the enum type + ], + } + + const row = { + value: 6, + color: 'red', + } + + const satOpRow = serializeRow( + row, + newTableRelation, + testDbDescription, + encoder + ) + + t.deepEqual( + satOpRow.values.map((bytes) => new TextDecoder().decode(bytes)), + ['6', 'red'] + ) + + const deserializedRow = deserializeRow( + satOpRow, + newTableRelation, + testDbDescription, + decoder + ) + + t.deepEqual(deserializedRow, row) + }) +} diff --git a/clients/typescript/test/satellite/sqlite/serialization.test.ts b/clients/typescript/test/satellite/sqlite/serialization.test.ts new file mode 100644 index 0000000000..271051f77a --- /dev/null +++ b/clients/typescript/test/satellite/sqlite/serialization.test.ts @@ -0,0 +1,25 @@ +import anyTest, { ExecutionContext, TestFn } from 'ava' +import Database from 'better-sqlite3' +import { DatabaseAdapter as SQLiteDatabaseAdapter } from '../../../src/drivers/better-sqlite3' +import { sqliteBuilder } from '../../../src/migrators/query-builder' +import { opts } from '../common' +import { ContextType, SetupFn, serializationTests } from '../serialization' +import { sqliteTypeDecoder, sqliteTypeEncoder } from '../../../src/util/common' + +const test = anyTest as TestFn + +const setupSqlite: SetupFn = (t: ExecutionContext) => { + const db = new Database(':memory:') + t.teardown(() => db.close()) + const namespace = 'main' + return [new SQLiteDatabaseAdapter(db), sqliteBuilder, opts(namespace)] +} + +test.beforeEach(async (t) => { + t.context.dialect = 'SQLite' + t.context.encoder = sqliteTypeEncoder + t.context.decoder = sqliteTypeDecoder + t.context.setup = setupSqlite +}) + +serializationTests(test) From d6d1323a58d3865ad8aea02e94d696edbebff55c Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 11 Apr 2024 12:11:19 +0200 Subject: [PATCH 057/156] Float4 conversions for PG --- clients/typescript/src/client/conversions/postgres.ts | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/clients/typescript/src/client/conversions/postgres.ts b/clients/typescript/src/client/conversions/postgres.ts index 69bf400d3f..3c3a1d71de 100644 --- a/clients/typescript/src/client/conversions/postgres.ts +++ b/clients/typescript/src/client/conversions/postgres.ts @@ -30,6 +30,10 @@ function toPostgres(v: any, pgType: PgType): any { return serialiseJSON(v) } + if (pgType === PgBasicType.PG_FLOAT4 || pgType === PgBasicType.PG_REAL) { + return Math.fround(v) + } + return v } @@ -52,6 +56,11 @@ function fromPostgres(v: any, pgType: PgType): any { return BigInt(v) // needed because the node-pg driver returns bigints as strings } + if (pgType === PgBasicType.PG_FLOAT4 || pgType === PgBasicType.PG_REAL) { + // fround the number to represent it as a 32-bit float + return Math.fround(v) + } + return v } From 
37438484e9154b2f96dd1dda4c05b4b950f308f3 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 16 Apr 2024 09:29:44 +0200 Subject: [PATCH 058/156] Fix BYTEA e2e tests for PG on the client --- e2e/satellite_client/src/client.ts | 10 +++++++++- e2e/tests/03.19_node_satellite_can_sync_json.lux | 15 ++++++++++++--- e2e/tests/_satellite_macros.luxinc | 6 ++++++ e2e/tests/_shared.luxinc | 4 ++-- 4 files changed, 29 insertions(+), 6 deletions(-) diff --git a/e2e/satellite_client/src/client.ts b/e2e/satellite_client/src/client.ts index 1387590c4f..4f726c6b29 100644 --- a/e2e/satellite_client/src/client.ts +++ b/e2e/satellite_client/src/client.ts @@ -367,11 +367,19 @@ export const write_enum = (electric: Electric, id: string, c: Color | null) => { } export const get_blob = async (electric: Electric, id: string) => { - return electric.db.blobs.findUnique({ + const res = await electric.db.blobs.findUnique({ where: { id: id } }) + + if (res) { + // The PG driver returns a NodeJS Buffer but the e2e test matches on a plain Uint8Array. + // So we convert the Buffer to a Uint8Array. + // Note that Buffer is a subclass of Uint8Array. + res.blob = new Uint8Array(res.blob) + } + return res } export const write_blob = (electric: Electric, id: string, blob: Uint8Array | null) => { diff --git a/e2e/tests/03.19_node_satellite_can_sync_json.lux b/e2e/tests/03.19_node_satellite_can_sync_json.lux index 45ad8c412f..b001a851a3 100644 --- a/e2e/tests/03.19_node_satellite_can_sync_json.lux +++ b/e2e/tests/03.19_node_satellite_can_sync_json.lux @@ -49,7 +49,9 @@ [invoke node_write_json "row3" "null" "client.JsonNull"] # read JsonNull value #[invoke node_get_json "row3" "null"] - [invoke node_get_jsonb "row3" "{ __is_electric_json_null__: true }"] + # When running with PG on the client we don't yet support top-level JSON null values + # instead top-level JSON null values become DB null values + [invoke node_get_jsonb_regex "row3" "\{ __is_electric_json_null__: true \}|null"] # write regular JSON values [invoke node_write_json "row4" 500 "{ a: true, b: [ 1, 2 ] }"] @@ -63,6 +65,10 @@ [invoke node_write_json "row6" "null" "[\"it's ⚡\", {}, \"\\u2603 under \\u2602\"]"] [invoke node_get_jsonb "row6" "[ \"it's ⚡\", {}, '☃ under ☂' ]"] + # Write JSON string "null" + [invoke node_write_json "row7" "null" "'null'"] + [invoke node_get_jsonb "row7" "'null'"] + # Even though JSON can encode the NUL code point and unpaired surrogates, those will fail Postgres' jsonb validation. 
# Per the builtin JSON.stringify() function: # @@ -92,10 +98,11 @@ #??row4 | [ 1, { a: "foo" }, true ] | "bar" ??row1 | [{"a": 1}, {"b": 5, "d": false}] ??row2 | - ??row3 | null + ?row3 \| null| ??row4 | {"a": true, "b": [1, 2]} ??row5 | [1, {"a": "foo"}, true] ??row6 | ["it's ⚡", {}, "☃ under ☂"] + ??row7 | "null" # Start a new Satellite client and verify that it receives all rows [invoke setup_client 2 electric_1 5133] @@ -120,7 +127,7 @@ [invoke node_get_jsonb "row2" "null"] #[invoke node_get_json "row3" "null"] - [invoke node_get_jsonb "row3" "{ __is_electric_json_null__: true }"] + [invoke node_get_jsonb_regex "row3" "\{ __is_electric_json_null__: true \}|null"] #[invoke node_get_json "row4" 500] [invoke node_get_jsonb "row4" "{ a: true, b: [ 1, 2 ] }"] @@ -130,5 +137,7 @@ [invoke node_get_jsonb "row6" "[ \"it's ⚡\", {}, '☃ under ☂' ]"] + [invoke node_get_jsonb "row7" "'null'"] + [cleanup] [invoke teardown] diff --git a/e2e/tests/_satellite_macros.luxinc b/e2e/tests/_satellite_macros.luxinc index 86ae0f34f2..0adc979fd9 100644 --- a/e2e/tests/_satellite_macros.luxinc +++ b/e2e/tests/_satellite_macros.luxinc @@ -131,6 +131,12 @@ ??$node [endmacro] +[macro node_get_jsonb_regex id expected_jsonb] + !await client.get_jsonb(db, '${id}') + ?\{ id: '${id}', jsb: ${expected_jsonb} \} + ??$node +[endmacro] + [macro node_get_enum id expected_enum] !await client.get_enum(db, '${id}') ??{ id: '${id}', c: ${expected_enum} } diff --git a/e2e/tests/_shared.luxinc b/e2e/tests/_shared.luxinc index 5397b29417..188789d005 100644 --- a/e2e/tests/_shared.luxinc +++ b/e2e/tests/_shared.luxinc @@ -96,14 +96,14 @@ [macro elixir_client_subscribe tables] """! - {:ok, %{err: nil}} = TestWsClient.make_rpc_call(conn, "subscribe", ProtocolHelpers.subscription_request(request_1: ~w|$tables|)) + {:ok, %{err: nil}} = TestWsClient.make_rpc_call(conn, "subscribe", ProtocolHelpers.subscription_request(request_1: [tables: ~w|$tables|])) """ ?rec \[\d+\]: %Electric.Satellite.SatSubsDataEnd\{\} [endmacro] [macro elixir_client_subscribe_with_id id tables] """! 
- {:ok, %{err: nil}} = TestWsClient.make_rpc_call(conn, "subscribe", ProtocolHelpers.subscription_request("$id", request_1: ~w|$tables|)) + {:ok, %{err: nil}} = TestWsClient.make_rpc_call(conn, "subscribe", ProtocolHelpers.subscription_request("$id", request_1: [tables: ~w|$tables|])) """ ?rec \[\d+\]: %Electric.Satellite.SatSubsDataEnd\{\} [endmacro] From ae2a984b84738ad0754bd49dc314a40eb3d8c5a3 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 16 Apr 2024 09:47:35 +0200 Subject: [PATCH 059/156] Modify GH workflow for e2e to also run PG on client tests --- .github/workflows/e2e.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 20a2c8c233..b9bbf432c7 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -80,7 +80,7 @@ jobs: - run: make lux - run: make deps pull - - run: make test_only + - run: make test_only && make -C tests test_pg id: tests env: ELECTRIC_IMAGE_NAME: electric-sql-ci/electric From c7d341f470f29eb8faa901817503b9d92f5b0753 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 16 Apr 2024 11:20:29 +0200 Subject: [PATCH 060/156] Modify parsing of timestamptz by Electric to also allow formatting with +00 instead of Z --- components/electric/lib/electric/satellite/serialization.ex | 3 --- 1 file changed, 3 deletions(-) diff --git a/components/electric/lib/electric/satellite/serialization.ex b/components/electric/lib/electric/satellite/serialization.ex index d4a2389866..6304910158 100644 --- a/components/electric/lib/electric/satellite/serialization.ex +++ b/components/electric/lib/electric/satellite/serialization.ex @@ -626,9 +626,6 @@ defmodule Electric.Satellite.Serialization do def decode_column_value!(val, :timestamptz) do # The offset of datetimes coming over the Satellite protocol MUST be 0. 
- len_minus_1 = byte_size(val) - 1 - <<_::binary-size(len_minus_1), "Z">> = val - {:ok, dt, 0} = DateTime.from_iso8601(val) assert_valid_year!(dt.year) From f15d86d5dee4abcdf965b28f338bc9efffa86e1f Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 16 Apr 2024 11:23:07 +0200 Subject: [PATCH 061/156] Fixes for unit tests and e2e tests --- .../src/client/conversions/datatypes/json.ts | 4 +- .../src/client/conversions/postgres.ts | 27 +- clients/typescript/src/client/model/table.ts | 5 +- clients/typescript/src/satellite/client.ts | 110 +++- clients/typescript/src/satellite/oplog.ts | 2 +- clients/typescript/src/satellite/process.ts | 1 + .../typescript/src/satellite/shapes/cache.ts | 10 +- clients/typescript/src/util/common.ts | 59 +- clients/typescript/src/util/proto.ts | 4 +- clients/typescript/src/util/types.ts | 2 +- .../test/client/generated/index.d.ts | 601 ++++++++++++++++++ .../test/client/generated/migrations.d.ts | 2 + .../test/client/generated/pg-migrations.d.ts | 2 + .../typescript/test/satellite/client.test.ts | 59 +- .../03.19_node_satellite_can_sync_json.lux | 2 + 15 files changed, 831 insertions(+), 59 deletions(-) create mode 100644 clients/typescript/test/client/generated/index.d.ts create mode 100644 clients/typescript/test/client/generated/migrations.d.ts create mode 100644 clients/typescript/test/client/generated/pg-migrations.d.ts diff --git a/clients/typescript/src/client/conversions/datatypes/json.ts b/clients/typescript/src/client/conversions/datatypes/json.ts index ac09f74897..032ae3249f 100644 --- a/clients/typescript/src/client/conversions/datatypes/json.ts +++ b/clients/typescript/src/client/conversions/datatypes/json.ts @@ -15,11 +15,13 @@ export function serialiseJSON(v: JSON): string { } export function deserialiseJSON(v: string): JSON { + console.log('DESERIALISING:\n' + v) + console.log('PARSED:\n' + JSON.parse(v)) if (v === JSON.stringify(null)) return { __is_electric_json_null__: true } return JSON.parse(v) } -function isJsonNull(v: JSON): boolean { +export function isJsonNull(v: JSON): boolean { return ( isObject(v) && Object.hasOwn(v, '__is_electric_json_null__') && diff --git a/clients/typescript/src/client/conversions/postgres.ts b/clients/typescript/src/client/conversions/postgres.ts index 3c3a1d71de..18af8e5637 100644 --- a/clients/typescript/src/client/conversions/postgres.ts +++ b/clients/typescript/src/client/conversions/postgres.ts @@ -1,7 +1,7 @@ import { InvalidArgumentError } from '../validation/errors/invalidArgumentError' import { Converter } from './converter' import { deserialiseDate, serialiseDate } from './datatypes/date' -import { deserialiseJSON, serialiseJSON } from './datatypes/json' +import { isJsonNull } from './datatypes/json' import { PgBasicType, PgDateType, PgType } from './types' /** @@ -27,7 +27,20 @@ function toPostgres(v: any, pgType: PgType): any { } if (pgType === PgBasicType.PG_JSON || pgType === PgBasicType.PG_JSONB) { - return serialiseJSON(v) + // FIXME: the specialised conversions below are needed because of the pg package + // we use to connect to the PG database + // if we support other PG drivers then this may not be needed + // Ideally, we would do this conversion in the driver itself + if (v === null) { + return null + } + if (isJsonNull(v)) { + // Also turn into a DB null + // because we currently don't support top-level JSON null value + // when using Postgres + return null // 'null' + } + return JSON.stringify(v) } if (pgType === PgBasicType.PG_FLOAT4 || pgType === PgBasicType.PG_REAL) { @@ -49,7 +62,15 
@@ function fromPostgres(v: any, pgType: PgType): any { } if (pgType === PgBasicType.PG_JSON || pgType === PgBasicType.PG_JSONB) { - return deserialiseJSON(v) + if (v === null) { + // DB null + return null + } + if (v === 'null') { + // JSON null value + return { __is_electric_json_null__: true } + } + return JSON.parse(v) } if (pgType === PgBasicType.PG_INT8) { diff --git a/clients/typescript/src/client/model/table.ts b/clients/typescript/src/client/model/table.ts index 1d2c2b162d..bfcfa4e195 100644 --- a/clients/typescript/src/client/model/table.ts +++ b/clients/typescript/src/client/model/table.ts @@ -1663,10 +1663,7 @@ export function rawQuery( // only allow safe queries from the client if (isPotentiallyDangerous(sql.sql)) { throw new InvalidArgumentError( - 'Cannot use queries that might alter the store - ' + - 'please use read-only queries' + - ' - DEBUG:\n' + - JSON.stringify(sql, null, 2) + 'Cannot use queries that might alter the store - please use read-only queries' ) } diff --git a/clients/typescript/src/satellite/client.ts b/clients/typescript/src/satellite/client.ts index 38b4cea41b..8bb6f62106 100644 --- a/clients/typescript/src/satellite/client.ts +++ b/clients/typescript/src/satellite/client.ts @@ -76,9 +76,13 @@ import { import { base64, DEFAULT_LOG_POS, - typeEncoder, - typeDecoder, + sqliteTypeEncoder, + sqliteTypeDecoder, bytesToNumber, + TypeEncoder, + TypeDecoder, + pgTypeEncoder, + pgTypeDecoder, } from '../util/common' import { Client } from '.' import { SatelliteClientOpts, satelliteClientDefaults } from './config' @@ -133,6 +137,8 @@ type EventEmitter = AsyncEventEmitter export class SatelliteClient implements Client { private opts: Required private dialect: SatInStartReplicationReq_Dialect + private encoder: TypeEncoder + private decoder: TypeDecoder private emitter: EventEmitter @@ -200,13 +206,18 @@ export class SatelliteClient implements Client { opts.dialect === 'SQLite' ? SatInStartReplicationReq_Dialect.SQLITE : SatInStartReplicationReq_Dialect.POSTGRES + this.encoder = opts.dialect === 'SQLite' ? sqliteTypeEncoder : pgTypeEncoder + this.decoder = opts.dialect === 'SQLite' ? sqliteTypeDecoder : pgTypeDecoder this.socketFactory = socketFactory this.inbound = this.resetInboundReplication() this.outbound = this.resetReplication() this.dbDescription = dbDescription - this.subscriptionsDataCache = new SubscriptionsDataCache(dbDescription) + this.subscriptionsDataCache = new SubscriptionsDataCache( + dbDescription, + this.decoder + ) this.rpcClient = new RPC( this.sendMessage.bind(this), this.opts.timeout, @@ -647,10 +658,20 @@ export class SatelliteClient implements Client { const relation = this.outbound.relations.get(change.relation.id)! 
const tags = change.tags if (change.oldRecord) { - oldRecord = serializeRow(change.oldRecord, relation, this.dbDescription) + oldRecord = serializeRow( + change.oldRecord, + relation, + this.dbDescription, + this.encoder + ) } if (change.record) { - record = serializeRow(change.record, relation, this.dbDescription) + record = serializeRow( + change.record, + relation, + this.dbDescription, + this.encoder + ) } switch (change.type) { case DataChangeType.DELETE: @@ -1091,7 +1112,12 @@ export class SatelliteClient implements Client { const change: DataInsert = { relation: rel, type: DataChangeType.INSERT, - record: deserializeRow(op.insert.rowData!, rel, this.dbDescription), + record: deserializeRow( + op.insert.rowData!, + rel, + this.dbDescription, + this.decoder + ), tags: op.insert.tags, } @@ -1108,11 +1134,17 @@ export class SatelliteClient implements Client { const change = { relation: rel, type: DataChangeType.UPDATE, - record: deserializeRow(op.update.rowData!, rel, this.dbDescription), + record: deserializeRow( + op.update.rowData!, + rel, + this.dbDescription, + this.decoder + ), oldRecord: deserializeRow( op.update.oldRowData, rel, - this.dbDescription + this.dbDescription, + this.decoder ), tags: op.update.tags, } @@ -1129,7 +1161,8 @@ export class SatelliteClient implements Client { oldRecord: deserializeRow( op.delete.oldRowData!, rel, - this.dbDescription + this.dbDescription, + this.decoder ), tags: op.delete.tags, } @@ -1143,7 +1176,12 @@ export class SatelliteClient implements Client { const change = { relation: rel, type: DataChangeType.GONE, - oldRecord: deserializeRow(op.gone.pkData, rel, this.dbDescription), + oldRecord: deserializeRow( + op.gone.pkData, + rel, + this.dbDescription, + this.decoder + ), tags: [], } @@ -1314,7 +1352,8 @@ function getColumnType( export function serializeRow( rec: Record, relation: Relation, - dbDescription: DbSchema + dbDescription: DbSchema, + encoder: TypeEncoder ): SatOpRow { let recordNumColumn = 0 const recordNullBitMask = new Uint8Array( @@ -1324,7 +1363,7 @@ export function serializeRow( (acc: Uint8Array[], c: RelationColumn) => { if (rec[c.name] != null) { const pgColumnType = getColumnType(dbDescription, relation.table, c) - acc.push(serializeColumnData(rec[c.name]!, pgColumnType)) + acc.push(serializeColumnData(rec[c.name]!, pgColumnType, encoder)) } else { acc.push(serializeNullData()) setMaskBit(recordNullBitMask, recordNumColumn) @@ -1343,17 +1382,20 @@ export function serializeRow( export function deserializeRow( row: SatOpRow, relation: Relation, - dbDescription: DbSchema + dbDescription: DbSchema, + decoder: TypeDecoder ): Record export function deserializeRow( row: SatOpRow | undefined, relation: Relation, - dbDescription: DbSchema + dbDescription: DbSchema, + decoder: TypeDecoder ): Record | undefined export function deserializeRow( row: SatOpRow | undefined, relation: Relation, - dbDescription: DbSchema + dbDescription: DbSchema, + decoder: TypeDecoder ): Record | undefined { if (row == undefined) { return undefined @@ -1365,7 +1407,7 @@ export function deserializeRow( value = null } else { const pgColumnType = getColumnType(dbDescription, relation.table, c) - value = deserializeColumnData(row.values[i], pgColumnType) + value = deserializeColumnData(row.values[i], pgColumnType, decoder) } return [c.name, value] }) @@ -1383,49 +1425,59 @@ function calculateNumBytes(column_num: number): number { function deserializeColumnData( column: Uint8Array, - columnType: PgType -): string | number | Uint8Array { + columnType: PgType, 
+ decoder: TypeDecoder +): boolean | string | number | Uint8Array { switch (columnType) { case PgBasicType.PG_BOOL: - return typeDecoder.bool(column) + return decoder.bool(column) case PgBasicType.PG_INT: case PgBasicType.PG_INT2: case PgBasicType.PG_INT4: case PgBasicType.PG_INTEGER: - return Number(typeDecoder.text(column)) + return Number(decoder.text(column)) case PgBasicType.PG_FLOAT4: case PgBasicType.PG_FLOAT8: case PgBasicType.PG_REAL: - return typeDecoder.float(column) + return decoder.float(column) case PgDateType.PG_TIMETZ: - return typeDecoder.timetz(column) + return decoder.timetz(column) case PgBasicType.PG_BYTEA: return column + /* + case PgBasicType.PG_JSON: + case PgBasicType.PG_JSONB: + return (decoder.json as any)(column) + */ default: // also covers user-defined enumeration types - return typeDecoder.text(column) + return decoder.text(column) } } // All values serialized as textual representation function serializeColumnData( - columnValue: string | number | object, - columnType: PgType + columnValue: boolean | string | number | object, + columnType: PgType, + encoder: TypeEncoder ): Uint8Array { switch (columnType) { case PgBasicType.PG_BOOL: - return typeEncoder.bool(columnValue as number) + return (encoder.bool as any)(columnValue) // the encoder accepts the number or bool case PgDateType.PG_TIMETZ: - return typeEncoder.timetz(columnValue as string) + return encoder.timetz(columnValue as string) case PgBasicType.PG_BYTEA: return columnValue as Uint8Array + case PgBasicType.PG_JSON: + case PgBasicType.PG_JSONB: + return (encoder.json as any)(columnValue) default: - return typeEncoder.text(String(columnValue)) + return encoder.text(String(columnValue)) } } function serializeNullData(): Uint8Array { - return typeEncoder.text('') + return sqliteTypeEncoder.text('') } export function toMessage(data: Uint8Array): SatPbMsg { diff --git a/clients/typescript/src/satellite/oplog.ts b/clients/typescript/src/satellite/oplog.ts index 88049e88d3..4101e2b4f8 100644 --- a/clients/typescript/src/satellite/oplog.ts +++ b/clients/typescript/src/satellite/oplog.ts @@ -513,7 +513,7 @@ export const opLogEntryToChange = ( * @returns a stringified JSON with stable sorting on column names */ export const primaryKeyToStr = < - T extends Record + T extends Record >( primaryKeyObj: T ): string => { diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index 63f01a98bc..7d6a6838d7 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -584,6 +584,7 @@ export class SatelliteProcess implements Satellite { ) try { + console.log('APPLYING SUBS DATA:\n' + JSON.stringify(stmts, null, 2)) await this.adapter.runInTransaction(...stmts) // We're explicitly not specifying rowids in these changes for now, diff --git a/clients/typescript/src/satellite/shapes/cache.ts b/clients/typescript/src/satellite/shapes/cache.ts index af02db486a..8c4787e343 100644 --- a/clients/typescript/src/satellite/shapes/cache.ts +++ b/clients/typescript/src/satellite/shapes/cache.ts @@ -11,6 +11,7 @@ import { Relation, SatelliteError, SatelliteErrorCode, + TypeDecoder, subsDataErrorToSatelliteError, } from '../../util' import { deserializeRow } from '../client' @@ -39,7 +40,7 @@ export class SubscriptionsDataCache extends EventEmitter { inDelivery?: SubscriptionDataInternal dbDescription: DbSchema - constructor(dbDescription: DbSchema) { + constructor(dbDescription: DbSchema, private decoder: TypeDecoder) { super() 
this.requestedSubscriptions = {} @@ -279,7 +280,12 @@ export class SubscriptionsDataCache extends EventEmitter { ) } - const record = deserializeRow(rowData, relation, this.dbDescription) + const record = deserializeRow( + rowData, + relation, + this.dbDescription, + this.decoder + ) if (!record) { this.internalError( diff --git a/clients/typescript/src/util/common.ts b/clients/typescript/src/util/common.ts index 14502ea85b..0bb8745dff 100644 --- a/clients/typescript/src/util/common.ts +++ b/clients/typescript/src/util/common.ts @@ -2,18 +2,65 @@ import { SatelliteError } from './types' import BASE64 from 'base-64' import { TextEncoderLite, TextDecoderLite } from 'text-encoder-lite' -export const typeDecoder = { +export type TypeEncoder = typeof sqliteTypeEncoder | typeof pgTypeEncoder +export type TypeDecoder = typeof sqliteTypeDecoder | typeof pgTypeDecoder + +export const sqliteTypeEncoder = { + bool: boolToBytes, + text: (string: string) => textEncoder.encode(string), + json: (string: string) => { + const res = textEncoder.encode(string) + console.log('TEXTT ENCODED:\n' + res) + return res + }, + timetz: (string: string) => + sqliteTypeEncoder.text(stringToTimetzString(string)), +} + +export const sqliteTypeDecoder = { bool: bytesToBool, text: bytesToString, + json: bytesToString, timetz: bytesToTimetzString, float: bytesToFloat, } -export const typeEncoder = { - bool: boolToBytes, - text: (string: string) => textEncoder.encode(string), - timetz: (string: string) => typeEncoder.text(stringToTimetzString(string)), +//// PG encoders/decoders +export const pgTypeEncoder = { + ...sqliteTypeEncoder, + bool: pgBoolToBytes, + json: (x: JSON) => { + const str = JSON.stringify(x) + console.log('GONNA ENCODE:\n' + x) + console.log('SERIALISED:\n' + str) + const res = textEncoder.encode(str) + console.log('TEXT ENCODED:\n' + res) + //return textEncoder.encode(serialiseJSON(x)) + return res + }, +} + +export const pgTypeDecoder = { + ...sqliteTypeDecoder, + bool: bytesToPgBool, + json: (bs: Uint8Array) => JSON.parse(textDecoder.decode(bs)), +} + +function pgBoolToBytes(b: boolean) { + if (typeof b !== 'boolean') { + throw new Error(`Invalid boolean value: ${b}`) + } + return new Uint8Array([b ? trueByte : falseByte]) +} + +function bytesToPgBool(bs: Uint8Array) { + if (bs.length === 1 && (bs[0] === trueByte || bs[0] === falseByte)) { + return bs[0] === trueByte + } + + throw new Error(`Invalid binary-encoded boolean value: ${bs}`) } +//// export const base64 = { fromBytes: (bytes: Uint8Array) => @@ -99,7 +146,7 @@ function bytesToTimetzString(bytes: Uint8Array) { * @returns The SQLite value. 
*/ function bytesToFloat(bytes: Uint8Array) { - const text = typeDecoder.text(bytes) + const text = sqliteTypeDecoder.text(bytes) if (text === 'NaN') { return 'NaN' } else { diff --git a/clients/typescript/src/util/proto.ts b/clients/typescript/src/util/proto.ts index 18a8eef59d..3e64244069 100644 --- a/clients/typescript/src/util/proto.ts +++ b/clients/typescript/src/util/proto.ts @@ -2,7 +2,7 @@ import * as Pb from '../_generated/protocol/satellite' import * as _m0 from 'protobufjs/minimal' import { SatelliteError, SatelliteErrorCode } from './types' import { ShapeRequest } from '../satellite/shapes/types' -import { base64, typeDecoder } from './common' +import { base64, sqliteTypeDecoder } from './common' import { getMaskBit } from './bitmaskHelpers' export type GetName = @@ -468,7 +468,7 @@ function rowToString(row: Pb.SatOpRow): string { return row.values .map((x, i) => getMaskBit(row.nullsBitmask, i) == 0 - ? JSON.stringify(typeDecoder.text(x)) + ? JSON.stringify(sqliteTypeDecoder.text(x)) : '∅' ) .join(', ') diff --git a/clients/typescript/src/util/types.ts b/clients/typescript/src/util/types.ts index a472f32d8f..a09f978ad4 100644 --- a/clients/typescript/src/util/types.ts +++ b/clients/typescript/src/util/types.ts @@ -181,7 +181,7 @@ export function isDataChange(change: Change): change is DataChange { } export type Record = { - [key: string]: string | number | Uint8Array | undefined | null + [key: string]: boolean | string | number | Uint8Array | undefined | null } export type Replication = { diff --git a/clients/typescript/test/client/generated/index.d.ts b/clients/typescript/test/client/generated/index.d.ts new file mode 100644 index 0000000000..673b556e72 --- /dev/null +++ b/clients/typescript/test/client/generated/index.d.ts @@ -0,0 +1,601 @@ +import { z } from 'zod'; +import type { Prisma } from './prismaClient'; +import { type TableSchema, DbSchema, ElectricClient, type HKT } from '../../../src/client/model'; +export type NullableJsonInput = Prisma.JsonValue | null; +export declare const JsonValue: z.ZodType; +export type JsonValueType = z.infer; +export declare const NullableJsonValue: z.ZodNullable>; +export type NullableJsonValueType = z.infer; +export declare const InputJsonValue: z.ZodType; +export type InputJsonValueType = z.infer; +export declare const DataTypesScalarFieldEnumSchema: z.ZodEnum<["id", "date", "time", "timetz", "timestamp", "timestamptz", "bool", "uuid", "int2", "int4", "int8", "float4", "float8", "json", "bytea", "relatedId"]>; +export declare const DummyScalarFieldEnumSchema: z.ZodEnum<["id", "timestamp"]>; +export declare const ItemsScalarFieldEnumSchema: z.ZodEnum<["value", "nbr"]>; +export declare const JsonNullValueFilterSchema: z.ZodEnum<["DbNull", "JsonNull", "AnyNull"]>; +export declare const NullableJsonNullValueInputSchema: z.ZodEnum<["DbNull", "JsonNull"]>; +export declare const PostScalarFieldEnumSchema: z.ZodEnum<["id", "title", "contents", "nbr", "authorId"]>; +export declare const ProfileImageScalarFieldEnumSchema: z.ZodEnum<["id", "image"]>; +export declare const ProfileScalarFieldEnumSchema: z.ZodEnum<["id", "bio", "meta", "userId", "imageId"]>; +export declare const QueryModeSchema: z.ZodEnum<["default", "insensitive"]>; +export declare const SortOrderSchema: z.ZodEnum<["asc", "desc"]>; +export declare const TransactionIsolationLevelSchema: z.ZodEnum<["ReadUncommitted", "ReadCommitted", "RepeatableRead", "Serializable"]>; +export declare const UserScalarFieldEnumSchema: z.ZodEnum<["id", "name", "meta"]>; +export declare const 
ItemsSchema: z.ZodObject<{ + value: z.ZodString; + nbr: z.ZodNullable; +}, "strip", z.ZodTypeAny, { + value: string; + nbr: number | null; +}, { + value: string; + nbr: number | null; +}>; +export type Items = z.infer; +export declare const UserSchema: z.ZodObject<{ + id: z.ZodNumber; + name: z.ZodNullable; + meta: z.ZodNullable; +}, "strip", z.ZodTypeAny, { + id: number; + name: string | null; + meta: string | null; +}, { + id: number; + name: string | null; + meta: string | null; +}>; +export type User = z.infer; +export declare const PostSchema: z.ZodObject<{ + id: z.ZodNumber; + title: z.ZodString; + contents: z.ZodString; + nbr: z.ZodNullable; + authorId: z.ZodNumber; +}, "strip", z.ZodTypeAny, { + id: number; + nbr: number | null; + title: string; + contents: string; + authorId: number; +}, { + id: number; + nbr: number | null; + title: string; + contents: string; + authorId: number; +}>; +export type Post = z.infer; +export declare const ProfileSchema: z.ZodObject<{ + id: z.ZodNumber; + bio: z.ZodString; + meta: z.ZodOptional>>; + userId: z.ZodNumber; + imageId: z.ZodNullable; +}, "strip", z.ZodTypeAny, { + meta?: Prisma.JsonValue | undefined; + id: number; + userId: number; + bio: string; + imageId: string | null; +}, { + meta?: Prisma.JsonValue | undefined; + id: number; + userId: number; + bio: string; + imageId: string | null; +}>; +export type Profile = z.infer; +export declare const ProfileImageSchema: z.ZodObject<{ + id: z.ZodString; + image: z.ZodType; +}, "strip", z.ZodTypeAny, { + id: string; + image: Uint8Array; +}, { + id: string; + image: Uint8Array; +}>; +export type ProfileImage = z.infer; +export declare const DataTypesSchema: z.ZodObject<{ + id: z.ZodNumber; + date: z.ZodNullable; + time: z.ZodNullable; + timetz: z.ZodNullable; + timestamp: z.ZodNullable; + timestamptz: z.ZodNullable; + bool: z.ZodNullable; + uuid: z.ZodNullable; + int2: z.ZodNullable; + int4: z.ZodNullable; + int8: z.ZodNullable; + float4: z.ZodNullable>; + float8: z.ZodNullable>; + json: z.ZodOptional>>; + bytea: z.ZodNullable>; + relatedId: z.ZodNullable; +}, "strip", z.ZodTypeAny, { + json?: Prisma.JsonValue | undefined; + id: number; + uuid: string | null; + timestamp: Date | null; + date: Date | null; + time: Date | null; + timetz: Date | null; + timestamptz: Date | null; + bool: boolean | null; + int2: number | null; + int4: number | null; + int8: bigint | null; + float4: number | null; + float8: number | null; + bytea: Uint8Array | null; + relatedId: number | null; +}, { + json?: Prisma.JsonValue | undefined; + id: number; + uuid: string | null; + timestamp: Date | null; + date: Date | null; + time: Date | null; + timetz: Date | null; + timestamptz: Date | null; + bool: boolean | null; + int2: number | null; + int4: number | null; + int8: bigint | null; + float4: number | null; + float8: number | null; + bytea: Uint8Array | null; + relatedId: number | null; +}>; +export type DataTypes = z.infer; +export declare const DummySchema: z.ZodObject<{ + id: z.ZodNumber; + timestamp: z.ZodNullable; +}, "strip", z.ZodTypeAny, { + id: number; + timestamp: Date | null; +}, { + id: number; + timestamp: Date | null; +}>; +export type Dummy = z.infer; +export declare const ItemsSelectSchema: z.ZodType; +export declare const UserIncludeSchema: z.ZodType; +export declare const UserArgsSchema: z.ZodType; +export declare const UserCountOutputTypeArgsSchema: z.ZodType; +export declare const UserCountOutputTypeSelectSchema: z.ZodType; +export declare const UserSelectSchema: z.ZodType; +export declare const 
PostIncludeSchema: z.ZodType; +export declare const PostArgsSchema: z.ZodType; +export declare const PostSelectSchema: z.ZodType; +export declare const ProfileIncludeSchema: z.ZodType; +export declare const ProfileArgsSchema: z.ZodType; +export declare const ProfileSelectSchema: z.ZodType; +export declare const ProfileImageIncludeSchema: z.ZodType; +export declare const ProfileImageArgsSchema: z.ZodType; +export declare const ProfileImageSelectSchema: z.ZodType; +export declare const DataTypesIncludeSchema: z.ZodType; +export declare const DataTypesArgsSchema: z.ZodType; +export declare const DataTypesSelectSchema: z.ZodType; +export declare const DummyIncludeSchema: z.ZodType; +export declare const DummyArgsSchema: z.ZodType; +export declare const DummyCountOutputTypeArgsSchema: z.ZodType; +export declare const DummyCountOutputTypeSelectSchema: z.ZodType; +export declare const DummySelectSchema: z.ZodType; +export declare const ItemsWhereInputSchema: z.ZodType; +export declare const ItemsOrderByWithRelationInputSchema: z.ZodType; +export declare const ItemsWhereUniqueInputSchema: z.ZodType; +export declare const ItemsOrderByWithAggregationInputSchema: z.ZodType; +export declare const ItemsScalarWhereWithAggregatesInputSchema: z.ZodType; +export declare const UserWhereInputSchema: z.ZodType; +export declare const UserOrderByWithRelationInputSchema: z.ZodType; +export declare const UserWhereUniqueInputSchema: z.ZodType; +export declare const UserOrderByWithAggregationInputSchema: z.ZodType; +export declare const UserScalarWhereWithAggregatesInputSchema: z.ZodType; +export declare const PostWhereInputSchema: z.ZodType; +export declare const PostOrderByWithRelationInputSchema: z.ZodType; +export declare const PostWhereUniqueInputSchema: z.ZodType; +export declare const PostOrderByWithAggregationInputSchema: z.ZodType; +export declare const PostScalarWhereWithAggregatesInputSchema: z.ZodType; +export declare const ProfileWhereInputSchema: z.ZodType; +export declare const ProfileOrderByWithRelationInputSchema: z.ZodType; +export declare const ProfileWhereUniqueInputSchema: z.ZodType; +export declare const ProfileOrderByWithAggregationInputSchema: z.ZodType; +export declare const ProfileScalarWhereWithAggregatesInputSchema: z.ZodType; +export declare const ProfileImageWhereInputSchema: z.ZodType; +export declare const ProfileImageOrderByWithRelationInputSchema: z.ZodType; +export declare const ProfileImageWhereUniqueInputSchema: z.ZodType; +export declare const ProfileImageOrderByWithAggregationInputSchema: z.ZodType; +export declare const ProfileImageScalarWhereWithAggregatesInputSchema: z.ZodType; +export declare const DataTypesWhereInputSchema: z.ZodType; +export declare const DataTypesOrderByWithRelationInputSchema: z.ZodType; +export declare const DataTypesWhereUniqueInputSchema: z.ZodType; +export declare const DataTypesOrderByWithAggregationInputSchema: z.ZodType; +export declare const DataTypesScalarWhereWithAggregatesInputSchema: z.ZodType; +export declare const DummyWhereInputSchema: z.ZodType; +export declare const DummyOrderByWithRelationInputSchema: z.ZodType; +export declare const DummyWhereUniqueInputSchema: z.ZodType; +export declare const DummyOrderByWithAggregationInputSchema: z.ZodType; +export declare const DummyScalarWhereWithAggregatesInputSchema: z.ZodType; +export declare const ItemsCreateInputSchema: z.ZodType; +export declare const ItemsUncheckedCreateInputSchema: z.ZodType; +export declare const ItemsUpdateInputSchema: z.ZodType; +export declare const 
ItemsUncheckedUpdateInputSchema: z.ZodType; +export declare const ItemsCreateManyInputSchema: z.ZodType; +export declare const ItemsUpdateManyMutationInputSchema: z.ZodType; +export declare const ItemsUncheckedUpdateManyInputSchema: z.ZodType; +export declare const UserCreateInputSchema: z.ZodType; +export declare const UserUncheckedCreateInputSchema: z.ZodType; +export declare const UserUpdateInputSchema: z.ZodType; +export declare const UserUncheckedUpdateInputSchema: z.ZodType; +export declare const UserCreateManyInputSchema: z.ZodType; +export declare const UserUpdateManyMutationInputSchema: z.ZodType; +export declare const UserUncheckedUpdateManyInputSchema: z.ZodType; +export declare const PostCreateInputSchema: z.ZodType; +export declare const PostUncheckedCreateInputSchema: z.ZodType; +export declare const PostUpdateInputSchema: z.ZodType; +export declare const PostUncheckedUpdateInputSchema: z.ZodType; +export declare const PostCreateManyInputSchema: z.ZodType; +export declare const PostUpdateManyMutationInputSchema: z.ZodType; +export declare const PostUncheckedUpdateManyInputSchema: z.ZodType; +export declare const ProfileCreateInputSchema: z.ZodType; +export declare const ProfileUncheckedCreateInputSchema: z.ZodType; +export declare const ProfileUpdateInputSchema: z.ZodType; +export declare const ProfileUncheckedUpdateInputSchema: z.ZodType; +export declare const ProfileCreateManyInputSchema: z.ZodType; +export declare const ProfileUpdateManyMutationInputSchema: z.ZodType; +export declare const ProfileUncheckedUpdateManyInputSchema: z.ZodType; +export declare const ProfileImageCreateInputSchema: z.ZodType; +export declare const ProfileImageUncheckedCreateInputSchema: z.ZodType; +export declare const ProfileImageUpdateInputSchema: z.ZodType; +export declare const ProfileImageUncheckedUpdateInputSchema: z.ZodType; +export declare const ProfileImageCreateManyInputSchema: z.ZodType; +export declare const ProfileImageUpdateManyMutationInputSchema: z.ZodType; +export declare const ProfileImageUncheckedUpdateManyInputSchema: z.ZodType; +export declare const DataTypesCreateInputSchema: z.ZodType; +export declare const DataTypesUncheckedCreateInputSchema: z.ZodType; +export declare const DataTypesUpdateInputSchema: z.ZodType; +export declare const DataTypesUncheckedUpdateInputSchema: z.ZodType; +export declare const DataTypesCreateManyInputSchema: z.ZodType; +export declare const DataTypesUpdateManyMutationInputSchema: z.ZodType; +export declare const DataTypesUncheckedUpdateManyInputSchema: z.ZodType; +export declare const DummyCreateInputSchema: z.ZodType; +export declare const DummyUncheckedCreateInputSchema: z.ZodType; +export declare const DummyUpdateInputSchema: z.ZodType; +export declare const DummyUncheckedUpdateInputSchema: z.ZodType; +export declare const DummyCreateManyInputSchema: z.ZodType; +export declare const DummyUpdateManyMutationInputSchema: z.ZodType; +export declare const DummyUncheckedUpdateManyInputSchema: z.ZodType; +export declare const StringFilterSchema: z.ZodType; +export declare const IntNullableFilterSchema: z.ZodType; +export declare const ItemsCountOrderByAggregateInputSchema: z.ZodType; +export declare const ItemsAvgOrderByAggregateInputSchema: z.ZodType; +export declare const ItemsMaxOrderByAggregateInputSchema: z.ZodType; +export declare const ItemsMinOrderByAggregateInputSchema: z.ZodType; +export declare const ItemsSumOrderByAggregateInputSchema: z.ZodType; +export declare const StringWithAggregatesFilterSchema: z.ZodType; +export declare const 
IntNullableWithAggregatesFilterSchema: z.ZodType; +export declare const IntFilterSchema: z.ZodType; +export declare const StringNullableFilterSchema: z.ZodType; +export declare const PostListRelationFilterSchema: z.ZodType; +export declare const ProfileRelationFilterSchema: z.ZodType; +export declare const PostOrderByRelationAggregateInputSchema: z.ZodType; +export declare const UserCountOrderByAggregateInputSchema: z.ZodType; +export declare const UserAvgOrderByAggregateInputSchema: z.ZodType; +export declare const UserMaxOrderByAggregateInputSchema: z.ZodType; +export declare const UserMinOrderByAggregateInputSchema: z.ZodType; +export declare const UserSumOrderByAggregateInputSchema: z.ZodType; +export declare const IntWithAggregatesFilterSchema: z.ZodType; +export declare const StringNullableWithAggregatesFilterSchema: z.ZodType; +export declare const UserRelationFilterSchema: z.ZodType; +export declare const PostCountOrderByAggregateInputSchema: z.ZodType; +export declare const PostAvgOrderByAggregateInputSchema: z.ZodType; +export declare const PostMaxOrderByAggregateInputSchema: z.ZodType; +export declare const PostMinOrderByAggregateInputSchema: z.ZodType; +export declare const PostSumOrderByAggregateInputSchema: z.ZodType; +export declare const JsonNullableFilterSchema: z.ZodType; +export declare const ProfileImageRelationFilterSchema: z.ZodType; +export declare const ProfileCountOrderByAggregateInputSchema: z.ZodType; +export declare const ProfileAvgOrderByAggregateInputSchema: z.ZodType; +export declare const ProfileMaxOrderByAggregateInputSchema: z.ZodType; +export declare const ProfileMinOrderByAggregateInputSchema: z.ZodType; +export declare const ProfileSumOrderByAggregateInputSchema: z.ZodType; +export declare const JsonNullableWithAggregatesFilterSchema: z.ZodType; +export declare const BytesFilterSchema: z.ZodType; +export declare const ProfileImageCountOrderByAggregateInputSchema: z.ZodType; +export declare const ProfileImageMaxOrderByAggregateInputSchema: z.ZodType; +export declare const ProfileImageMinOrderByAggregateInputSchema: z.ZodType; +export declare const BytesWithAggregatesFilterSchema: z.ZodType; +export declare const DateTimeNullableFilterSchema: z.ZodType; +export declare const BoolNullableFilterSchema: z.ZodType; +export declare const UuidNullableFilterSchema: z.ZodType; +export declare const BigIntNullableFilterSchema: z.ZodType; +export declare const FloatNullableFilterSchema: z.ZodType; +export declare const BytesNullableFilterSchema: z.ZodType; +export declare const DummyRelationFilterSchema: z.ZodType; +export declare const DataTypesCountOrderByAggregateInputSchema: z.ZodType; +export declare const DataTypesAvgOrderByAggregateInputSchema: z.ZodType; +export declare const DataTypesMaxOrderByAggregateInputSchema: z.ZodType; +export declare const DataTypesMinOrderByAggregateInputSchema: z.ZodType; +export declare const DataTypesSumOrderByAggregateInputSchema: z.ZodType; +export declare const DateTimeNullableWithAggregatesFilterSchema: z.ZodType; +export declare const BoolNullableWithAggregatesFilterSchema: z.ZodType; +export declare const UuidNullableWithAggregatesFilterSchema: z.ZodType; +export declare const BigIntNullableWithAggregatesFilterSchema: z.ZodType; +export declare const FloatNullableWithAggregatesFilterSchema: z.ZodType; +export declare const BytesNullableWithAggregatesFilterSchema: z.ZodType; +export declare const DataTypesListRelationFilterSchema: z.ZodType; +export declare const DataTypesOrderByRelationAggregateInputSchema: z.ZodType; 
+export declare const DummyCountOrderByAggregateInputSchema: z.ZodType; +export declare const DummyAvgOrderByAggregateInputSchema: z.ZodType; +export declare const DummyMaxOrderByAggregateInputSchema: z.ZodType; +export declare const DummyMinOrderByAggregateInputSchema: z.ZodType; +export declare const DummySumOrderByAggregateInputSchema: z.ZodType; +export declare const StringFieldUpdateOperationsInputSchema: z.ZodType; +export declare const NullableIntFieldUpdateOperationsInputSchema: z.ZodType; +export declare const PostCreateNestedManyWithoutAuthorInputSchema: z.ZodType; +export declare const ProfileCreateNestedOneWithoutUserInputSchema: z.ZodType; +export declare const PostUncheckedCreateNestedManyWithoutAuthorInputSchema: z.ZodType; +export declare const ProfileUncheckedCreateNestedOneWithoutUserInputSchema: z.ZodType; +export declare const IntFieldUpdateOperationsInputSchema: z.ZodType; +export declare const NullableStringFieldUpdateOperationsInputSchema: z.ZodType; +export declare const PostUpdateManyWithoutAuthorNestedInputSchema: z.ZodType; +export declare const ProfileUpdateOneWithoutUserNestedInputSchema: z.ZodType; +export declare const PostUncheckedUpdateManyWithoutAuthorNestedInputSchema: z.ZodType; +export declare const ProfileUncheckedUpdateOneWithoutUserNestedInputSchema: z.ZodType; +export declare const UserCreateNestedOneWithoutPostsInputSchema: z.ZodType; +export declare const UserUpdateOneWithoutPostsNestedInputSchema: z.ZodType; +export declare const UserCreateNestedOneWithoutProfileInputSchema: z.ZodType; +export declare const ProfileImageCreateNestedOneWithoutProfileInputSchema: z.ZodType; +export declare const UserUpdateOneWithoutProfileNestedInputSchema: z.ZodType; +export declare const ProfileImageUpdateOneWithoutProfileNestedInputSchema: z.ZodType; +export declare const ProfileCreateNestedOneWithoutImageInputSchema: z.ZodType; +export declare const ProfileUncheckedCreateNestedOneWithoutImageInputSchema: z.ZodType; +export declare const BytesFieldUpdateOperationsInputSchema: z.ZodType; +export declare const ProfileUpdateOneWithoutImageNestedInputSchema: z.ZodType; +export declare const ProfileUncheckedUpdateOneWithoutImageNestedInputSchema: z.ZodType; +export declare const DummyCreateNestedOneWithoutDatatypeInputSchema: z.ZodType; +export declare const NullableDateTimeFieldUpdateOperationsInputSchema: z.ZodType; +export declare const NullableBoolFieldUpdateOperationsInputSchema: z.ZodType; +export declare const NullableBigIntFieldUpdateOperationsInputSchema: z.ZodType; +export declare const NullableFloatFieldUpdateOperationsInputSchema: z.ZodType; +export declare const NullableBytesFieldUpdateOperationsInputSchema: z.ZodType; +export declare const DummyUpdateOneWithoutDatatypeNestedInputSchema: z.ZodType; +export declare const DataTypesCreateNestedManyWithoutRelatedInputSchema: z.ZodType; +export declare const DataTypesUncheckedCreateNestedManyWithoutRelatedInputSchema: z.ZodType; +export declare const DataTypesUpdateManyWithoutRelatedNestedInputSchema: z.ZodType; +export declare const DataTypesUncheckedUpdateManyWithoutRelatedNestedInputSchema: z.ZodType; +export declare const NestedStringFilterSchema: z.ZodType; +export declare const NestedIntNullableFilterSchema: z.ZodType; +export declare const NestedStringWithAggregatesFilterSchema: z.ZodType; +export declare const NestedIntFilterSchema: z.ZodType; +export declare const NestedIntNullableWithAggregatesFilterSchema: z.ZodType; +export declare const NestedFloatNullableFilterSchema: z.ZodType; +export declare 
const NestedStringNullableFilterSchema: z.ZodType; +export declare const NestedIntWithAggregatesFilterSchema: z.ZodType; +export declare const NestedFloatFilterSchema: z.ZodType; +export declare const NestedStringNullableWithAggregatesFilterSchema: z.ZodType; +export declare const NestedJsonNullableFilterSchema: z.ZodType; +export declare const NestedBytesFilterSchema: z.ZodType; +export declare const NestedBytesWithAggregatesFilterSchema: z.ZodType; +export declare const NestedDateTimeNullableFilterSchema: z.ZodType; +export declare const NestedBoolNullableFilterSchema: z.ZodType; +export declare const NestedUuidNullableFilterSchema: z.ZodType; +export declare const NestedBigIntNullableFilterSchema: z.ZodType; +export declare const NestedBytesNullableFilterSchema: z.ZodType; +export declare const NestedDateTimeNullableWithAggregatesFilterSchema: z.ZodType; +export declare const NestedBoolNullableWithAggregatesFilterSchema: z.ZodType; +export declare const NestedUuidNullableWithAggregatesFilterSchema: z.ZodType; +export declare const NestedBigIntNullableWithAggregatesFilterSchema: z.ZodType; +export declare const NestedFloatNullableWithAggregatesFilterSchema: z.ZodType; +export declare const NestedBytesNullableWithAggregatesFilterSchema: z.ZodType; +export declare const PostCreateWithoutAuthorInputSchema: z.ZodType; +export declare const PostUncheckedCreateWithoutAuthorInputSchema: z.ZodType; +export declare const PostCreateOrConnectWithoutAuthorInputSchema: z.ZodType; +export declare const PostCreateManyAuthorInputEnvelopeSchema: z.ZodType; +export declare const ProfileCreateWithoutUserInputSchema: z.ZodType; +export declare const ProfileUncheckedCreateWithoutUserInputSchema: z.ZodType; +export declare const ProfileCreateOrConnectWithoutUserInputSchema: z.ZodType; +export declare const PostUpsertWithWhereUniqueWithoutAuthorInputSchema: z.ZodType; +export declare const PostUpdateWithWhereUniqueWithoutAuthorInputSchema: z.ZodType; +export declare const PostUpdateManyWithWhereWithoutAuthorInputSchema: z.ZodType; +export declare const PostScalarWhereInputSchema: z.ZodType; +export declare const ProfileUpsertWithoutUserInputSchema: z.ZodType; +export declare const ProfileUpdateWithoutUserInputSchema: z.ZodType; +export declare const ProfileUncheckedUpdateWithoutUserInputSchema: z.ZodType; +export declare const UserCreateWithoutPostsInputSchema: z.ZodType; +export declare const UserUncheckedCreateWithoutPostsInputSchema: z.ZodType; +export declare const UserCreateOrConnectWithoutPostsInputSchema: z.ZodType; +export declare const UserUpsertWithoutPostsInputSchema: z.ZodType; +export declare const UserUpdateWithoutPostsInputSchema: z.ZodType; +export declare const UserUncheckedUpdateWithoutPostsInputSchema: z.ZodType; +export declare const UserCreateWithoutProfileInputSchema: z.ZodType; +export declare const UserUncheckedCreateWithoutProfileInputSchema: z.ZodType; +export declare const UserCreateOrConnectWithoutProfileInputSchema: z.ZodType; +export declare const ProfileImageCreateWithoutProfileInputSchema: z.ZodType; +export declare const ProfileImageUncheckedCreateWithoutProfileInputSchema: z.ZodType; +export declare const ProfileImageCreateOrConnectWithoutProfileInputSchema: z.ZodType; +export declare const UserUpsertWithoutProfileInputSchema: z.ZodType; +export declare const UserUpdateWithoutProfileInputSchema: z.ZodType; +export declare const UserUncheckedUpdateWithoutProfileInputSchema: z.ZodType; +export declare const ProfileImageUpsertWithoutProfileInputSchema: z.ZodType; +export declare 
const ProfileImageUpdateWithoutProfileInputSchema: z.ZodType; +export declare const ProfileImageUncheckedUpdateWithoutProfileInputSchema: z.ZodType; +export declare const ProfileCreateWithoutImageInputSchema: z.ZodType; +export declare const ProfileUncheckedCreateWithoutImageInputSchema: z.ZodType; +export declare const ProfileCreateOrConnectWithoutImageInputSchema: z.ZodType; +export declare const ProfileUpsertWithoutImageInputSchema: z.ZodType; +export declare const ProfileUpdateWithoutImageInputSchema: z.ZodType; +export declare const ProfileUncheckedUpdateWithoutImageInputSchema: z.ZodType; +export declare const DummyCreateWithoutDatatypeInputSchema: z.ZodType; +export declare const DummyUncheckedCreateWithoutDatatypeInputSchema: z.ZodType; +export declare const DummyCreateOrConnectWithoutDatatypeInputSchema: z.ZodType; +export declare const DummyUpsertWithoutDatatypeInputSchema: z.ZodType; +export declare const DummyUpdateWithoutDatatypeInputSchema: z.ZodType; +export declare const DummyUncheckedUpdateWithoutDatatypeInputSchema: z.ZodType; +export declare const DataTypesCreateWithoutRelatedInputSchema: z.ZodType; +export declare const DataTypesUncheckedCreateWithoutRelatedInputSchema: z.ZodType; +export declare const DataTypesCreateOrConnectWithoutRelatedInputSchema: z.ZodType; +export declare const DataTypesCreateManyRelatedInputEnvelopeSchema: z.ZodType; +export declare const DataTypesUpsertWithWhereUniqueWithoutRelatedInputSchema: z.ZodType; +export declare const DataTypesUpdateWithWhereUniqueWithoutRelatedInputSchema: z.ZodType; +export declare const DataTypesUpdateManyWithWhereWithoutRelatedInputSchema: z.ZodType; +export declare const DataTypesScalarWhereInputSchema: z.ZodType; +export declare const PostCreateManyAuthorInputSchema: z.ZodType; +export declare const PostUpdateWithoutAuthorInputSchema: z.ZodType; +export declare const PostUncheckedUpdateWithoutAuthorInputSchema: z.ZodType; +export declare const PostUncheckedUpdateManyWithoutPostsInputSchema: z.ZodType; +export declare const DataTypesCreateManyRelatedInputSchema: z.ZodType; +export declare const DataTypesUpdateWithoutRelatedInputSchema: z.ZodType; +export declare const DataTypesUncheckedUpdateWithoutRelatedInputSchema: z.ZodType; +export declare const DataTypesUncheckedUpdateManyWithoutDatatypeInputSchema: z.ZodType; +export declare const ItemsFindFirstArgsSchema: z.ZodType; +export declare const ItemsFindFirstOrThrowArgsSchema: z.ZodType; +export declare const ItemsFindManyArgsSchema: z.ZodType; +export declare const ItemsAggregateArgsSchema: z.ZodType; +export declare const ItemsGroupByArgsSchema: z.ZodType; +export declare const ItemsFindUniqueArgsSchema: z.ZodType; +export declare const ItemsFindUniqueOrThrowArgsSchema: z.ZodType; +export declare const UserFindFirstArgsSchema: z.ZodType; +export declare const UserFindFirstOrThrowArgsSchema: z.ZodType; +export declare const UserFindManyArgsSchema: z.ZodType; +export declare const UserAggregateArgsSchema: z.ZodType; +export declare const UserGroupByArgsSchema: z.ZodType; +export declare const UserFindUniqueArgsSchema: z.ZodType; +export declare const UserFindUniqueOrThrowArgsSchema: z.ZodType; +export declare const PostFindFirstArgsSchema: z.ZodType; +export declare const PostFindFirstOrThrowArgsSchema: z.ZodType; +export declare const PostFindManyArgsSchema: z.ZodType; +export declare const PostAggregateArgsSchema: z.ZodType; +export declare const PostGroupByArgsSchema: z.ZodType; +export declare const PostFindUniqueArgsSchema: z.ZodType; +export declare const 
PostFindUniqueOrThrowArgsSchema: z.ZodType; +export declare const ProfileFindFirstArgsSchema: z.ZodType; +export declare const ProfileFindFirstOrThrowArgsSchema: z.ZodType; +export declare const ProfileFindManyArgsSchema: z.ZodType; +export declare const ProfileAggregateArgsSchema: z.ZodType; +export declare const ProfileGroupByArgsSchema: z.ZodType; +export declare const ProfileFindUniqueArgsSchema: z.ZodType; +export declare const ProfileFindUniqueOrThrowArgsSchema: z.ZodType; +export declare const ProfileImageFindFirstArgsSchema: z.ZodType; +export declare const ProfileImageFindFirstOrThrowArgsSchema: z.ZodType; +export declare const ProfileImageFindManyArgsSchema: z.ZodType; +export declare const ProfileImageAggregateArgsSchema: z.ZodType; +export declare const ProfileImageGroupByArgsSchema: z.ZodType; +export declare const ProfileImageFindUniqueArgsSchema: z.ZodType; +export declare const ProfileImageFindUniqueOrThrowArgsSchema: z.ZodType; +export declare const DataTypesFindFirstArgsSchema: z.ZodType; +export declare const DataTypesFindFirstOrThrowArgsSchema: z.ZodType; +export declare const DataTypesFindManyArgsSchema: z.ZodType; +export declare const DataTypesAggregateArgsSchema: z.ZodType; +export declare const DataTypesGroupByArgsSchema: z.ZodType; +export declare const DataTypesFindUniqueArgsSchema: z.ZodType; +export declare const DataTypesFindUniqueOrThrowArgsSchema: z.ZodType; +export declare const DummyFindFirstArgsSchema: z.ZodType; +export declare const DummyFindFirstOrThrowArgsSchema: z.ZodType; +export declare const DummyFindManyArgsSchema: z.ZodType; +export declare const DummyAggregateArgsSchema: z.ZodType; +export declare const DummyGroupByArgsSchema: z.ZodType; +export declare const DummyFindUniqueArgsSchema: z.ZodType; +export declare const DummyFindUniqueOrThrowArgsSchema: z.ZodType; +export declare const ItemsCreateArgsSchema: z.ZodType; +export declare const ItemsUpsertArgsSchema: z.ZodType; +export declare const ItemsCreateManyArgsSchema: z.ZodType; +export declare const ItemsDeleteArgsSchema: z.ZodType; +export declare const ItemsUpdateArgsSchema: z.ZodType; +export declare const ItemsUpdateManyArgsSchema: z.ZodType; +export declare const ItemsDeleteManyArgsSchema: z.ZodType; +export declare const UserCreateArgsSchema: z.ZodType; +export declare const UserUpsertArgsSchema: z.ZodType; +export declare const UserCreateManyArgsSchema: z.ZodType; +export declare const UserDeleteArgsSchema: z.ZodType; +export declare const UserUpdateArgsSchema: z.ZodType; +export declare const UserUpdateManyArgsSchema: z.ZodType; +export declare const UserDeleteManyArgsSchema: z.ZodType; +export declare const PostCreateArgsSchema: z.ZodType; +export declare const PostUpsertArgsSchema: z.ZodType; +export declare const PostCreateManyArgsSchema: z.ZodType; +export declare const PostDeleteArgsSchema: z.ZodType; +export declare const PostUpdateArgsSchema: z.ZodType; +export declare const PostUpdateManyArgsSchema: z.ZodType; +export declare const PostDeleteManyArgsSchema: z.ZodType; +export declare const ProfileCreateArgsSchema: z.ZodType; +export declare const ProfileUpsertArgsSchema: z.ZodType; +export declare const ProfileCreateManyArgsSchema: z.ZodType; +export declare const ProfileDeleteArgsSchema: z.ZodType; +export declare const ProfileUpdateArgsSchema: z.ZodType; +export declare const ProfileUpdateManyArgsSchema: z.ZodType; +export declare const ProfileDeleteManyArgsSchema: z.ZodType; +export declare const ProfileImageCreateArgsSchema: z.ZodType; +export declare const 
ProfileImageUpsertArgsSchema: z.ZodType; +export declare const ProfileImageCreateManyArgsSchema: z.ZodType; +export declare const ProfileImageDeleteArgsSchema: z.ZodType; +export declare const ProfileImageUpdateArgsSchema: z.ZodType; +export declare const ProfileImageUpdateManyArgsSchema: z.ZodType; +export declare const ProfileImageDeleteManyArgsSchema: z.ZodType; +export declare const DataTypesCreateArgsSchema: z.ZodType; +export declare const DataTypesUpsertArgsSchema: z.ZodType; +export declare const DataTypesCreateManyArgsSchema: z.ZodType; +export declare const DataTypesDeleteArgsSchema: z.ZodType; +export declare const DataTypesUpdateArgsSchema: z.ZodType; +export declare const DataTypesUpdateManyArgsSchema: z.ZodType; +export declare const DataTypesDeleteManyArgsSchema: z.ZodType; +export declare const DummyCreateArgsSchema: z.ZodType; +export declare const DummyUpsertArgsSchema: z.ZodType; +export declare const DummyCreateManyArgsSchema: z.ZodType; +export declare const DummyDeleteArgsSchema: z.ZodType; +export declare const DummyUpdateArgsSchema: z.ZodType; +export declare const DummyUpdateManyArgsSchema: z.ZodType; +export declare const DummyDeleteManyArgsSchema: z.ZodType; +interface ItemsGetPayload extends HKT { + readonly _A?: boolean | null | undefined | Prisma.ItemsArgs; + readonly type: Omit, "Please either choose `select` or `include`">; +} +interface UserGetPayload extends HKT { + readonly _A?: boolean | null | undefined | Prisma.UserArgs; + readonly type: Omit, "Please either choose `select` or `include`">; +} +interface PostGetPayload extends HKT { + readonly _A?: boolean | null | undefined | Prisma.PostArgs; + readonly type: Omit, "Please either choose `select` or `include`">; +} +interface ProfileGetPayload extends HKT { + readonly _A?: boolean | null | undefined | Prisma.ProfileArgs; + readonly type: Omit, "Please either choose `select` or `include`">; +} +interface ProfileImageGetPayload extends HKT { + readonly _A?: boolean | null | undefined | Prisma.ProfileImageArgs; + readonly type: Omit, "Please either choose `select` or `include`">; +} +interface DataTypesGetPayload extends HKT { + readonly _A?: boolean | null | undefined | Prisma.DataTypesArgs; + readonly type: Omit, "Please either choose `select` or `include`">; +} +interface DummyGetPayload extends HKT { + readonly _A?: boolean | null | undefined | Prisma.DummyArgs; + readonly type: Omit, "Please either choose `select` or `include`">; +} +export declare const tableSchemas: { + Items: TableSchema & Prisma.ItemsUncheckedCreateInput) | (Prisma.Without & Prisma.ItemsCreateInput), (Prisma.Without & Prisma.ItemsUncheckedUpdateInput) | (Prisma.Without & Prisma.ItemsUpdateInput), Prisma.ItemsSelect | null | undefined, Prisma.ItemsWhereInput | undefined, Prisma.ItemsWhereUniqueInput, never, Prisma.Enumerable | undefined, Prisma.ItemsScalarFieldEnum, ItemsGetPayload>; + User: TableSchema & Prisma.UserUncheckedCreateInput) | (Prisma.Without & Prisma.UserCreateInput), (Prisma.Without & Prisma.UserUncheckedUpdateInput) | (Prisma.Without & Prisma.UserUpdateInput), Prisma.UserSelect | null | undefined, Prisma.UserWhereInput | undefined, Prisma.UserWhereUniqueInput, Omit, Prisma.Enumerable | undefined, Prisma.UserScalarFieldEnum, UserGetPayload>; + Post: TableSchema & Prisma.PostUncheckedCreateInput) | (Prisma.Without & Prisma.PostCreateInput), (Prisma.Without & Prisma.PostUncheckedUpdateInput) | (Prisma.Without & Prisma.PostUpdateInput), Prisma.PostSelect | null | undefined, Prisma.PostWhereInput | undefined, 
Prisma.PostWhereUniqueInput, Omit, Prisma.Enumerable | undefined, Prisma.PostScalarFieldEnum, PostGetPayload>; + Profile: TableSchema & Prisma.ProfileUncheckedCreateInput) | (Prisma.Without & Prisma.ProfileCreateInput), (Prisma.Without & Prisma.ProfileUncheckedUpdateInput) | (Prisma.Without & Prisma.ProfileUpdateInput), Prisma.ProfileSelect | null | undefined, Prisma.ProfileWhereInput | undefined, Prisma.ProfileWhereUniqueInput, Omit, Prisma.Enumerable | undefined, Prisma.ProfileScalarFieldEnum, ProfileGetPayload>; + ProfileImage: TableSchema & Prisma.ProfileImageUncheckedCreateInput) | (Prisma.Without & Prisma.ProfileImageCreateInput), (Prisma.Without & Prisma.ProfileImageUncheckedUpdateInput) | (Prisma.Without & Prisma.ProfileImageUpdateInput), Prisma.ProfileImageSelect | null | undefined, Prisma.ProfileImageWhereInput | undefined, Prisma.ProfileImageWhereUniqueInput, Omit, Prisma.Enumerable | undefined, Prisma.ProfileImageScalarFieldEnum, ProfileImageGetPayload>; + DataTypes: TableSchema & Prisma.DataTypesUncheckedCreateInput) | (Prisma.Without & Prisma.DataTypesCreateInput), (Prisma.Without & Prisma.DataTypesUncheckedUpdateInput) | (Prisma.Without & Prisma.DataTypesUpdateInput), Prisma.DataTypesSelect | null | undefined, Prisma.DataTypesWhereInput | undefined, Prisma.DataTypesWhereUniqueInput, Omit, Prisma.Enumerable | undefined, Prisma.DataTypesScalarFieldEnum, DataTypesGetPayload>; + Dummy: TableSchema & Prisma.DummyUncheckedCreateInput) | (Prisma.Without & Prisma.DummyCreateInput), (Prisma.Without & Prisma.DummyUncheckedUpdateInput) | (Prisma.Without & Prisma.DummyUpdateInput), Prisma.DummySelect | null | undefined, Prisma.DummyWhereInput | undefined, Prisma.DummyWhereUniqueInput, Omit, Prisma.Enumerable | undefined, Prisma.DummyScalarFieldEnum, DummyGetPayload>; +}; +export declare const schema: DbSchema<{ + Items: TableSchema & Prisma.ItemsUncheckedCreateInput) | (Prisma.Without & Prisma.ItemsCreateInput), (Prisma.Without & Prisma.ItemsUncheckedUpdateInput) | (Prisma.Without & Prisma.ItemsUpdateInput), Prisma.ItemsSelect | null | undefined, Prisma.ItemsWhereInput | undefined, Prisma.ItemsWhereUniqueInput, never, Prisma.Enumerable | undefined, Prisma.ItemsScalarFieldEnum, ItemsGetPayload>; + User: TableSchema & Prisma.UserUncheckedCreateInput) | (Prisma.Without & Prisma.UserCreateInput), (Prisma.Without & Prisma.UserUncheckedUpdateInput) | (Prisma.Without & Prisma.UserUpdateInput), Prisma.UserSelect | null | undefined, Prisma.UserWhereInput | undefined, Prisma.UserWhereUniqueInput, Omit, Prisma.Enumerable | undefined, Prisma.UserScalarFieldEnum, UserGetPayload>; + Post: TableSchema & Prisma.PostUncheckedCreateInput) | (Prisma.Without & Prisma.PostCreateInput), (Prisma.Without & Prisma.PostUncheckedUpdateInput) | (Prisma.Without & Prisma.PostUpdateInput), Prisma.PostSelect | null | undefined, Prisma.PostWhereInput | undefined, Prisma.PostWhereUniqueInput, Omit, Prisma.Enumerable | undefined, Prisma.PostScalarFieldEnum, PostGetPayload>; + Profile: TableSchema & Prisma.ProfileUncheckedCreateInput) | (Prisma.Without & Prisma.ProfileCreateInput), (Prisma.Without & Prisma.ProfileUncheckedUpdateInput) | (Prisma.Without & Prisma.ProfileUpdateInput), Prisma.ProfileSelect | null | undefined, Prisma.ProfileWhereInput | undefined, Prisma.ProfileWhereUniqueInput, Omit, Prisma.Enumerable | undefined, Prisma.ProfileScalarFieldEnum, ProfileGetPayload>; + ProfileImage: TableSchema & Prisma.ProfileImageUncheckedCreateInput) | (Prisma.Without & Prisma.ProfileImageCreateInput), (Prisma.Without & 
Prisma.ProfileImageUncheckedUpdateInput) | (Prisma.Without & Prisma.ProfileImageUpdateInput), Prisma.ProfileImageSelect | null | undefined, Prisma.ProfileImageWhereInput | undefined, Prisma.ProfileImageWhereUniqueInput, Omit, Prisma.Enumerable | undefined, Prisma.ProfileImageScalarFieldEnum, ProfileImageGetPayload>; + DataTypes: TableSchema & Prisma.DataTypesUncheckedCreateInput) | (Prisma.Without & Prisma.DataTypesCreateInput), (Prisma.Without & Prisma.DataTypesUncheckedUpdateInput) | (Prisma.Without & Prisma.DataTypesUpdateInput), Prisma.DataTypesSelect | null | undefined, Prisma.DataTypesWhereInput | undefined, Prisma.DataTypesWhereUniqueInput, Omit, Prisma.Enumerable | undefined, Prisma.DataTypesScalarFieldEnum, DataTypesGetPayload>; + Dummy: TableSchema & Prisma.DummyUncheckedCreateInput) | (Prisma.Without & Prisma.DummyCreateInput), (Prisma.Without & Prisma.DummyUncheckedUpdateInput) | (Prisma.Without & Prisma.DummyUpdateInput), Prisma.DummySelect | null | undefined, Prisma.DummyWhereInput | undefined, Prisma.DummyWhereUniqueInput, Omit, Prisma.Enumerable | undefined, Prisma.DummyScalarFieldEnum, DummyGetPayload>; +}>; +export type Electric = ElectricClient; +export declare const JsonNull: { + __is_electric_json_null__: boolean; +}; +export {}; diff --git a/clients/typescript/test/client/generated/migrations.d.ts b/clients/typescript/test/client/generated/migrations.d.ts new file mode 100644 index 0000000000..b177b57e95 --- /dev/null +++ b/clients/typescript/test/client/generated/migrations.d.ts @@ -0,0 +1,2 @@ +declare const _default: never[]; +export default _default; diff --git a/clients/typescript/test/client/generated/pg-migrations.d.ts b/clients/typescript/test/client/generated/pg-migrations.d.ts new file mode 100644 index 0000000000..b177b57e95 --- /dev/null +++ b/clients/typescript/test/client/generated/pg-migrations.d.ts @@ -0,0 +1,2 @@ +declare const _default: never[]; +export default _default; diff --git a/clients/typescript/test/satellite/client.test.ts b/clients/typescript/test/satellite/client.test.ts index c663923a74..74eb46f2c1 100644 --- a/clients/typescript/test/satellite/client.test.ts +++ b/clients/typescript/test/satellite/client.test.ts @@ -11,7 +11,13 @@ import { OplogEntry, toTransactions } from '../../src/satellite/oplog' import { ShapeRequest } from '../../src/satellite/shapes/types' import { WebSocketNode } from '../../src/sockets/node' import { QualifiedTablename, sleepAsync } from '../../src/util' -import { base64, bytesToNumber, numberToBytes } from '../../src/util/common' +import { + base64, + bytesToNumber, + numberToBytes, + sqliteTypeDecoder, + sqliteTypeEncoder, +} from '../../src/util/common' import { DataChangeType, DataTransaction, @@ -333,7 +339,12 @@ test.serial('receive transaction over multiple messages', async (t) => { const insertOp = Proto.SatOpInsert.fromPartial({ relationId: 1, - rowData: serializeRow({ name1: 'Foo', name2: 'Bar' }, rel, dbDescription), + rowData: serializeRow( + { name1: 'Foo', name2: 'Bar' }, + rel, + dbDescription, + sqliteTypeEncoder + ), }) const updateOp = Proto.SatOpUpdate.fromPartial({ @@ -341,16 +352,23 @@ test.serial('receive transaction over multiple messages', async (t) => { rowData: serializeRow( { name1: 'Hello', name2: 'World!' 
}, rel, - dbDescription + dbDescription, + sqliteTypeEncoder + ), + oldRowData: serializeRow( + { name1: '', name2: '' }, + rel, + dbDescription, + sqliteTypeEncoder ), - oldRowData: serializeRow({ name1: '', name2: '' }, rel, dbDescription), }) const deleteOp = Proto.SatOpDelete.fromPartial({ relationId: 1, oldRowData: serializeRow( { name1: 'Hello', name2: 'World!' }, rel, - dbDescription + dbDescription, + sqliteTypeEncoder ), }) @@ -722,7 +740,8 @@ test.serial('default and null test', async (t) => { intvalue_null_default: '10', }, rel, - dbDescription + dbDescription, + sqliteTypeEncoder ), }) @@ -745,7 +764,12 @@ test.serial('default and null test', async (t) => { ], } - const record: any = deserializeRow(serializedRow, rel, dbDescription)! + const record: any = deserializeRow( + serializedRow, + rel, + dbDescription, + sqliteTypeDecoder + )! const firstOpLogMessage = Proto.SatOpLog.fromPartial({ ops: [ @@ -1114,7 +1138,12 @@ test.serial('subscription correct protocol sequence with data', async (t) => { const insertOp = Proto.SatOpInsert.fromPartial({ relationId: 0, - rowData: serializeRow({ name1: 'Foo', name2: 'Bar' }, rel, dbDescription), + rowData: serializeRow( + { name1: 'Foo', name2: 'Bar' }, + rel, + dbDescription, + sqliteTypeEncoder + ), }) const satTransOpInsert = Proto.SatTransOp.fromPartial({ insert: insertOp }) @@ -1210,12 +1239,22 @@ test.serial('client correctly handles additional data messages', async (t) => { const insertOp = Proto.SatOpInsert.fromPartial({ relationId: 1, - rowData: serializeRow({ name1: 'Foo', name2: 'Bar' }, rel, dbDescription), + rowData: serializeRow( + { name1: 'Foo', name2: 'Bar' }, + rel, + dbDescription, + sqliteTypeEncoder + ), }) const secondInsertOp = Proto.SatOpInsert.fromPartial({ relationId: 1, - rowData: serializeRow({ name1: 'More', name2: 'Data' }, rel, dbDescription), + rowData: serializeRow( + { name1: 'More', name2: 'Data' }, + rel, + dbDescription, + sqliteTypeEncoder + ), }) const firstOpLogMessage = Proto.SatOpLog.fromPartial({ diff --git a/e2e/tests/03.19_node_satellite_can_sync_json.lux b/e2e/tests/03.19_node_satellite_can_sync_json.lux index b001a851a3..330f5bc950 100644 --- a/e2e/tests/03.19_node_satellite_can_sync_json.lux +++ b/e2e/tests/03.19_node_satellite_can_sync_json.lux @@ -52,6 +52,7 @@ # When running with PG on the client we don't yet support top-level JSON null values # instead top-level JSON null values become DB null values [invoke node_get_jsonb_regex "row3" "\{ __is_electric_json_null__: true \}|null"] + #[invoke node_get_jsonb "row3" "{ __is_electric_json_null__: true }"] # write regular JSON values [invoke node_write_json "row4" 500 "{ a: true, b: [ 1, 2 ] }"] @@ -128,6 +129,7 @@ #[invoke node_get_json "row3" "null"] [invoke node_get_jsonb_regex "row3" "\{ __is_electric_json_null__: true \}|null"] + #[invoke node_get_jsonb "row3" "{ __is_electric_json_null__: true }"] #[invoke node_get_json "row4" 500] [invoke node_get_jsonb "row4" "{ a: true, b: [ 1, 2 ] }"] From c7b24afc77212c9fefc926ffe6c5f39a47512e86 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 16 Apr 2024 11:53:07 +0200 Subject: [PATCH 062/156] Fix unit test after rebase --- .../typescript/test/client/model/datatype.ts | 10 ++++-- .../client/model/postgres/datatype.test.ts | 1 + .../test/client/model/sqlite/datatype.test.ts | 1 + .../typescript/test/satellite/client.test.ts | 36 ++++++++++++------- 4 files changed, 33 insertions(+), 15 deletions(-) diff --git a/clients/typescript/test/client/model/datatype.ts 
b/clients/typescript/test/client/model/datatype.ts index 4d8be29859..b3276c3238 100644 --- a/clients/typescript/test/client/model/datatype.ts +++ b/clients/typescript/test/client/model/datatype.ts @@ -5,9 +5,11 @@ import { } from '../../../src/client/validation/errors/messages' import { JsonNull, Electric } from '../generated' import { ZodError } from 'zod' +import { Dialect } from '../../../src/migrators/query-builder/builder' export type ContextType = { tbl: Electric['db']['DataTypes'] + dialect: Dialect } /* @@ -862,7 +864,7 @@ export const datatypeTests = (test: TestFn) => { }) test('support JSONB type', async (t) => { - const { tbl } = t.context + const { tbl, dialect } = t.context const json = { a: 1, b: true, c: { d: 'nested' }, e: [1, 2, 3], f: null } const res = await tbl.create({ data: { @@ -889,7 +891,9 @@ export const datatypeTests = (test: TestFn) => { }, }) - t.deepEqual(res2.json, JsonNull) + // Currently can't store top-level JSON null values when using PG + // they are automatically transformed to DB NULL + t.deepEqual(res2.json, dialect === 'SQLite' ? JsonNull : null) const fetchRes2 = await tbl.findUnique({ where: { @@ -897,7 +901,7 @@ export const datatypeTests = (test: TestFn) => { }, }) - t.deepEqual(fetchRes2?.json, JsonNull) + t.deepEqual(fetchRes2?.json, dialect === 'SQLite' ? JsonNull : null) }) test('support null values for JSONB type', async (t) => { diff --git a/clients/typescript/test/client/model/postgres/datatype.test.ts b/clients/typescript/test/client/model/postgres/datatype.test.ts index b51b87d5ac..655f0bed2a 100644 --- a/clients/typescript/test/client/model/postgres/datatype.test.ts +++ b/clients/typescript/test/client/model/postgres/datatype.test.ts @@ -45,6 +45,7 @@ test.beforeEach(async (t) => { t.context = { tbl, stop, + dialect: 'Postgres', } }) diff --git a/clients/typescript/test/client/model/sqlite/datatype.test.ts b/clients/typescript/test/client/model/sqlite/datatype.test.ts index c064e046d0..c7625587d7 100644 --- a/clients/typescript/test/client/model/sqlite/datatype.test.ts +++ b/clients/typescript/test/client/model/sqlite/datatype.test.ts @@ -39,6 +39,7 @@ test.beforeEach(async (t) => { t.context = { db, tbl, + dialect: 'SQLite', } }) diff --git a/clients/typescript/test/satellite/client.test.ts b/clients/typescript/test/satellite/client.test.ts index 74eb46f2c1..e94aa876f1 100644 --- a/clients/typescript/test/satellite/client.test.ts +++ b/clients/typescript/test/satellite/client.test.ts @@ -1409,7 +1409,8 @@ test.serial( deserializeRow( satOpLog[1].insert?.rowData, relations.parent, - dbDescription + dbDescription, + sqliteTypeDecoder ), { id: 1, @@ -1422,7 +1423,8 @@ test.serial( deserializeRow( satOpLog[2].update?.rowData, relations.parent, - dbDescription + dbDescription, + sqliteTypeDecoder ), { id: 1, @@ -1435,7 +1437,8 @@ test.serial( deserializeRow( satOpLog[2].update?.oldRowData, relations.parent, - dbDescription + dbDescription, + sqliteTypeDecoder ), { id: 1, @@ -1448,7 +1451,8 @@ test.serial( deserializeRow( satOpLog[3].delete?.oldRowData, relations.parent, - dbDescription + dbDescription, + sqliteTypeDecoder ), { id: 1, @@ -1522,7 +1526,8 @@ test.serial( other: null, }, relations.parent, - dbDescription + dbDescription, + sqliteTypeEncoder ), }) @@ -1535,7 +1540,8 @@ test.serial( other: 2, }, relations.parent, - dbDescription + dbDescription, + sqliteTypeEncoder ), oldRowData: serializeRow( { @@ -1544,7 +1550,8 @@ test.serial( other: null, }, relations.parent, - dbDescription + dbDescription, + sqliteTypeEncoder ), }) @@ 
-1557,7 +1564,8 @@ test.serial( other: 2, }, relations.parent, - dbDescription + dbDescription, + sqliteTypeEncoder ), }) @@ -1658,7 +1666,8 @@ test.serial( deserializeRow( data.ops[1].insert?.rowData, relations.parent, - dbDescription + dbDescription, + sqliteTypeDecoder ), { ...change.record, @@ -1673,7 +1682,8 @@ test.serial( deserializeRow( data.ops[1].insert?.rowData, relations.parent, - dbDescription + dbDescription, + sqliteTypeDecoder ), { ...change.record, @@ -1688,7 +1698,8 @@ test.serial( deserializeRow( data.ops[1].insert?.rowData, relations.parent, - dbDescription + dbDescription, + sqliteTypeDecoder ), change.record ) @@ -1843,7 +1854,8 @@ test.serial( other: null, }, relations.parent, - dbDescription + dbDescription, + sqliteTypeEncoder ), }) From 4348a5c00f3139ff5aa356444ec6e6056aa4e24c Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 16 Apr 2024 12:28:30 +0200 Subject: [PATCH 063/156] Fixed query 4 in performSnapshot for PG. --- .../src/migrators/query-builder/pgBuilder.ts | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index eedb3fff98..4be9c21875 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -426,18 +426,22 @@ class PgBuilder extends QueryBuilder { // since this is not reliant on re-executing a query // for every row in the shadow table, but uses a PK join instead. return dedent` - WITH _to_be_deleted (rowid) AS ( - SELECT ${shadow}.rowid - FROM ${oplog} - INNER JOIN ${shadow} - ON ${shadow}.namespace = ${oplog}.namespace - AND ${shadow}.tablename = ${oplog}.tablename - AND - ${shadow}."primaryKey"::jsonb @> ${oplog}."primaryKey"::jsonb AND ${shadow}."primaryKey"::jsonb <@ ${oplog}."primaryKey"::jsonb - WHERE ${oplog}.timestamp = $1 - AND ${oplog}.optype = 'DELETE' - GROUP BY ${shadow}.rowid - ) + WITH + _relevant_shadows AS ( + SELECT DISTINCT ON (s.rowid) + s.rowid AS rowid, + op.optype AS last_optype + FROM ${oplog} AS op + INNER JOIN ${shadow} AS s + ON s.namespace = op.namespace + AND s.tablename = op.tablename + AND s."primaryKey"::jsonb = op."primaryKey"::jsonb + WHERE op.timestamp = $1 + ORDER BY s.rowid, op.rowid DESC + ), + _to_be_deleted AS ( + SELECT rowid FROM _relevant_shadows WHERE last_optype = 'DELETE' + ) DELETE FROM ${shadow} WHERE rowid IN (SELECT rowid FROM _to_be_deleted); ` From 8a8eff09524bcd664b4022a6fd15d7c0551227a1 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 16 Apr 2024 14:54:32 +0200 Subject: [PATCH 064/156] Fix unit test --- clients/typescript/src/satellite/process.ts | 2 +- .../typescript/test/satellite/process.test.ts | 103 ++++++++++-------- 2 files changed, 56 insertions(+), 49 deletions(-) diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index 7d6a6838d7..f5b06c7559 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -306,7 +306,7 @@ export class SatelliteProcess implements Satellite { // TODO: table and schema warrant escaping here too, but they aren't in the triggers table. 
const deleteStmts = tables.map((x) => ({ - sql: `DELETE FROM "${x.namespace}".""${x.table}`, + sql: `DELETE FROM "${x.namespace}"."${x.tablename}"`, })) const stmtsWithTriggers = [ diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index 1ad2738e93..3c20bd41ef 100644 --- a/clients/typescript/test/satellite/process.test.ts +++ b/clients/typescript/test/satellite/process.test.ts @@ -2060,62 +2060,69 @@ export const processTests = (test: TestFn) => { } = t.context await runMigrations() - const tablename = 'parent' - const childTable = 'child' + const tablename = 'parent' + const childTable = 'child' - // relations must be present at subscription delivery - client.setRelations(relations) - client.setRelationData(tablename, parentRecord) - client.setRelationData(childTable, childRecord) - client.setRelationData('another', {}) + // relations must be present at subscription delivery + client.setRelations(relations) + client.setRelationData(tablename, parentRecord) + client.setRelationData(childTable, childRecord) + client.setRelationData('another', {}) - const conn = await startSatellite(satellite, authState, token) - await conn.connectionPromise - - const shapeDef1: Shape = { - tablename: 'parent', - include: [{ foreignKey: ['parent'], select: { tablename: 'child' } }], - } - const shapeDef2: Shape = { - tablename: 'another', - } + const conn = await startSatellite(satellite, authState, token) + await conn.connectionPromise - satellite!.relations = relations - const { synced: synced1 } = await satellite.subscribe([shapeDef1]) - await synced1 - const row = await adapter.query({ sql: `SELECT id FROM main.parent` }) - t.is(row.length, 1) - const row1 = await adapter.query({ sql: `SELECT id FROM main.child` }) - t.is(row1.length, 1) - const { synced } = await satellite.subscribe([shapeDef2]) + const shapeDef1: Shape = { + tablename: 'parent', + include: [{ foreignKey: ['parent'], select: { tablename: 'child' } }], + } + const shapeDef2: Shape = { + tablename: 'another', + } - try { - await synced - t.fail() - } catch (expected: any) { - t.true(expected instanceof SatelliteError) - try { + satellite!.relations = relations + const { synced: synced1 } = await satellite.subscribe([shapeDef1]) + await synced1 const row = await adapter.query({ - sql: `SELECT id FROM "${namespace}"."${tablename}"`, + sql: `SELECT id FROM "${namespace}".parent`, }) - t.is(row.length, 0) - const row1 = await adapter.query({ sql: `SELECT id FROM "${namespace}"."${childTable}"` }) - t.is(row1.length, 0) - - const shadowRows = await adapter.query({ - sql: `SELECT tags FROM "${namespace}"._electric_shadow`, + t.is(row.length, 1) + const row1 = await adapter.query({ + sql: `SELECT id FROM "${namespace}".child`, }) - t.is(shadowRows.length, 2) - - const subsMeta = await satellite._getMeta('subscriptions') - const subsObj = JSON.parse(subsMeta) - t.deepEqual(subsObj, {}) - t.true(expected.message.search("table 'another'") >= 0) - } catch (e) { - t.fail(JSON.stringify(e)) + t.is(row1.length, 1) + const { synced } = await satellite.subscribe([shapeDef2]) + + try { + await synced + t.fail() + } catch (expected: any) { + t.true(expected instanceof SatelliteError) + try { + const row = await adapter.query({ + sql: `SELECT id FROM "${namespace}"."${tablename}"`, + }) + t.is(row.length, 0) + const row1 = await adapter.query({ + sql: `SELECT id FROM "${namespace}"."${childTable}"`, + }) + t.is(row1.length, 0) + + const shadowRows = await adapter.query({ + sql: `SELECT tags 
FROM "${namespace}"._electric_shadow`, + }) + t.is(shadowRows.length, 2) + + const subsMeta = await satellite._getMeta('subscriptions') + const subsObj = JSON.parse(subsMeta) + t.deepEqual(subsObj, {}) + t.true(expected.message.search("table 'another'") >= 0) + } catch (e) { + t.fail(JSON.stringify(e)) + } + } } - } -}) + ) test('a subscription request failure does not clear the manager state', async (t) => { const { From 8510b6edd31ce51e874e95dfb98cce2a81464f94 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 16 Apr 2024 15:17:55 +0200 Subject: [PATCH 065/156] Remove hardcoded namespace in DAL table class to use default namespace --- clients/typescript/src/client/model/table.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clients/typescript/src/client/model/table.ts b/clients/typescript/src/client/model/table.ts index bfcfa4e195..dbf358cb65 100644 --- a/clients/typescript/src/client/model/table.ts +++ b/clients/typescript/src/client/model/table.ts @@ -125,7 +125,8 @@ export class Table< this._fields, this._transformer.converter ) - this._qualifiedTableName = new QualifiedTablename('main', tableName) + const namespace = this.dialect === 'Postgres' ? 'public' : 'main' + this._qualifiedTableName = new QualifiedTablename(namespace, tableName) this._tables = new Map() this._schema = tableDescription.modelSchema this.createSchema = omitCountFromSelectAndIncludeSchema( From b2316850f4aab8a79666b233bab1c35fb34e37bb Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 16 Apr 2024 16:06:41 +0200 Subject: [PATCH 066/156] Require all adapters to define a default namespace and fetch it in liveRawQuery in order to correctly parse table names. --- clients/typescript/src/client/model/table.ts | 4 ++-- clients/typescript/src/drivers/better-sqlite3/adapter.ts | 1 + clients/typescript/src/drivers/capacitor-sqlite/adapter.ts | 1 + clients/typescript/src/drivers/expo-sqlite-next/adapter.ts | 1 + clients/typescript/src/drivers/expo-sqlite/adapter.ts | 1 + clients/typescript/src/drivers/generic/adapter.ts | 4 ++++ clients/typescript/src/drivers/generic/mock.ts | 1 + clients/typescript/src/drivers/node-postgres/adapter.ts | 1 + clients/typescript/src/drivers/op-sqlite/adapter.ts | 1 + clients/typescript/src/drivers/tauri-postgres/adapter.ts | 1 + clients/typescript/src/drivers/tauri-sqlite/adapter.ts | 1 + clients/typescript/src/drivers/wa-sqlite/adapter.ts | 1 + clients/typescript/src/electric/adapter.ts | 2 ++ 13 files changed, 18 insertions(+), 2 deletions(-) diff --git a/clients/typescript/src/client/model/table.ts b/clients/typescript/src/client/model/table.ts index dbf358cb65..fd508ea648 100644 --- a/clients/typescript/src/client/model/table.ts +++ b/clients/typescript/src/client/model/table.ts @@ -1680,7 +1680,7 @@ export function liveRawQuery( // parse the table names from the query // because this is a raw query so // we cannot trust that it queries this table - const tablenames = parseTableNames(sql.sql) + const tablenames = parseTableNames(sql.sql, adapter.defaultNamespace) const res = await rawQuery(adapter, sql) return new LiveResult(res, tablenames) }) @@ -1688,7 +1688,7 @@ export function liveRawQuery( result.subscribe = createQueryResultSubscribeFunction( notifier, result, - parseTableNames(sql.sql) + parseTableNames(sql.sql, adapter.defaultNamespace) ) result.sourceQuery = sql return result diff --git a/clients/typescript/src/drivers/better-sqlite3/adapter.ts b/clients/typescript/src/drivers/better-sqlite3/adapter.ts index 78d79b2680..5faa3b8d2a 100644 --- 
a/clients/typescript/src/drivers/better-sqlite3/adapter.ts +++ b/clients/typescript/src/drivers/better-sqlite3/adapter.ts @@ -19,6 +19,7 @@ export class DatabaseAdapter implements DatabaseAdapterInterface { db: Database + readonly defaultNamespace = 'main' constructor(db: Database) { super() diff --git a/clients/typescript/src/drivers/capacitor-sqlite/adapter.ts b/clients/typescript/src/drivers/capacitor-sqlite/adapter.ts index 5a7df55300..6d7e417efd 100644 --- a/clients/typescript/src/drivers/capacitor-sqlite/adapter.ts +++ b/clients/typescript/src/drivers/capacitor-sqlite/adapter.ts @@ -7,6 +7,7 @@ import { RunResult } from '../../electric/adapter' export class DatabaseAdapter extends GenericDatabaseAdapter { readonly db: Database + readonly defaultNamespace = 'main' constructor(db: Database) { super() diff --git a/clients/typescript/src/drivers/expo-sqlite-next/adapter.ts b/clients/typescript/src/drivers/expo-sqlite-next/adapter.ts index b5de4e97c3..d435e15b71 100644 --- a/clients/typescript/src/drivers/expo-sqlite-next/adapter.ts +++ b/clients/typescript/src/drivers/expo-sqlite-next/adapter.ts @@ -6,6 +6,7 @@ import { SerialDatabaseAdapter } from '../generic' export class DatabaseAdapter extends SerialDatabaseAdapter { readonly db: Database + readonly defaultNamespace = 'main' constructor(db: Database) { super() this.db = db diff --git a/clients/typescript/src/drivers/expo-sqlite/adapter.ts b/clients/typescript/src/drivers/expo-sqlite/adapter.ts index baad9d7785..d4c73c6dbe 100644 --- a/clients/typescript/src/drivers/expo-sqlite/adapter.ts +++ b/clients/typescript/src/drivers/expo-sqlite/adapter.ts @@ -6,6 +6,7 @@ import { RunResult } from '../../electric/adapter' export class DatabaseAdapter extends GenericDatabaseAdapter { readonly db: Database + readonly defaultNamespace = 'main' #rowsModified = 0 constructor(db: Database) { diff --git a/clients/typescript/src/drivers/generic/adapter.ts b/clients/typescript/src/drivers/generic/adapter.ts index 0564efd11f..e3baa13525 100644 --- a/clients/typescript/src/drivers/generic/adapter.ts +++ b/clients/typescript/src/drivers/generic/adapter.ts @@ -18,6 +18,7 @@ abstract class DatabaseAdapter implements DatabaseAdapterInterface { protected txMutex: Mutex + abstract readonly defaultNamespace: 'main' | 'public' constructor() { super() @@ -110,6 +111,8 @@ export abstract class BatchDatabaseAdapter extends DatabaseAdapter implements DatabaseAdapterInterface { + abstract readonly defaultNamespace: 'main' | 'public' + /** * @param statements SQL statements to execute against the DB in a single batch. */ @@ -131,6 +134,7 @@ export abstract class SerialDatabaseAdapter extends DatabaseAdapter implements DatabaseAdapterInterface { + abstract readonly defaultNamespace: 'main' | 'public' async runInTransaction(...statements: Statement[]): Promise { // Uses a mutex to ensure that transactions are not interleaved. 
const release = await this.txMutex.acquire() diff --git a/clients/typescript/src/drivers/generic/mock.ts b/clients/typescript/src/drivers/generic/mock.ts index f7c137781f..03dd96fc01 100644 --- a/clients/typescript/src/drivers/generic/mock.ts +++ b/clients/typescript/src/drivers/generic/mock.ts @@ -3,6 +3,7 @@ import { RunResult } from '../../electric/adapter' import { Row, Statement } from '../../util' export class MockDatabaseAdapter extends SerialDatabaseAdapter { + readonly defaultNamespace = 'main' private expectRun: ((stmt: Statement) => Promise) | undefined private expectQuery: ((stmt: Statement) => Promise) | undefined diff --git a/clients/typescript/src/drivers/node-postgres/adapter.ts b/clients/typescript/src/drivers/node-postgres/adapter.ts index 6d6c5f0c7b..fd744fc8d5 100644 --- a/clients/typescript/src/drivers/node-postgres/adapter.ts +++ b/clients/typescript/src/drivers/node-postgres/adapter.ts @@ -6,6 +6,7 @@ import { RunResult } from '../../electric/adapter' export class DatabaseAdapter extends GenericDatabaseAdapter { readonly db: Database + readonly defaultNamespace = 'public' constructor(db: Database) { super() diff --git a/clients/typescript/src/drivers/op-sqlite/adapter.ts b/clients/typescript/src/drivers/op-sqlite/adapter.ts index c20c24e5b1..0704dc979a 100644 --- a/clients/typescript/src/drivers/op-sqlite/adapter.ts +++ b/clients/typescript/src/drivers/op-sqlite/adapter.ts @@ -7,6 +7,7 @@ import { SQLBatchTuple } from '@op-engineering/op-sqlite' export class DatabaseAdapter extends GenericDatabaseAdapter { readonly db: Database + readonly defaultNamespace = 'main' constructor(db: Database) { super() diff --git a/clients/typescript/src/drivers/tauri-postgres/adapter.ts b/clients/typescript/src/drivers/tauri-postgres/adapter.ts index 6d6c5f0c7b..fd744fc8d5 100644 --- a/clients/typescript/src/drivers/tauri-postgres/adapter.ts +++ b/clients/typescript/src/drivers/tauri-postgres/adapter.ts @@ -6,6 +6,7 @@ import { RunResult } from '../../electric/adapter' export class DatabaseAdapter extends GenericDatabaseAdapter { readonly db: Database + readonly defaultNamespace = 'public' constructor(db: Database) { super() diff --git a/clients/typescript/src/drivers/tauri-sqlite/adapter.ts b/clients/typescript/src/drivers/tauri-sqlite/adapter.ts index 900baa3f4b..c74e8e7fd3 100644 --- a/clients/typescript/src/drivers/tauri-sqlite/adapter.ts +++ b/clients/typescript/src/drivers/tauri-sqlite/adapter.ts @@ -6,6 +6,7 @@ import { RunResult } from '../../electric/adapter' export class DatabaseAdapter extends GenericDatabaseAdapter { readonly db: Database + readonly defaultNamespace = 'main' constructor(db: Database) { super() diff --git a/clients/typescript/src/drivers/wa-sqlite/adapter.ts b/clients/typescript/src/drivers/wa-sqlite/adapter.ts index d4f0864882..c16ceacc1e 100644 --- a/clients/typescript/src/drivers/wa-sqlite/adapter.ts +++ b/clients/typescript/src/drivers/wa-sqlite/adapter.ts @@ -6,6 +6,7 @@ import { RunResult } from '../../electric/adapter' export class DatabaseAdapter extends GenericDatabaseAdapter { readonly db: Database + readonly defaultNamespace = 'main' constructor(db: Database) { super() diff --git a/clients/typescript/src/electric/adapter.ts b/clients/typescript/src/electric/adapter.ts index f1fd0f22e3..6f0dcb22a6 100644 --- a/clients/typescript/src/electric/adapter.ts +++ b/clients/typescript/src/electric/adapter.ts @@ -5,6 +5,8 @@ import { parseTableNames } from '../util' // A `DatabaseAdapter` adapts a database client to provide the // normalised interface 
defined here. export interface DatabaseAdapter { + readonly defaultNamespace: 'main' | 'public' + // Runs the provided sql statement run(statement: Statement): Promise From 80b973f4b4f69c8f7d590f32b04d367287ec5072 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 17 Apr 2024 09:12:40 +0200 Subject: [PATCH 067/156] Extract SQLite/PG type encoders/decoders from common util file to their own encoder files. --- clients/typescript/src/auth/decode.ts | 2 +- clients/typescript/src/auth/insecure.ts | 2 +- clients/typescript/src/auth/secure/index.ts | 2 +- clients/typescript/src/migrators/builder.ts | 3 +- clients/typescript/src/satellite/client.ts | 4 +- clients/typescript/src/satellite/mock.ts | 3 +- clients/typescript/src/satellite/oplog.ts | 6 +- clients/typescript/src/satellite/process.ts | 9 +- .../typescript/src/satellite/shapes/cache.ts | 2 +- clients/typescript/src/util/common.ts | 191 +----------------- .../typescript/src/util/encoders/common.ts | 100 +++++++++ clients/typescript/src/util/encoders/index.ts | 15 ++ .../src/util/encoders/pgEncoders.ts | 38 ++++ .../src/util/encoders/sqliteEncoders.ts | 59 ++++++ clients/typescript/src/util/encoders/types.ts | 5 + clients/typescript/src/util/proto.ts | 2 +- .../typescript/test/satellite/client.test.ts | 2 +- .../satellite/postgres/serialization.test.ts | 2 +- .../typescript/test/satellite/process.test.ts | 9 +- .../test/satellite/serialization.ts | 2 +- .../satellite/sqlite/serialization.test.ts | 5 +- clients/typescript/test/util/commmon.test.ts | 5 +- .../test/util/subscriptions.test.ts | 2 +- 23 files changed, 248 insertions(+), 222 deletions(-) create mode 100644 clients/typescript/src/util/encoders/common.ts create mode 100644 clients/typescript/src/util/encoders/index.ts create mode 100644 clients/typescript/src/util/encoders/pgEncoders.ts create mode 100644 clients/typescript/src/util/encoders/sqliteEncoders.ts create mode 100644 clients/typescript/src/util/encoders/types.ts diff --git a/clients/typescript/src/auth/decode.ts b/clients/typescript/src/auth/decode.ts index 4264ae9c74..5ec86bca43 100644 --- a/clients/typescript/src/auth/decode.ts +++ b/clients/typescript/src/auth/decode.ts @@ -1,4 +1,4 @@ -import { base64 } from '../util/common' +import { base64 } from '../util/encoders' export interface JwtPayload { iss?: string diff --git a/clients/typescript/src/auth/insecure.ts b/clients/typescript/src/auth/insecure.ts index f771e40097..3cab9c4bf1 100644 --- a/clients/typescript/src/auth/insecure.ts +++ b/clients/typescript/src/auth/insecure.ts @@ -1,5 +1,5 @@ import { TokenClaims } from './index' -import { base64 } from '../util/common' +import { base64 } from '../util/encoders' export function insecureAuthToken(claims: TokenClaims): string { const header = { diff --git a/clients/typescript/src/auth/secure/index.ts b/clients/typescript/src/auth/secure/index.ts index 2222bbc160..3f245e2aa9 100644 --- a/clients/typescript/src/auth/secure/index.ts +++ b/clients/typescript/src/auth/secure/index.ts @@ -1,5 +1,5 @@ import { jwtDecode, JwtPayload } from '../decode' -import { textEncoder } from '../../util/common' +import { textEncoder } from '../../util/encoders' import { TokenClaims } from '../index' import { InvalidArgumentError } from '../../client/validation/errors/invalidArgumentError' diff --git a/clients/typescript/src/migrators/builder.ts b/clients/typescript/src/migrators/builder.ts index 11ef578abc..985e1b7967 100644 --- a/clients/typescript/src/migrators/builder.ts +++ b/clients/typescript/src/migrators/builder.ts @@ -1,6 
+1,7 @@ import * as z from 'zod' import { SatOpMigrate } from '../_generated/protocol/satellite' -import { base64, getProtocolVersion } from '../util' +import { base64 } from '../util/encoders' +import { getProtocolVersion } from '../util' import { Migration } from './index' import { generateTriggersForTable } from '../satellite/process' import { sqliteBuilder, pgBuilder, QueryBuilder } from './query-builder' diff --git a/clients/typescript/src/satellite/client.ts b/clients/typescript/src/satellite/client.ts index 8bb6f62106..837a413d50 100644 --- a/clients/typescript/src/satellite/client.ts +++ b/clients/typescript/src/satellite/client.ts @@ -75,7 +75,6 @@ import { } from '../util/types' import { base64, - DEFAULT_LOG_POS, sqliteTypeEncoder, sqliteTypeDecoder, bytesToNumber, @@ -83,7 +82,8 @@ import { TypeDecoder, pgTypeEncoder, pgTypeDecoder, -} from '../util/common' +} from '../util/encoders' +import { DEFAULT_LOG_POS } from '../util/common' import { Client } from '.' import { SatelliteClientOpts, satelliteClientDefaults } from './config' import Log from 'loglevel' diff --git a/clients/typescript/src/satellite/mock.ts b/clients/typescript/src/satellite/mock.ts index 75bb5ae381..a3b2eef208 100644 --- a/clients/typescript/src/satellite/mock.ts +++ b/clients/typescript/src/satellite/mock.ts @@ -32,12 +32,11 @@ import { SocketFactory } from '../sockets' import { DEFAULT_LOG_POS, subsDataErrorToSatelliteError, - base64, AsyncEventEmitter, genUUID, QualifiedTablename, } from '../util' -import { bytesToNumber } from '../util/common' +import { base64, bytesToNumber } from '../util/encoders' import { generateTag } from './oplog' import { Shape, diff --git a/clients/typescript/src/satellite/oplog.ts b/clients/typescript/src/satellite/oplog.ts index 4101e2b4f8..2576146e96 100644 --- a/clients/typescript/src/satellite/oplog.ts +++ b/clients/typescript/src/satellite/oplog.ts @@ -11,7 +11,11 @@ import { Relation, } from '../util/types' import { union } from '../util/sets' -import { numberToBytes, blobToHexString, hexStringToBlob } from '../util/common' +import { + numberToBytes, + blobToHexString, + hexStringToBlob, +} from '../util/encoders' // format: UUID@timestamp_in_milliseconds export type Timestamp = string diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index f5b06c7559..79453922b7 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -15,13 +15,8 @@ import { Notifier, UnsubscribeFunction, } from '../notifiers/index' -import { - Waiter, - base64, - bytesToNumber, - emptyPromise, - getWaiter, -} from '../util/common' +import { Waiter, emptyPromise, getWaiter } from '../util/common' +import { base64, bytesToNumber } from '../util/encoders' import { QualifiedTablename } from '../util/tablename' import { AdditionalData, diff --git a/clients/typescript/src/satellite/shapes/cache.ts b/clients/typescript/src/satellite/shapes/cache.ts index 8c4787e343..71bcfa6c7d 100644 --- a/clients/typescript/src/satellite/shapes/cache.ts +++ b/clients/typescript/src/satellite/shapes/cache.ts @@ -11,9 +11,9 @@ import { Relation, SatelliteError, SatelliteErrorCode, - TypeDecoder, subsDataErrorToSatelliteError, } from '../../util' +import type { TypeDecoder } from '../../util/encoders' import { deserializeRow } from '../client' import { InitialDataChange, diff --git a/clients/typescript/src/util/common.ts b/clients/typescript/src/util/common.ts index 0bb8745dff..2cead1bd71 100644 --- 
a/clients/typescript/src/util/common.ts +++ b/clients/typescript/src/util/common.ts @@ -1,197 +1,8 @@ +import { numberToBytes } from './encoders/common' import { SatelliteError } from './types' -import BASE64 from 'base-64' -import { TextEncoderLite, TextDecoderLite } from 'text-encoder-lite' - -export type TypeEncoder = typeof sqliteTypeEncoder | typeof pgTypeEncoder -export type TypeDecoder = typeof sqliteTypeDecoder | typeof pgTypeDecoder - -export const sqliteTypeEncoder = { - bool: boolToBytes, - text: (string: string) => textEncoder.encode(string), - json: (string: string) => { - const res = textEncoder.encode(string) - console.log('TEXTT ENCODED:\n' + res) - return res - }, - timetz: (string: string) => - sqliteTypeEncoder.text(stringToTimetzString(string)), -} - -export const sqliteTypeDecoder = { - bool: bytesToBool, - text: bytesToString, - json: bytesToString, - timetz: bytesToTimetzString, - float: bytesToFloat, -} - -//// PG encoders/decoders -export const pgTypeEncoder = { - ...sqliteTypeEncoder, - bool: pgBoolToBytes, - json: (x: JSON) => { - const str = JSON.stringify(x) - console.log('GONNA ENCODE:\n' + x) - console.log('SERIALISED:\n' + str) - const res = textEncoder.encode(str) - console.log('TEXT ENCODED:\n' + res) - //return textEncoder.encode(serialiseJSON(x)) - return res - }, -} - -export const pgTypeDecoder = { - ...sqliteTypeDecoder, - bool: bytesToPgBool, - json: (bs: Uint8Array) => JSON.parse(textDecoder.decode(bs)), -} - -function pgBoolToBytes(b: boolean) { - if (typeof b !== 'boolean') { - throw new Error(`Invalid boolean value: ${b}`) - } - return new Uint8Array([b ? trueByte : falseByte]) -} - -function bytesToPgBool(bs: Uint8Array) { - if (bs.length === 1 && (bs[0] === trueByte || bs[0] === falseByte)) { - return bs[0] === trueByte - } - - throw new Error(`Invalid binary-encoded boolean value: ${bs}`) -} -//// - -export const base64 = { - fromBytes: (bytes: Uint8Array) => - BASE64.encode( - String.fromCharCode.apply(null, new Uint8Array(bytes) as any) - ), - toBytes: (string: string) => - Uint8Array.from(BASE64.decode(string), (c) => c.charCodeAt(0)), - encode: (string: string) => base64.fromBytes(textEncoder.encode(string)), - decode: (string: string) => textDecoder.decode(base64.toBytes(string)), -} - -export const textEncoder = { - encode: (string: string): Uint8Array => - globalThis.TextEncoder - ? new TextEncoder().encode(string) - : new TextEncoderLite().encode(string), -} - -export const textDecoder = { - decode: (bytes: Uint8Array): string => - globalThis.TextDecoder - ? new TextDecoder().decode(bytes) - : new TextDecoderLite().decode(bytes), -} export const DEFAULT_LOG_POS = numberToBytes(0) -const trueByte = 't'.charCodeAt(0) -const falseByte = 'f'.charCodeAt(0) - -export function boolToBytes(b: number) { - if (b !== 0 && b !== 1) { - throw new Error(`Invalid boolean value: ${b}`) - } - return new Uint8Array([b === 1 ? trueByte : falseByte]) -} -export function bytesToBool(bs: Uint8Array) { - if (bs.length === 1 && (bs[0] === trueByte || bs[0] === falseByte)) { - return bs[0] === trueByte ? 
1 : 0 - } - - throw new Error(`Invalid binary-encoded boolean value: ${bs}`) -} - -export function numberToBytes(i: number) { - return Uint8Array.of( - (i & 0xff000000) >> 24, - (i & 0x00ff0000) >> 16, - (i & 0x0000ff00) >> 8, - (i & 0x000000ff) >> 0 - ) -} - -export function bytesToNumber(bytes: Uint8Array) { - let n = 0 - for (const byte of bytes.values()) { - n = (n << 8) | byte - } - return n -} - -export function bytesToString(bytes: Uint8Array) { - return textDecoder.decode(bytes) -} - -/** - * Converts a PG string of type `timetz` to its equivalent SQLite string. - * e.g. '18:28:35.42108+00' -> '18:28:35.42108' - * @param bytes Data for this `timetz` column. - * @returns The SQLite string. - */ -function bytesToTimetzString(bytes: Uint8Array) { - const str = bytesToString(bytes) - return str.replace('+00', '') -} - -/** - * Converts a PG string of type `float4` or `float8` to an equivalent SQLite number. - * Since SQLite does not recognise `NaN` we turn it into the string `'NaN'` instead. - * cf. https://github.com/WiseLibs/better-sqlite3/issues/1088 - * @param bytes Data for this `float4` or `float8` column. - * @returns The SQLite value. - */ -function bytesToFloat(bytes: Uint8Array) { - const text = sqliteTypeDecoder.text(bytes) - if (text === 'NaN') { - return 'NaN' - } else { - return Number(text) - } -} - -/** - * Converts an arbitrary blob (or bytestring) into a hex encoded string, which - * is also the `bytea` PG string. - * @param bytes - the blob to encode - * @returns the blob as a hex encoded string - */ -export function blobToHexString(bytes: Uint8Array) { - let hexString = '' - for (const byte of bytes.values()) { - hexString += byte.toString(16).padStart(2, '0') - } - return hexString -} - -/** - * Converts a hex encoded string into a `Uint8Array` blob. - * @param bytes - the blob to encode - * @returns the blob as a hex encoded string - */ -export function hexStringToBlob(hexString: string) { - const byteArray = new Uint8Array(hexString.length / 2) - for (let i = 0; i < hexString.length; i += 2) { - const byte = parseInt(hexString.substring(i, i + 2), 16) - byteArray[i / 2] = byte - } - return byteArray -} - -/** - * Converts a SQLite string representing a `timetz` value to a PG string. - * e.g. '18:28:35.42108' -> '18:28:35.42108+00' - * @param str The SQLite string representing a `timetz` value. - * @returns The PG string. - */ -function stringToTimetzString(str: string) { - return `${str}+00` -} - export type PromiseWithResolvers = { promise: Promise resolve: (value: T | PromiseLike) => void diff --git a/clients/typescript/src/util/encoders/common.ts b/clients/typescript/src/util/encoders/common.ts new file mode 100644 index 0000000000..46d92a5b32 --- /dev/null +++ b/clients/typescript/src/util/encoders/common.ts @@ -0,0 +1,100 @@ +import BASE64 from 'base-64' +import { TextEncoderLite, TextDecoderLite } from 'text-encoder-lite' + +export const base64 = { + fromBytes: (bytes: Uint8Array) => + BASE64.encode( + String.fromCharCode.apply(null, new Uint8Array(bytes) as any) + ), + toBytes: (string: string) => + Uint8Array.from(BASE64.decode(string), (c) => c.charCodeAt(0)), + encode: (string: string) => base64.fromBytes(textEncoder.encode(string)), + decode: (string: string) => textDecoder.decode(base64.toBytes(string)), +} + +export const textEncoder = { + encode: (string: string): Uint8Array => + globalThis.TextEncoder + ? 
new TextEncoder().encode(string) + : new TextEncoderLite().encode(string), +} + +export const textDecoder = { + decode: (bytes: Uint8Array): string => + globalThis.TextDecoder + ? new TextDecoder().decode(bytes) + : new TextDecoderLite().decode(bytes), +} + +export const trueByte = 't'.charCodeAt(0) +export const falseByte = 'f'.charCodeAt(0) + +export function numberToBytes(i: number) { + return Uint8Array.of( + (i & 0xff000000) >> 24, + (i & 0x00ff0000) >> 16, + (i & 0x0000ff00) >> 8, + (i & 0x000000ff) >> 0 + ) +} + +export function bytesToNumber(bytes: Uint8Array) { + let n = 0 + for (const byte of bytes.values()) { + n = (n << 8) | byte + } + return n +} + +export function bytesToString(bytes: Uint8Array) { + return textDecoder.decode(bytes) +} + +/** + * Converts a PG string of type `timetz` to its equivalent SQLite string. + * e.g. '18:28:35.42108+00' -> '18:28:35.42108' + * @param bytes Data for this `timetz` column. + * @returns The SQLite string. + */ +export function bytesToTimetzString(bytes: Uint8Array) { + const str = bytesToString(bytes) + return str.replace('+00', '') +} + +/** + * Converts an arbitrary blob (or bytestring) into a hex encoded string, which + * is also the `bytea` PG string. + * @param bytes - the blob to encode + * @returns the blob as a hex encoded string + */ +export function blobToHexString(bytes: Uint8Array) { + let hexString = '' + for (const byte of bytes.values()) { + hexString += byte.toString(16).padStart(2, '0') + } + return hexString +} + +/** + * Converts a hex encoded string into a `Uint8Array` blob. + * @param bytes - the blob to encode + * @returns the blob as a hex encoded string + */ +export function hexStringToBlob(hexString: string) { + const byteArray = new Uint8Array(hexString.length / 2) + for (let i = 0; i < hexString.length; i += 2) { + const byte = parseInt(hexString.substring(i, i + 2), 16) + byteArray[i / 2] = byte + } + return byteArray +} + +/** + * Converts a SQLite string representing a `timetz` value to a PG string. + * e.g. '18:28:35.42108' -> '18:28:35.42108+00' + * @param str The SQLite string representing a `timetz` value. + * @returns The PG string. 
+ */ +export function stringToTimetzString(str: string) { + return `${str}+00` +} diff --git a/clients/typescript/src/util/encoders/index.ts b/clients/typescript/src/util/encoders/index.ts new file mode 100644 index 0000000000..6c32ee2796 --- /dev/null +++ b/clients/typescript/src/util/encoders/index.ts @@ -0,0 +1,15 @@ +export { + base64, + bytesToNumber, + textEncoder, + textDecoder, + numberToBytes, + blobToHexString, + hexStringToBlob, +} from './common' + +export type { TypeEncoder, TypeDecoder } from './types' + +export { sqliteTypeEncoder, sqliteTypeDecoder } from './sqliteEncoders' + +export { pgTypeEncoder, pgTypeDecoder } from './pgEncoders' diff --git a/clients/typescript/src/util/encoders/pgEncoders.ts b/clients/typescript/src/util/encoders/pgEncoders.ts new file mode 100644 index 0000000000..72348f2c4e --- /dev/null +++ b/clients/typescript/src/util/encoders/pgEncoders.ts @@ -0,0 +1,38 @@ +import { sqliteTypeEncoder, sqliteTypeDecoder } from './sqliteEncoders' +import { textEncoder, textDecoder } from './common' +import { trueByte, falseByte } from './common' + +export const pgTypeEncoder = { + ...sqliteTypeEncoder, + bool: boolToBytes, + json: (x: JSON) => { + const str = JSON.stringify(x) + console.log('GONNA ENCODE:\n' + x) + console.log('SERIALISED:\n' + str) + const res = textEncoder.encode(str) + console.log('TEXT ENCODED:\n' + res) + //return textEncoder.encode(serialiseJSON(x)) + return res + }, +} + +export const pgTypeDecoder = { + ...sqliteTypeDecoder, + bool: bytesToBool, + json: (bs: Uint8Array) => JSON.parse(textDecoder.decode(bs)), +} + +function boolToBytes(b: boolean) { + if (typeof b !== 'boolean') { + throw new Error(`Invalid boolean value: ${b}`) + } + return new Uint8Array([b ? trueByte : falseByte]) +} + +function bytesToBool(bs: Uint8Array): boolean { + if (bs.length === 1 && (bs[0] === trueByte || bs[0] === falseByte)) { + return bs[0] === trueByte + } + + throw new Error(`Invalid binary-encoded boolean value: ${bs}`) +} diff --git a/clients/typescript/src/util/encoders/sqliteEncoders.ts b/clients/typescript/src/util/encoders/sqliteEncoders.ts new file mode 100644 index 0000000000..1a0911620d --- /dev/null +++ b/clients/typescript/src/util/encoders/sqliteEncoders.ts @@ -0,0 +1,59 @@ +import { + trueByte, + falseByte, + textEncoder, + stringToTimetzString, + bytesToString, + bytesToTimetzString, +} from './common' + +export const sqliteTypeEncoder = { + bool: boolToBytes, + text: (string: string) => textEncoder.encode(string), + json: (string: string) => { + const res = textEncoder.encode(string) + console.log('TEXTT ENCODED:\n' + res) + return res + }, + timetz: (string: string) => + sqliteTypeEncoder.text(stringToTimetzString(string)), +} + +export const sqliteTypeDecoder = { + bool: bytesToBool, + text: bytesToString, + json: bytesToString, + timetz: bytesToTimetzString, + float: bytesToFloat, +} + +export function boolToBytes(b: number) { + if (b !== 0 && b !== 1) { + throw new Error(`Invalid boolean value: ${b}`) + } + return new Uint8Array([b === 1 ? trueByte : falseByte]) +} + +export function bytesToBool(bs: Uint8Array): number { + if (bs.length === 1 && (bs[0] === trueByte || bs[0] === falseByte)) { + return bs[0] === trueByte ? 1 : 0 + } + + throw new Error(`Invalid binary-encoded boolean value: ${bs}`) +} + +/** + * Converts a PG string of type `float4` or `float8` to an equivalent SQLite number. + * Since SQLite does not recognise `NaN` we turn it into the string `'NaN'` instead. + * cf. 
https://github.com/WiseLibs/better-sqlite3/issues/1088 + * @param bytes Data for this `float4` or `float8` column. + * @returns The SQLite value. + */ +function bytesToFloat(bytes: Uint8Array) { + const text = sqliteTypeDecoder.text(bytes) + if (text === 'NaN') { + return 'NaN' + } else { + return Number(text) + } +} diff --git a/clients/typescript/src/util/encoders/types.ts b/clients/typescript/src/util/encoders/types.ts new file mode 100644 index 0000000000..2c08d4de45 --- /dev/null +++ b/clients/typescript/src/util/encoders/types.ts @@ -0,0 +1,5 @@ +import { sqliteTypeEncoder, sqliteTypeDecoder } from './sqliteEncoders' +import { pgTypeEncoder, pgTypeDecoder } from './pgEncoders' + +export type TypeEncoder = typeof sqliteTypeEncoder | typeof pgTypeEncoder +export type TypeDecoder = typeof sqliteTypeDecoder | typeof pgTypeDecoder diff --git a/clients/typescript/src/util/proto.ts b/clients/typescript/src/util/proto.ts index 3e64244069..777a78ddf6 100644 --- a/clients/typescript/src/util/proto.ts +++ b/clients/typescript/src/util/proto.ts @@ -2,7 +2,7 @@ import * as Pb from '../_generated/protocol/satellite' import * as _m0 from 'protobufjs/minimal' import { SatelliteError, SatelliteErrorCode } from './types' import { ShapeRequest } from '../satellite/shapes/types' -import { base64, sqliteTypeDecoder } from './common' +import { base64, sqliteTypeDecoder } from './encoders' import { getMaskBit } from './bitmaskHelpers' export type GetName = diff --git a/clients/typescript/test/satellite/client.test.ts b/clients/typescript/test/satellite/client.test.ts index e94aa876f1..2643d68596 100644 --- a/clients/typescript/test/satellite/client.test.ts +++ b/clients/typescript/test/satellite/client.test.ts @@ -17,7 +17,7 @@ import { numberToBytes, sqliteTypeDecoder, sqliteTypeEncoder, -} from '../../src/util/common' +} from '../../src/util/encoders' import { DataChangeType, DataTransaction, diff --git a/clients/typescript/test/satellite/postgres/serialization.test.ts b/clients/typescript/test/satellite/postgres/serialization.test.ts index 40451d4b22..d79059b8e5 100644 --- a/clients/typescript/test/satellite/postgres/serialization.test.ts +++ b/clients/typescript/test/satellite/postgres/serialization.test.ts @@ -3,7 +3,7 @@ import { makePgDatabase } from '../../support/node-postgres' import { randomValue } from '../../../src/util/random' import { opts } from '../common' import { ContextType, SetupFn, serializationTests } from '../serialization' -import { pgTypeDecoder, pgTypeEncoder } from '../../../src/util/common' +import { pgTypeDecoder, pgTypeEncoder } from '../../../src/util/encoders' import { DatabaseAdapter as PgDatabaseAdapter } from '../../../src/drivers/node-postgres/adapter' import { pgBuilder } from '../../../src/migrators/query-builder' diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index 3c20bd41ef..f48501b23a 100644 --- a/clients/typescript/test/satellite/process.test.ts +++ b/clients/typescript/test/satellite/process.test.ts @@ -37,12 +37,9 @@ import { } from '../../src/util/types' import { relations, ContextType as CommonContextType } from './common' -import { - DEFAULT_LOG_POS, - numberToBytes, - base64, - blobToHexString, -} from '../../src/util/common' +import { numberToBytes, base64, blobToHexString } from '../../src/util/encoders' + +import { DEFAULT_LOG_POS } from '../../src/util/common' import { Shape, SubscriptionData } from '../../src/satellite/shapes/types' import { mergeEntries } from 
'../../src/satellite/merge' diff --git a/clients/typescript/test/satellite/serialization.ts b/clients/typescript/test/satellite/serialization.ts index 23f74917a8..8338c71b56 100644 --- a/clients/typescript/test/satellite/serialization.ts +++ b/clients/typescript/test/satellite/serialization.ts @@ -9,7 +9,7 @@ import { DatabaseAdapter as DatabaseAdapterInterface } from '../../src/electric/ import { inferRelationsFromDb } from '../../src/util/relations' import { SatelliteOpts } from '../../src/satellite/config' import { QueryBuilder } from '../../src/migrators/query-builder' -import { TypeDecoder, TypeEncoder } from '../../src/util' +import { TypeDecoder, TypeEncoder } from '../../src/util/encoders' export type ContextType = { dialect: 'SQLite' | 'Postgres' diff --git a/clients/typescript/test/satellite/sqlite/serialization.test.ts b/clients/typescript/test/satellite/sqlite/serialization.test.ts index 271051f77a..60853808d3 100644 --- a/clients/typescript/test/satellite/sqlite/serialization.test.ts +++ b/clients/typescript/test/satellite/sqlite/serialization.test.ts @@ -4,7 +4,10 @@ import { DatabaseAdapter as SQLiteDatabaseAdapter } from '../../../src/drivers/b import { sqliteBuilder } from '../../../src/migrators/query-builder' import { opts } from '../common' import { ContextType, SetupFn, serializationTests } from '../serialization' -import { sqliteTypeDecoder, sqliteTypeEncoder } from '../../../src/util/common' +import { + sqliteTypeDecoder, + sqliteTypeEncoder, +} from '../../../src/util/encoders' const test = anyTest as TestFn diff --git a/clients/typescript/test/util/commmon.test.ts b/clients/typescript/test/util/commmon.test.ts index ef0d8f07da..5790f5bbe0 100644 --- a/clients/typescript/test/util/commmon.test.ts +++ b/clients/typescript/test/util/commmon.test.ts @@ -1,14 +1,13 @@ import test from 'ava' +import { getWaiter, isObject } from '../../src/util/common' import { - getWaiter, base64, textEncoder, textDecoder, - isObject, blobToHexString, hexStringToBlob, -} from '../../src/util/common' +} from '../../src/util/encoders' import { SatelliteError, SatelliteErrorCode } from '../../src/util/types' const OriginalEncoder = globalThis['TextEncoder'] diff --git a/clients/typescript/test/util/subscriptions.test.ts b/clients/typescript/test/util/subscriptions.test.ts index 7a6030c62b..84f1a8231e 100644 --- a/clients/typescript/test/util/subscriptions.test.ts +++ b/clients/typescript/test/util/subscriptions.test.ts @@ -6,7 +6,7 @@ import { InitialDataChange, SubscriptionData, } from '../../src/satellite/shapes/types' -import { base64 } from '../../src/util' +import { base64 } from '../../src/util/encoders' type ContextType = { manager: InMemorySubscriptionsManager From b8cfe9c93eaba7a104643e9012594380e0cbfc0f Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 17 Apr 2024 10:04:28 +0200 Subject: [PATCH 068/156] Extracted similar trigger tests for SQLite and PG into a common one. 
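
The shared suite is parameterised over a small per-dialect context. Roughly, a dialect-specific test file is expected to wire it up along these lines (a minimal sketch of the pattern, not a copy of the actual test files below; `makeDialectContext` is a hypothetical helper standing in for the driver-specific setup):

    import anyTest, { TestFn } from 'ava'
    import { ContextType, triggerTests } from '../triggers'

    const test = anyTest as TestFn<ContextType>

    test.beforeEach(async (t) => {
      // Each dialect fills in the shared context shape: adapter, dialect tag,
      // satellite defaults, the person test table, plus migrateDb/stopDb hooks.
      t.context = await makeDialectContext() // hypothetical driver-specific setup
    })

    test.afterEach.always(async (t) => {
      await t.context.stopDb()
    })

    // Register the dialect-agnostic trigger tests against this context.
    triggerTests(test)
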
--- .../test/migrators/postgres/triggers.test.ts | 42 +++++++------------ .../test/migrators/sqlite/triggers.test.ts | 38 +++++++---------- clients/typescript/test/migrators/triggers.ts | 40 ++++++++++++++++++ 3 files changed, 68 insertions(+), 52 deletions(-) create mode 100644 clients/typescript/test/migrators/triggers.ts diff --git a/clients/typescript/test/migrators/postgres/triggers.test.ts b/clients/typescript/test/migrators/postgres/triggers.test.ts index 8f6b1731d5..bd1fc8dd2a 100644 --- a/clients/typescript/test/migrators/postgres/triggers.test.ts +++ b/clients/typescript/test/migrators/postgres/triggers.test.ts @@ -9,12 +9,12 @@ import { import { pgBuilder } from '../../../src/migrators/query-builder' import { makePgDatabase } from '../../support/node-postgres' import { Database, DatabaseAdapter } from '../../../src/drivers/node-postgres' +import { ContextType, triggerTests } from '../triggers' -type Context = { +type Context = ContextType & { db: Database - migrateDb: () => Promise - stopPG: () => Promise } + const test = testAny as TestFn const defaults = satelliteDefaults('public') const oplogTable = `"${defaults.oplogTable.namespace}"."${defaults.oplogTable.tablename}"` @@ -33,14 +33,18 @@ test.beforeEach(async (t) => { t.context = { db, + adapter, + defaults, + personTable, + dialect: 'Postgres', migrateDb: migrateDb.bind(null, adapter, personTable, pgBuilder), - stopPG: stop, + stopDb: stop, } }) test.afterEach.always(async (t) => { - const { stopPG } = t.context as any - await stopPG() + const { stopDb } = t.context as any + await stopDb() }) test('generateTableTriggers should create correct triggers for a table', (t) => { @@ -258,25 +262,7 @@ test('oplog trigger should handle Infinity values correctly', async (t) => { }) }) -test('oplog trigger should separate null blobs from empty blobs', async (t) => { - const { db, migrateDb } = t.context - const namespace = personTable.namespace - const tableName = personTable.tableName - - // Migrate the DB with the necessary tables and triggers - await migrateDb() - - // Insert null and empty rows in the table - const insertRowNullSQL = `INSERT INTO "${namespace}"."${tableName}" (id, name, age, bmi, int8, blob) VALUES (1, 'John Doe', 30, 25.5, 7, NULL)` - const insertRowEmptySQL = `INSERT INTO "${namespace}"."${tableName}" (id, name, age, bmi, int8, blob) VALUES (2, 'John Doe', 30, 25.5, 7, '\\x')` - await db.exec({ sql: insertRowNullSQL }) - await db.exec({ sql: insertRowEmptySQL }) - - // Check that the oplog table contains an entry for the inserted row - const { rows: oplogRows } = await db.exec({ - sql: `SELECT * FROM "${defaults.oplogTable.namespace}"."${defaults.oplogTable.tablename}"`, - }) - t.is(oplogRows.length, 2) - t.regex(oplogRows[0].newRow as string, /,\s*"blob":\s*null\s*,/) - t.regex(oplogRows[1].newRow as string, /,\s*"blob":\s*""\s*,/) -}) +// even though `Context` is a subtype of `ContextType`, +// we have to cast `test` which is of type `TestFn` to `TestFn` +// because `TestFn` does not declare its type parameter to be covariant +triggerTests(test as unknown as TestFn) diff --git a/clients/typescript/test/migrators/sqlite/triggers.test.ts b/clients/typescript/test/migrators/sqlite/triggers.test.ts index 0461b07eaa..b9da58a8ea 100644 --- a/clients/typescript/test/migrators/sqlite/triggers.test.ts +++ b/clients/typescript/test/migrators/sqlite/triggers.test.ts @@ -10,8 +10,12 @@ import { } from '../../satellite/common' import { sqliteBuilder } from '../../../src/migrators/query-builder' import { DatabaseAdapter } 
from '../../../src/drivers/better-sqlite3' +import { ContextType, triggerTests } from '../triggers' + +type Context = ContextType & { + db: Database +} -type Context = { db: Database; migrateDb: () => Promise } const test = testAny as TestFn const defaults = satelliteDefaults('main') const oplogTable = `"${defaults.oplogTable.namespace}"."${defaults.oplogTable.tablename}"` @@ -23,7 +27,12 @@ test.beforeEach(async (t) => { t.context = { db, + adapter, + defaults, + personTable, + dialect: 'SQLite', migrateDb: migrateDb.bind(null, adapter, personTable, sqliteBuilder), + stopDb: async () => {}, } }) @@ -158,26 +167,7 @@ test('oplog trigger should handle Infinity values correctly', async (t) => { }) }) -test('oplog trigger should separate null blobs from empty blobs', async (t) => { - const { db, migrateDb } = t.context - const tableName = personTable.tableName - - // Migrate the DB with the necessary tables and triggers - await migrateDb() - - // Insert null and empty rows in the table - const insertRowNullSQL = `INSERT INTO ${tableName} (id, name, age, bmi, int8, blob) VALUES (1, 'John Doe', 30, 25.5, 7, NULL)` - const insertRowEmptySQL = `INSERT INTO ${tableName} (id, name, age, bmi, int8, blob) VALUES (2, 'John Doe', 30, 25.5, 7, x'')` - db.exec(insertRowNullSQL) - db.exec(insertRowEmptySQL) - - // Check that the oplog table contains an entry for the inserted row - const oplogRows = db - .prepare( - `SELECT * FROM "${defaults.oplogTable.namespace}"."${defaults.oplogTable.tablename}"` - ) - .all() - t.is(oplogRows.length, 2) - t.regex(oplogRows[0].newRow, /,\s*"blob":\s*null\s*,/) - t.regex(oplogRows[1].newRow, /,\s*"blob":\s*""\s*,/) -}) +// even though `Context` is a subtype of `ContextType`, +// we have to cast `test` which is of type `TestFn` to `TestFn` +// because `TestFn` does not declare its type parameter to be covariant +triggerTests(test as unknown as TestFn) diff --git a/clients/typescript/test/migrators/triggers.ts b/clients/typescript/test/migrators/triggers.ts new file mode 100644 index 0000000000..f14074c2e9 --- /dev/null +++ b/clients/typescript/test/migrators/triggers.ts @@ -0,0 +1,40 @@ +import { TestFn } from 'ava' +import { DatabaseAdapter } from '../../src/electric' +import { Dialect } from '../../src/migrators/query-builder/builder' +import { Table } from '../../src/migrators/triggers' +import { SatelliteOpts } from '../../src/satellite/config' + +export type ContextType = { + adapter: DatabaseAdapter + dialect: Dialect + defaults: SatelliteOpts + personTable: Table + migrateDb: () => Promise + stopDb: () => Promise +} + +export const triggerTests = (test: TestFn) => { + test('oplog trigger should separate null blobs from empty blobs', async (t) => { + const { adapter, migrateDb, dialect, personTable, defaults } = t.context + const namespace = personTable.namespace + const tableName = personTable.tableName + + // Migrate the DB with the necessary tables and triggers + await migrateDb() + + // Insert null and empty rows in the table + const insertRowNullSQL = `INSERT INTO "${namespace}"."${tableName}" (id, name, age, bmi, int8, blob) VALUES (1, 'John Doe', 30, 25.5, 7, NULL)` + const blobValue = dialect === 'Postgres' ? 
`'\\x'` : `x''` + const insertRowEmptySQL = `INSERT INTO "${namespace}"."${tableName}" (id, name, age, bmi, int8, blob) VALUES (2, 'John Doe', 30, 25.5, 7, ${blobValue})` + await adapter.run({ sql: insertRowNullSQL }) + await adapter.run({ sql: insertRowEmptySQL }) + + // Check that the oplog table contains an entry for the inserted row + const oplogRows = await adapter.query({ + sql: `SELECT * FROM "${defaults.oplogTable.namespace}"."${defaults.oplogTable.tablename}"`, + }) + t.is(oplogRows.length, 2) + t.regex(oplogRows[0].newRow as string, /,\s*"blob":\s*null\s*,/) + t.regex(oplogRows[1].newRow as string, /,\s*"blob":\s*""\s*,/) + }) +} From 530923f09ddec4e39250256331300f70424e3493 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 17 Apr 2024 10:45:27 +0200 Subject: [PATCH 069/156] Fix e2e TS client for blob test --- e2e/satellite_client/src/client.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/satellite_client/src/client.ts b/e2e/satellite_client/src/client.ts index 4f726c6b29..6956e135e7 100644 --- a/e2e/satellite_client/src/client.ts +++ b/e2e/satellite_client/src/client.ts @@ -373,7 +373,7 @@ export const get_blob = async (electric: Electric, id: string) => { } }) - if (res) { + if (res.blob) { // The PG driver returns a NodeJS Buffer but the e2e test matches on a plain Uint8Array. // So we convert the Buffer to a Uint8Array. // Note that Buffer is a subclass of Uint8Array. From 50150bf53da1cc103524ffd89eb88788b6a21bd7 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 17 Apr 2024 11:07:50 +0200 Subject: [PATCH 070/156] Revert unintentional change in e2e tests --- e2e/tests/_shared.luxinc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e/tests/_shared.luxinc b/e2e/tests/_shared.luxinc index 188789d005..5397b29417 100644 --- a/e2e/tests/_shared.luxinc +++ b/e2e/tests/_shared.luxinc @@ -96,14 +96,14 @@ [macro elixir_client_subscribe tables] """! - {:ok, %{err: nil}} = TestWsClient.make_rpc_call(conn, "subscribe", ProtocolHelpers.subscription_request(request_1: [tables: ~w|$tables|])) + {:ok, %{err: nil}} = TestWsClient.make_rpc_call(conn, "subscribe", ProtocolHelpers.subscription_request(request_1: ~w|$tables|)) """ ?rec \[\d+\]: %Electric.Satellite.SatSubsDataEnd\{\} [endmacro] [macro elixir_client_subscribe_with_id id tables] """! 
- {:ok, %{err: nil}} = TestWsClient.make_rpc_call(conn, "subscribe", ProtocolHelpers.subscription_request("$id", request_1: [tables: ~w|$tables|])) + {:ok, %{err: nil}} = TestWsClient.make_rpc_call(conn, "subscribe", ProtocolHelpers.subscription_request("$id", request_1: ~w|$tables|)) """ ?rec \[\d+\]: %Electric.Satellite.SatSubsDataEnd\{\} [endmacro] From 2f9bac04bd28827b1747ac3529f1fb22e6411806 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 17 Apr 2024 11:09:45 +0200 Subject: [PATCH 071/156] Fixed style issue --- clients/typescript/src/migrators/query-builder/builder.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index 60fb3c27a4..4d1a6b2543 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -338,7 +338,7 @@ export abstract class QueryBuilder { columns: string[], records: Record[], maxParameters: number, - suffixSql: string = '' + suffixSql = '' ): Statement[] { const stmts: Statement[] = [] const columnCount = columns.length From 7dad7f1794b020aab6f501b560a095f444264840 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 17 Apr 2024 11:39:33 +0200 Subject: [PATCH 072/156] Fix unit test for deserialisation of timestamptz values --- .../electric/test/electric/satellite/serialization_test.exs | 1 - 1 file changed, 1 deletion(-) diff --git a/components/electric/test/electric/satellite/serialization_test.exs b/components/electric/test/electric/satellite/serialization_test.exs index baa6ba2a6d..6160c5bcc2 100644 --- a/components/electric/test/electric/satellite/serialization_test.exs +++ b/components/electric/test/electric/satellite/serialization_test.exs @@ -222,7 +222,6 @@ defmodule Electric.Satellite.SerializationTest do {"2023-08-15 11:12:13Z", :timestamp}, {"2023-08-15 11:12:13+01", :timestamptz}, {"2023-08-15 11:12:13+99:98", :timestamptz}, - {"2023-08-15 11:12:13+00", :timestamptz}, {"2023-08-15 11:12:13", :timestamptz}, {"0000-08-15 23:00:00Z", :timestamptz}, {"-2000-08-15 23:00:00Z", :timestamptz}, From 6ce3fc694e5e073486909b5f73c8ceed8bd1b379 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 17 Apr 2024 12:44:45 +0200 Subject: [PATCH 073/156] Fixed date parsing --- clients/typescript/src/client/conversions/datatypes/date.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients/typescript/src/client/conversions/datatypes/date.ts b/clients/typescript/src/client/conversions/datatypes/date.ts index d0f4559943..fa6e3a24e7 100644 --- a/clients/typescript/src/client/conversions/datatypes/date.ts +++ b/clients/typescript/src/client/conversions/datatypes/date.ts @@ -39,7 +39,7 @@ export function deserialiseDate(v: string, pgType: PgDateType): Date { case PgDateType.PG_TIMESTAMPTZ: return parse(v) case PgDateType.PG_DATE: - return parse(`${v} 00:00:00.000`) + return parse(`${v}`) case PgDateType.PG_TIME: // interpret as local time From adb3eb150c9a55e26a9c68f85079386f38d41fdf Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 17 Apr 2024 13:48:23 +0200 Subject: [PATCH 074/156] Fixed SQLite compensation trigger --- clients/typescript/src/migrators/query-builder/sqliteBuilder.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts index 43a8892773..a733616424 100644 --- 
a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -265,7 +265,7 @@ class SqliteBuilder extends QueryBuilder { dedent` CREATE TRIGGER compensation_${opTypeLower}_${namespace}_${tableName}_${childKey}_into_oplog AFTER ${opType} ON "${namespace}"."${tableName}" - WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${fkTableNamespace}.${fkTableName}') AND + WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${fkTableName}') AND 1 = (SELECT value from _electric_meta WHERE key = 'compensations') BEGIN INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) From 17a5663e6aa7893a3b662a0ed7e71846fb17d097 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 17 Apr 2024 14:14:34 +0200 Subject: [PATCH 075/156] Extend Electric unit test to check that timestamptz with +00 is valid --- .../test/electric/satellite/serialization_test.exs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/components/electric/test/electric/satellite/serialization_test.exs b/components/electric/test/electric/satellite/serialization_test.exs index 6160c5bcc2..6ac5ed8432 100644 --- a/components/electric/test/electric/satellite/serialization_test.exs +++ b/components/electric/test/electric/satellite/serialization_test.exs @@ -121,7 +121,7 @@ defmodule Electric.Satellite.SerializationTest do describe "decode_record!" do test "decodes a SatOpRow struct into a map" do row = %SatOpRow{ - nulls_bitmask: <<0b00100000, 0b10000000>>, + nulls_bitmask: <<0b00100000, 0b01000000>>, values: [ "256", "hello", @@ -131,6 +131,7 @@ defmodule Electric.Satellite.SerializationTest do "-1.0e124", "2023-08-15 17:20:31", "2023-08-15 17:20:31Z", + "2023-08-15 17:20:31+00", "", "0400-02-29", "03:59:59", @@ -146,7 +147,8 @@ defmodule Electric.Satellite.SerializationTest do %{name: "real1", type: :float8}, %{name: "real2", type: :float8}, %{name: "t", type: :timestamp}, - %{name: "tz", type: :timestamptz}, + %{name: "tz1", type: :timestamptz}, + %{name: "tz2", type: :timestamptz}, %{name: "x", type: :float4, nullable?: true}, %{name: "date", type: :date}, %{name: "time", type: :time}, @@ -161,7 +163,8 @@ defmodule Electric.Satellite.SerializationTest do "real1" => "5.4", "real2" => "-1.0e124", "t" => "2023-08-15 17:20:31", - "tz" => "2023-08-15 17:20:31Z", + "tz1" => "2023-08-15 17:20:31Z", + "tz2" => "2023-08-15 17:20:31+00", "x" => nil, "date" => "0400-02-29", "time" => "03:59:59", From 270fc6be66dc9366b570bd66f6c3b3a0f2f1d23a Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 17 Apr 2024 15:27:41 +0200 Subject: [PATCH 076/156] Updated CI to run single entrypoint that runs both SQLite and PG e2e tests. 
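
With this, CI (and local runs) only need the single `test_sqlite_and_pg` target, which runs the existing `test_only` Lux suite and then `make -C tests test_pg` for the Postgres variant; the previous `test_pg` and `test_only_pg` convenience targets are dropped.
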
--- .github/workflows/e2e.yml | 2 +- e2e/Makefile | 9 +++------ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index b9bbf432c7..305dc87598 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -80,7 +80,7 @@ jobs: - run: make lux - run: make deps pull - - run: make test_only && make -C tests test_pg + - run: make test_sqlite_and_pg id: tests env: ELECTRIC_IMAGE_NAME: electric-sql-ci/electric diff --git a/e2e/Makefile b/e2e/Makefile index ae4fd5820f..f10461a1f8 100644 --- a/e2e/Makefile +++ b/e2e/Makefile @@ -5,17 +5,14 @@ deps: lux make -C elixir_client build make -C prisma_example build +test_sqlite_and_pg: test_only + make -C tests test_pg + test_only: ${LUX} --junit tests test: deps pull test_only -test_pg: - DIALECT=Postgres make test - -test_only_pg: - DIALECT=Postgres make test_only - pull: docker compose -f services_templates.yaml pull \ postgresql From 566795fb59ac19a3be34434031e8c1f92a67a48c Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 17 Apr 2024 16:59:42 +0200 Subject: [PATCH 077/156] Fix issue with dates that were inconsistent between SQLite and PG. --- .../src/client/conversions/postgres.ts | 9 ++++---- .../src/client/conversions/sqlite.ts | 6 +---- .../src/client/conversions/types.ts | 4 ++++ .../src/drivers/node-postgres/database.ts | 22 +++++++++++++++++++ clients/typescript/src/util/common.ts | 3 ++- .../typescript/test/client/model/datatype.ts | 4 ++-- 6 files changed, 35 insertions(+), 13 deletions(-) diff --git a/clients/typescript/src/client/conversions/postgres.ts b/clients/typescript/src/client/conversions/postgres.ts index 18af8e5637..31df33a8b2 100644 --- a/clients/typescript/src/client/conversions/postgres.ts +++ b/clients/typescript/src/client/conversions/postgres.ts @@ -1,6 +1,6 @@ import { InvalidArgumentError } from '../validation/errors/invalidArgumentError' import { Converter } from './converter' -import { deserialiseDate, serialiseDate } from './datatypes/date' +import { serialiseDate } from './datatypes/date' import { isJsonNull } from './datatypes/json' import { PgBasicType, PgDateType, PgType } from './types' @@ -56,10 +56,9 @@ function fromPostgres(v: any, pgType: PgType): any { return v } - if (pgType === PgDateType.PG_TIME || pgType === PgDateType.PG_TIMETZ) { - // it's a serialised date - return deserialiseDate(v, pgType as PgDateType) - } + // no need to convert dates, times, or timestamps + // because we modified the parser in the node-pg driver + // to parse them how we want if (pgType === PgBasicType.PG_JSON || pgType === PgBasicType.PG_JSONB) { if (v === null) { diff --git a/clients/typescript/src/client/conversions/sqlite.ts b/clients/typescript/src/client/conversions/sqlite.ts index 9a95386720..3dcc06293f 100644 --- a/clients/typescript/src/client/conversions/sqlite.ts +++ b/clients/typescript/src/client/conversions/sqlite.ts @@ -4,7 +4,7 @@ import { deserialiseBoolean, serialiseBoolean } from './datatypes/boolean' import { deserialiseBlob, serialiseBlob } from './datatypes/blob' import { deserialiseDate, serialiseDate } from './datatypes/date' import { deserialiseJSON, serialiseJSON } from './datatypes/json' -import { PgBasicType, PgDateType, PgType } from './types' +import { PgBasicType, PgDateType, PgType, isPgDateType } from './types' /** * This module takes care of converting TypeScript values for Postgres-specific types to a SQLite storeable value and back. 
@@ -91,10 +91,6 @@ function fromSqlite(v: any, pgType: PgType): any { } } -function isPgDateType(pgType: PgType): boolean { - return (Object.values(PgDateType) as Array).includes(pgType) -} - export const sqliteConverter: Converter = { encode: toSqlite, decode: fromSqlite, diff --git a/clients/typescript/src/client/conversions/types.ts b/clients/typescript/src/client/conversions/types.ts index 6d356da93d..4325c8c6c1 100644 --- a/clients/typescript/src/client/conversions/types.ts +++ b/clients/typescript/src/client/conversions/types.ts @@ -29,3 +29,7 @@ export enum PgDateType { } export type PgType = PgBasicType | PgDateType + +export function isPgDateType(pgType: PgType): boolean { + return (Object.values(PgDateType) as Array).includes(pgType) +} diff --git a/clients/typescript/src/drivers/node-postgres/database.ts b/clients/typescript/src/drivers/node-postgres/database.ts index 997794d703..de7a9675fa 100644 --- a/clients/typescript/src/drivers/node-postgres/database.ts +++ b/clients/typescript/src/drivers/node-postgres/database.ts @@ -1,6 +1,8 @@ import pg from 'pg' import type { Client } from 'pg' import { Row, Statement } from '../../util' +import { PgDateType } from '../../client/conversions/types' +import { deserialiseDate } from '../../client/conversions/datatypes/date' const originalGetTypeParser = pg.types.getTypeParser @@ -37,6 +39,26 @@ export class ElectricDatabase implements Database { ) { return (val) => val } + + if ( + oid == pg.types.builtins.TIME || + oid == pg.types.builtins.TIMETZ || + oid == pg.types.builtins.TIMESTAMP || + oid == pg.types.builtins.TIMESTAMPTZ || + oid == pg.types.builtins.DATE + ) { + // Parse time, timestamp, and date values ourselves + // because the pg parser parses them differently from what we expect + const pgTypes = new Map([ + [pg.types.builtins.TIME, PgDateType.PG_TIME], + [pg.types.builtins.TIMETZ, PgDateType.PG_TIMETZ], + [pg.types.builtins.TIMESTAMP, PgDateType.PG_TIMESTAMP], + [pg.types.builtins.TIMESTAMPTZ, PgDateType.PG_TIMESTAMPTZ], + [pg.types.builtins.DATE, PgDateType.PG_DATE], + ]) + return (val: string) => + deserialiseDate(val, pgTypes.get(oid) as PgDateType) + } return originalGetTypeParser(oid) }) as typeof pg.types.getTypeParser, }, diff --git a/clients/typescript/src/util/common.ts b/clients/typescript/src/util/common.ts index 2cead1bd71..2f81aae8a2 100644 --- a/clients/typescript/src/util/common.ts +++ b/clients/typescript/src/util/common.ts @@ -66,6 +66,7 @@ export function isObject(value: any): value is object { typeof value === 'object' && value !== null && !Array.isArray(value) && - !ArrayBuffer.isView(value) + !ArrayBuffer.isView(value) && + !(value instanceof Date) ) } diff --git a/clients/typescript/test/client/model/datatype.ts b/clients/typescript/test/client/model/datatype.ts index b3276c3238..6982472f48 100644 --- a/clients/typescript/test/client/model/datatype.ts +++ b/clients/typescript/test/client/model/datatype.ts @@ -31,7 +31,7 @@ export const datatypeTests = (test: TestFn) => { }, }) - const expectedDate = new Date(`${date} 00:00:00.000`) + const expectedDate = new Date(date) t.deepEqual(res.date, expectedDate) const fetchRes = await tbl.findUnique({ @@ -53,7 +53,7 @@ export const datatypeTests = (test: TestFn) => { }, }) - const expectedDate = new Date(`${date} 00:00:00.000`) + const expectedDate = new Date(date) t.deepEqual(res.date, expectedDate) const fetchRes = await tbl.findUnique({ From fddaaa48576194f1d747cf161762882fa9a691c1 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 18 Apr 2024 10:25:19 
+0200 Subject: [PATCH 078/156] Catch error that is thrown for stopping PG in the unit tests. --- clients/typescript/test/satellite/process.test.ts | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index f48501b23a..0663505bbb 100644 --- a/clients/typescript/test/satellite/process.test.ts +++ b/clients/typescript/test/satellite/process.test.ts @@ -77,7 +77,17 @@ const startSatellite = async ( ) => { await satellite.start(authState) satellite.setToken(token) - const connectionPromise = satellite.connectWithBackoff() + const connectionPromise = satellite.connectWithBackoff().catch((e) => { + if ( + e.message === 'terminating connection due to administrator command' || + e.message === + 'Client has encountered a connection error and is not queryable' + ) { + // This is to be expected as we stop Satellite at the end of the test + return + } + throw e + }) return { connectionPromise } } From 1d61d62c7d6b82314cf16686293b38d90a98a80e Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 18 Apr 2024 15:36:18 +0200 Subject: [PATCH 079/156] Set replication role to replica to disable FK checks when receiving subscription data and TXs when using Postgres on the client. --- .../src/migrators/query-builder/builder.ts | 5 ++++ .../src/migrators/query-builder/pgBuilder.ts | 3 ++ .../migrators/query-builder/sqliteBuilder.ts | 2 ++ clients/typescript/src/satellite/process.ts | 23 ++++++++++++++- .../typescript/test/satellite/process.test.ts | 29 +++++++++++++++++++ 5 files changed, 61 insertions(+), 1 deletion(-) diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index 4d1a6b2543..64f4aaf3f6 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -27,6 +27,11 @@ export abstract class QueryBuilder { */ abstract readonly getVersion: string + /** + * Disables foreign key checks. + */ + abstract readonly disableForeignKeys: string + /** * Returns the given query if the current SQL dialect is PostgreSQL. 
*/ diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index 4be9c21875..a4eba8648c 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -14,6 +14,9 @@ class PgBuilder extends QueryBuilder { readonly paramSign = '$' readonly defaultNamespace = 'public' + /** **Disables** FKs for the duration of the transaction */ + readonly disableForeignKeys = 'SET LOCAL session_replication_role = replica;' + pgOnly(query: string) { return query } diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts index a733616424..5890e2535a 100644 --- a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -18,6 +18,8 @@ class SqliteBuilder extends QueryBuilder { 'sqlite_temp_schema', ] + readonly disableForeignKeys = 'PRAGMA foreign_keys = OFF;' + pgOnly(_query: string) { return '' } diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index 79453922b7..8c7ca93b1b 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -475,7 +475,17 @@ export class SatelliteProcess implements Satellite { ) { const namespace = this.builder.defaultNamespace const stmts: Statement[] = [] - stmts.push({ sql: this.builder.deferForeignKeys }) + + if (this.builder.dialect === 'Postgres') { + // disable FK checks because order of inserts + // may not respect referential integrity + // and Postgres doesn't let us defer FKs + // that were not originally defined as deferrable + stmts.push({ sql: this.builder.disableForeignKeys }) + } else { + // Defer FKs on SQLite + stmts.push({ sql: this.builder.deferForeignKeys }) + } // It's much faster[1] to do less statements to insert the data instead of doing an insert statement for each row // so we're going to do just that, but with a caveat: SQLite has a max number of parameters in prepared statements, @@ -1294,6 +1304,17 @@ export class SatelliteProcess implements Satellite { const lsn = transaction.lsn let firstDMLChunk = true + if (this.builder.dialect === 'Postgres') { + // Temporarily disable FK checks because order of inserts + // may not respect referential integrity + // and Postgres doesn't let us defer FKs + // that were not originally defined as deferrable + stmts.push({ sql: this.builder.disableForeignKeys }) + } else { + // Defer FKs on SQLite + stmts.push({ sql: this.builder.deferForeignKeys }) + } + // update lsn. 
stmts.push(this.updateLsnStmt(lsn)) stmts.push(this._resetSeenAdditionalDataStmt()) diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index 0663505bbb..5b35bd1d78 100644 --- a/clients/typescript/test/satellite/process.test.ts +++ b/clients/typescript/test/satellite/process.test.ts @@ -1148,6 +1148,14 @@ export const processTests = (test: TestFn) => { builder, namespace, } = t.context + if (builder.dialect === 'Postgres') { + // Ignore this unit test for Postgres + // because we don't defer FK checks + // but completely disable them for incoming transactions + t.pass() + return + } + await runMigrations() if (builder.dialect === 'SQLite') { @@ -1269,6 +1277,18 @@ export const processTests = (test: TestFn) => { builder, namespace, } = t.context + // since this test disables compensations + // by putting the flag on 0 + // it is expecting a FK violation + if (builder.dialect === 'Postgres') { + // if we're running Postgres + // we are not deferring FK checks + // but completely disabling them for incoming transactions + // so the FK violation will not occur + t.pass() + return + } + await runMigrations() if (builder.dialect === 'SQLite') { @@ -1912,7 +1932,16 @@ export const processTests = (test: TestFn) => { authState, token, namespace, + builder, } = t.context + if (builder.dialect === 'Postgres') { + // Ignore this unit test for Postgres + // because we don't defer FK checks + // but completely disable them for incoming transactions + t.pass() + return + } + await runMigrations() const tablename = 'child' From 516f7ea99011166d61c60fbc3306844c4e998a8c Mon Sep 17 00:00:00 2001 From: Sam Willis Date: Thu, 18 Apr 2024 14:49:09 +0100 Subject: [PATCH 080/156] feature(client): PGlite driver and example (#1058) PGlite driver for Electric Builds on parameterized query support for PGlite: https://github.com/electric-sql/pglite/pull/39 --------- Co-authored-by: msfstef --- clients/typescript/package.json | 6 + .../typescript/src/drivers/pglite/adapter.ts | 26 + .../typescript/src/drivers/pglite/database.ts | 7 + .../typescript/src/drivers/pglite/index.ts | 45 + clients/typescript/src/drivers/pglite/mock.ts | 30 + .../typescript/test/drivers/pglite.test.ts | 173 + examples/web-pglite/.env | 3 + examples/web-pglite/.eslintrc.cjs | 18 + examples/web-pglite/.gitignore | 31 + examples/web-pglite/README.md | 87 + .../db/migrations/01-create_items_table.sql | 19 + examples/web-pglite/index.html | 13 + examples/web-pglite/package-lock.json | 6998 +++++++++++++++++ examples/web-pglite/package.json | 41 + examples/web-pglite/public/favicon.ico | Bin 0 -> 1659 bytes examples/web-pglite/public/robots.txt | 3 + examples/web-pglite/src/App.css | 25 + examples/web-pglite/src/App.tsx | 19 + examples/web-pglite/src/ElectricProvider.tsx | 72 + examples/web-pglite/src/Example.css | 41 + examples/web-pglite/src/Example.tsx | 56 + examples/web-pglite/src/assets/logo.svg | 11 + examples/web-pglite/src/auth.ts | 17 + examples/web-pglite/src/main.tsx | 10 + examples/web-pglite/src/style.css | 12 + examples/web-pglite/src/vite-env.d.ts | 1 + examples/web-pglite/tsconfig.json | 25 + examples/web-pglite/tsconfig.node.json | 10 + examples/web-pglite/vite.config.ts | 11 + 29 files changed, 7810 insertions(+) create mode 100644 clients/typescript/src/drivers/pglite/adapter.ts create mode 100644 clients/typescript/src/drivers/pglite/database.ts create mode 100644 clients/typescript/src/drivers/pglite/index.ts create mode 100644 
clients/typescript/src/drivers/pglite/mock.ts create mode 100644 clients/typescript/test/drivers/pglite.test.ts create mode 100644 examples/web-pglite/.env create mode 100644 examples/web-pglite/.eslintrc.cjs create mode 100644 examples/web-pglite/.gitignore create mode 100644 examples/web-pglite/README.md create mode 100644 examples/web-pglite/db/migrations/01-create_items_table.sql create mode 100644 examples/web-pglite/index.html create mode 100644 examples/web-pglite/package-lock.json create mode 100644 examples/web-pglite/package.json create mode 100644 examples/web-pglite/public/favicon.ico create mode 100644 examples/web-pglite/public/robots.txt create mode 100644 examples/web-pglite/src/App.css create mode 100644 examples/web-pglite/src/App.tsx create mode 100644 examples/web-pglite/src/ElectricProvider.tsx create mode 100644 examples/web-pglite/src/Example.css create mode 100644 examples/web-pglite/src/Example.tsx create mode 100644 examples/web-pglite/src/assets/logo.svg create mode 100644 examples/web-pglite/src/auth.ts create mode 100644 examples/web-pglite/src/main.tsx create mode 100644 examples/web-pglite/src/style.css create mode 100644 examples/web-pglite/src/vite-env.d.ts create mode 100644 examples/web-pglite/tsconfig.json create mode 100644 examples/web-pglite/tsconfig.node.json create mode 100644 examples/web-pglite/vite.config.ts diff --git a/clients/typescript/package.json b/clients/typescript/package.json index 323e960f0e..8fa557e144 100644 --- a/clients/typescript/package.json +++ b/clients/typescript/package.json @@ -56,6 +56,7 @@ "./generic": "./dist/drivers/generic/index.js", "./node": "./dist/drivers/better-sqlite3/index.js", "./node-postgres": "./dist/drivers/node-postgres/index.js", + "./pglite": "./dist/drivers/pglite/index.js", "./react": "./dist/frameworks/react/index.js", "./tauri-postgres": "./dist/drivers/tauri-postgres/index.js", "./vuejs": "./dist/frameworks/vuejs/index.js", @@ -95,6 +96,9 @@ "node-postgres": [ "./dist/drivers/node-postgres/index.d.ts" ], + "pglite": [ + "./dist/drivers/pglite/index.d.ts" + ], "react": [ "./dist/frameworks/react/index.d.ts" ], @@ -212,6 +216,7 @@ "zod": "3.21.1" }, "devDependencies": { + "@electric-sql/pglite": "^0.1.4", "@electric-sql/prisma-generator": "workspace:*", "@op-engineering/op-sqlite": ">= 2.0.16", "@tauri-apps/plugin-sql": "2.0.0-alpha.5", @@ -268,6 +273,7 @@ }, "peerDependencies": { "@capacitor-community/sqlite": ">= 5.6.2", + "@electric-sql/pglite": ">= 0.1.4", "@op-engineering/op-sqlite": ">= 2.0.16", "@tauri-apps/plugin-sql": "2.0.0-alpha.5", "embedded-postgres": "16.1.1-beta.9", diff --git a/clients/typescript/src/drivers/pglite/adapter.ts b/clients/typescript/src/drivers/pglite/adapter.ts new file mode 100644 index 0000000000..fccdefe0a8 --- /dev/null +++ b/clients/typescript/src/drivers/pglite/adapter.ts @@ -0,0 +1,26 @@ +import { Database } from './database' +import { Row } from '../../util/types' +import { Statement } from '../../util' +import { SerialDatabaseAdapter as GenericDatabaseAdapter } from '../generic' +import { RunResult } from '../../electric/adapter' + +export class DatabaseAdapter extends GenericDatabaseAdapter { + readonly db: Database + readonly defaultNamespace = 'public' + + constructor(db: Database) { + super() + this.db = db + } + + async _run(statement: Statement): Promise { + const res = await this.db.query(statement.sql, statement.args) + return { + rowsAffected: res.affectedRows ?? 
0, + } + } + + async _query(statement: Statement): Promise { + return (await this.db.query(statement.sql, statement.args)).rows + } +} diff --git a/clients/typescript/src/drivers/pglite/database.ts b/clients/typescript/src/drivers/pglite/database.ts new file mode 100644 index 0000000000..b106d59514 --- /dev/null +++ b/clients/typescript/src/drivers/pglite/database.ts @@ -0,0 +1,7 @@ +import type { PGlite } from '@electric-sql/pglite' + +// The relevant subset of the SQLitePlugin database client API +// that we need to ensure the client we're electrifying provides. +export interface Database + extends Pick { +} diff --git a/clients/typescript/src/drivers/pglite/index.ts b/clients/typescript/src/drivers/pglite/index.ts new file mode 100644 index 0000000000..28497b945f --- /dev/null +++ b/clients/typescript/src/drivers/pglite/index.ts @@ -0,0 +1,45 @@ +import { DatabaseAdapter as DatabaseAdapterI } from '../../electric/adapter' +import { DatabaseAdapter } from './adapter' +import { Database } from './database' +import { ElectricConfig } from '../../config' +import { electrify as baseElectrify, ElectrifyOptions } from '../../electric' +import { WebSocketWeb } from '../../sockets/web' +import { ElectricClient, DbSchema } from '../../client/model' +import { PgBundleMigrator } from '../../migrators/bundle' + +export { DatabaseAdapter } +export type { Database } + +export const electrify = async >( + db: T, + dbDescription: DB, + config: ElectricConfig, + opts?: ElectrifyOptions +): Promise> => { + const dbName = db.dataDir?.split('/').pop() ?? 'memory' + const adapter = opts?.adapter || new DatabaseAdapter(db) + const migrator = + opts?.migrator || new PgBundleMigrator(adapter, dbDescription.pgMigrations) + const socketFactory = opts?.socketFactory || WebSocketWeb + const prepare = async (_connection: DatabaseAdapterI) => undefined + + const configWithDialect = { + ...config, + dialect: 'Postgres', + } as const + + const client = await baseElectrify( + dbName, + dbDescription, + adapter, + socketFactory, + configWithDialect, + { + migrator, + prepare, + ...opts, + } + ) + + return client +} diff --git a/clients/typescript/src/drivers/pglite/mock.ts b/clients/typescript/src/drivers/pglite/mock.ts new file mode 100644 index 0000000000..4b3910d3d5 --- /dev/null +++ b/clients/typescript/src/drivers/pglite/mock.ts @@ -0,0 +1,30 @@ +import { Database } from './database' +import type { PGliteOptions, QueryOptions, Results } from '@electric-sql/pglite' + +export class MockDatabase implements Database { + dataDir?: string + fail: Error | undefined + + constructor(dataDir?: string, options?: PGliteOptions) { + this.dataDir = dataDir + } + + async query( + query: string, + params?: any[], + options?: QueryOptions + ): Promise> { + if (typeof this.fail !== 'undefined') throw this.fail + + return { + rows: [{ val: 1 } as T, { val: 2 } as T], + affectedRows: 0, + fields: [ + { + name: 'val', + dataTypeID: 0, + }, + ], + } + } +} diff --git a/clients/typescript/test/drivers/pglite.test.ts b/clients/typescript/test/drivers/pglite.test.ts new file mode 100644 index 0000000000..934d29139f --- /dev/null +++ b/clients/typescript/test/drivers/pglite.test.ts @@ -0,0 +1,173 @@ +import test from 'ava' + +import { MockDatabase } from '../../src/drivers/pglite/mock' +import { DatabaseAdapter } from '../../src/drivers/pglite' +import { PGlite } from '@electric-sql/pglite' + +test('database adapter run works', async (t) => { + const db = new MockDatabase('test.db') + const adapter = new DatabaseAdapter(db) + + const sql = 
'drop table badgers' + const result = await adapter.run({ sql }) + + t.is(result.rowsAffected, 0) +}) + +test('database adapter query works', async (t) => { + const db = new MockDatabase('test.db') + const adapter = new DatabaseAdapter(db) + + const sql = 'select * from bars' + const result = await adapter.query({ sql }) + + t.deepEqual(result, [ + { + val: 1, + }, + { + val: 2, + }, + ]) +}) + +// Test with an actual PGlite +async function makeAdapter() { + const db = new PGlite() + const adapter = new DatabaseAdapter(db) + const createTableSql = + 'CREATE TABLE IF NOT EXISTS Post(id TEXT PRIMARY KEY, title TEXT, contents TEXT, nbr integer);' + await adapter.run({ sql: createTableSql }) + return adapter +} + +test('adapter run works on real DB', async (t) => { + const adapter = await makeAdapter() + const insertRecordSql = + "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)" + const res = await adapter.run({ sql: insertRecordSql }) + t.is(res.rowsAffected, 1) +}) + +test('adapter query works on real DB', async (t) => { + const adapter = await makeAdapter() + const insertRecordSql = + "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)" + await adapter.run({ sql: insertRecordSql }) + + const selectSql = + "SELECT * FROM Post WHERE (id = ('i1')) AND (nbr = (18)) LIMIT 1" + const res = await adapter.query({ sql: selectSql }) + t.deepEqual(res, [{ id: 'i1', title: 't1', contents: 'c1', nbr: 18 }]) +}) + +test('adapter runInTransaction works on real DB', async (t) => { + const adapter = await makeAdapter() + const insertRecord1Sql = + "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)" + const insertRecord2Sql = + "INSERT INTO Post (id, title, contents, nbr) VALUES ('i2', 't2', 'c2', 25)" + + const txRes = await adapter.runInTransaction( + { sql: insertRecord1Sql }, + { sql: insertRecord2Sql } + ) + + t.is(txRes.rowsAffected, 2) + + const selectAll = 'SELECT id FROM Post' + const res = await adapter.query({ sql: selectAll }) + + t.deepEqual(res, [{ id: 'i1' }, { id: 'i2' }]) +}) + +test('adapter runInTransaction rolls back on conflict', async (t) => { + const adapter = await makeAdapter() + const insertRecord1Sql = + "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)" + const insertRecord2Sql = + "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't2', 'c2', 25)" + + try { + await adapter.runInTransaction( + { sql: insertRecord1Sql }, + { sql: insertRecord2Sql } + ) + t.fail() // the transaction should be rejected because the primary key of the second record already exists + } catch (err) { + const castError = err as { code: string; detail: string } + t.is(castError.code, '23505') + t.is(castError.detail, 'Key (id)=(i1) already exists.') + + // Check that no posts were added to the DB + const selectAll = 'SELECT id FROM Post' + const res = await adapter.query({ sql: selectAll }) + t.deepEqual(res, []) + } +}) + +test('adapter supports dependent queries in transaction on real DB', async (t) => { + const adapter = await makeAdapter() + const [txRes, rowsAffected] = (await adapter.transaction>( + (tx, setResult) => { + let rowsAffected = 0 + tx.run( + { + sql: "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)", + }, + (tx2, res) => { + rowsAffected += res.rowsAffected + const select = { sql: "SELECT nbr FROM Post WHERE id = 'i1'" } + tx2.query(select, (tx3, rows) => { + const [res] = rows as unknown as Array<{ nbr: number }> + const newNbr = res.nbr + 2 + tx3.run( + { + 
sql: `INSERT INTO Post (id, title, contents, nbr) VALUES ('i2', 't2', 'c2', ${newNbr})`, + }, + (_, res) => { + rowsAffected += res.rowsAffected + setResult([newNbr, rowsAffected]) + } + ) + }) + } + ) + } + )) as unknown as Array + + t.is(txRes, 20) + t.is(rowsAffected, 2) + + const selectAll = 'SELECT * FROM Post' + const res = await adapter.query({ sql: selectAll }) + + t.deepEqual(res, [ + { id: 'i1', title: 't1', contents: 'c1', nbr: 18 }, + { id: 'i2', title: 't2', contents: 'c2', nbr: 20 }, + ]) +}) + +test('adapter rolls back dependent queries on conflict', async (t) => { + const adapter = await makeAdapter() + try { + await adapter.transaction((tx) => { + tx.run({ + sql: "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't1', 'c1', 18)", + }) + tx.run({ + sql: "INSERT INTO Post (id, title, contents, nbr) VALUES ('i1', 't2', 'c2', 20)", + }) + }) + t.fail() // the transaction should be rejected because the primary key of the second record already exists + } catch (err) { + const castError = err as { code: string; detail: string } + t.is(castError.code, '23505') + t.is(castError.detail, 'Key (id)=(i1) already exists.') + + // Check that no posts were added to the DB + const selectAll = 'SELECT id FROM Post' + const res = await adapter.query({ sql: selectAll }) + t.deepEqual(res, []) + } +}) diff --git a/examples/web-pglite/.env b/examples/web-pglite/.env new file mode 100644 index 0000000000..d3f9154535 --- /dev/null +++ b/examples/web-pglite/.env @@ -0,0 +1,3 @@ +ELECTRIC_SERVICE=http://localhost:5133 +ELECTRIC_PG_PROXY_PORT=65432 +ELECTRIC_IMAGE=electric:local-build \ No newline at end of file diff --git a/examples/web-pglite/.eslintrc.cjs b/examples/web-pglite/.eslintrc.cjs new file mode 100644 index 0000000000..d6c9537953 --- /dev/null +++ b/examples/web-pglite/.eslintrc.cjs @@ -0,0 +1,18 @@ +module.exports = { + root: true, + env: { browser: true, es2020: true }, + extends: [ + 'eslint:recommended', + 'plugin:@typescript-eslint/recommended', + 'plugin:react-hooks/recommended', + ], + ignorePatterns: ['dist', '.eslintrc.cjs'], + parser: '@typescript-eslint/parser', + plugins: ['react-refresh'], + rules: { + 'react-refresh/only-export-components': [ + 'warn', + { allowConstantExport: true }, + ], + }, +} diff --git a/examples/web-pglite/.gitignore b/examples/web-pglite/.gitignore new file mode 100644 index 0000000000..7f22c6d6c4 --- /dev/null +++ b/examples/web-pglite/.gitignore @@ -0,0 +1,31 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? + +# Wasm +public/wa-sqlite-async.wasm + +# Env files +.env.local +.env.*.local diff --git a/examples/web-pglite/README.md b/examples/web-pglite/README.md new file mode 100644 index 0000000000..f2597fe7ab --- /dev/null +++ b/examples/web-pglite/README.md @@ -0,0 +1,87 @@ + + + + + ElectricSQL logo + + + +# ElectricSQL - PGlite example + +This is an example web application using ElectricSQL in the browser with [PGlite](https://github.com/electric-sql/pglite) a WASM build of Postgres. 
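
Under the hood, the example electrifies a PGlite instance via the `electric-sql/pglite` driver entry point added by this patch. Below is a minimal sketch of that wiring — the `schema` import path and the `url` config key are illustrative assumptions rather than code copied from this example, and the service URL matches the `ELECTRIC_SERVICE` value in `.env`:

```ts
import { PGlite } from '@electric-sql/pglite'
import { electrify } from 'electric-sql/pglite'
// Hypothetical path: the type-safe client produced by `npm run client:generate`.
import { schema } from './generated/client'

// An in-memory Postgres running in the browser/Node process.
// Passing a data directory instead persists the database between sessions.
const conn = new PGlite()

// `electrify` wraps the PGlite connection in the DatabaseAdapter, applies the
// bundled Postgres migrations and sets up the websocket sync transport,
// returning an electrified, type-safe client.
const electric = await electrify(conn, schema, { url: 'http://localhost:5133' })
```

See `./src/ElectricProvider.tsx` in this example for how the equivalent setup is wired into React.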
+ +## Instructions + +Clone the [electric-sql/electric](https://github.com/electric-sql/electric) mono-repo and change directory into this example folder: + +```sh +git clone https://github.com/electric-sql/electric +cd electric/examples/web-pglite +``` + +## Pre-reqs + +You need [NodeJS >= 16.11 and Docker Compose v2](https://electric-sql.com/docs/usage/installation/prereqs). +``` + +## Install + +Install the dependencies: + +```sh +npm install +``` + +## Setup + +Start Postgres and Electric using Docker (see [running the examples](https://electric-sql.com/docs/examples/notes/running) for more options): + +```shell +npm run backend:up +# Or `npm run backend:start` to foreground +``` + +Note that, if useful, you can connect to Postgres using: + +```shell +npm run db:psql +``` + +Setup your [database schema](https://electric-sql.com/docs/usage/data-modelling): + +```shell +npm run db:migrate +``` + +Generate your [type-safe client](https://electric-sql.com/docs/usage/data-access/client): + +```shell +npm run client:generate +# or `yarn client:watch`` to re-generate whenever the DB schema changes +``` + +## Run + +Start your app: + +```sh +npm run dev +``` + +Open [localhost:5173](http://localhost:5173) in your web browser. + +## Develop + +`./src/Example.tsx` has the main example code. For more information see the: + +- [Documentation](https://electric-sql.com/docs) +- [Quickstart](https://electric-sql.com/docs/quickstart) +- [Usage guide](https://electric-sql.com/docs/usage) + +If you need help [let us know on Discord](https://discord.electric-sql.com). diff --git a/examples/web-pglite/db/migrations/01-create_items_table.sql b/examples/web-pglite/db/migrations/01-create_items_table.sql new file mode 100644 index 0000000000..0bba9c5a1c --- /dev/null +++ b/examples/web-pglite/db/migrations/01-create_items_table.sql @@ -0,0 +1,19 @@ +/* This is an example of an SQL DDL migration. It creates an `items` table and + * then calls an `electric.electrify` procedure to expose the table to the + * ElectricSQL replication machinery. + * + * Note that these statements are applied directly to the *Postgres* database. + * Electric then handles keeping the local SQLite database schema in sync with + * the electrified subset of your Postgres database schema. + * + * See https://electric-sql.com/docs/usage/data-modelling for more information. + */ + +-- Create a simple items table. +CREATE TABLE IF NOT EXISTS items ( + value TEXT PRIMARY KEY NOT NULL +); + +-- ⚡ +-- Electrify the items table +ALTER TABLE items ENABLE ELECTRIC; diff --git a/examples/web-pglite/index.html b/examples/web-pglite/index.html new file mode 100644 index 0000000000..f69b26596f --- /dev/null +++ b/examples/web-pglite/index.html @@ -0,0 +1,13 @@ + + + + + + + Web Example - ElectricSQL + + +
+ + + diff --git a/examples/web-pglite/package-lock.json b/examples/web-pglite/package-lock.json new file mode 100644 index 0000000000..1176e0f170 --- /dev/null +++ b/examples/web-pglite/package-lock.json @@ -0,0 +1,6998 @@ +{ + "name": "electric-sql-pglite-example", + "version": "0.9.3", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "electric-sql-pglite-example", + "version": "0.9.3", + "license": "Apache-2.0", + "dependencies": { + "@electric-sql/pglite": "^0.1.4", + "electric-sql": "file:../../clients/typescript/electric-sql-0.10.1.tgz", + "react": "^18.2.0", + "react-dom": "^18.2.0" + }, + "devDependencies": { + "@databases/pg-migrations": "^5.0.3", + "@types/react": "^18.2.57", + "@types/react-dom": "^18.2.19", + "@typescript-eslint/eslint-plugin": "^6.21.0", + "@typescript-eslint/parser": "^6.21.0", + "@vitejs/plugin-react": "^4.2.1", + "eslint": "^8.56.0", + "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-refresh": "^0.4.5", + "typescript": "^5.3.3", + "vite": "^5.1.4" + } + }, + "node_modules/@aashutoshrathi/word-wrap": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz", + "integrity": "sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", + "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", + "dev": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.23.5.tgz", + "integrity": "sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA==", + "dev": true, + "dependencies": { + "@babel/highlight": "^7.23.4", + "chalk": "^2.4.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/code-frame/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/code-frame/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + 
"integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true + }, + "node_modules/@babel/code-frame/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/code-frame/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.5.tgz", + "integrity": "sha512-uU27kfDRlhfKl+w1U6vp16IuvSLtjAxdArVXPa9BvLkrr7CYIsxH5adpHObeAGY/41+syctUWOZ140a2Rvkgjw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.9.tgz", + "integrity": "sha512-5q0175NOjddqpvvzU+kDiSOAk4PfdO6FvwCWoQ6RO7rTzEe8vlo+4HVfcnAREhD4npMs0e9uZypjTwzZPCf/cw==", + "dev": true, + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.23.5", + "@babel/generator": "^7.23.6", + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helpers": "^7.23.9", + "@babel/parser": "^7.23.9", + "@babel/template": "^7.23.9", + "@babel/traverse": "^7.23.9", + "@babel/types": "^7.23.9", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.6.tgz", + "integrity": "sha512-qrSfCYxYQB5owCmGLbl8XRpX1ytXlpueOb0N0UmQwA073KZxejgQTzAmJezxvpwQD9uGtK2shHdi55QT+MbjIw==", + "dev": true, + "dependencies": { + "@babel/types": "^7.23.6", + "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", + "jsesc": "^2.5.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz", + "integrity": "sha512-9JB548GZoQVmzrFgp8o7KxdgkTGm6xs9DW0o/Pim72UDjzr5ObUQ6ZzYPqA+g9OTS2bBQoctLJrky0RDCAWRgQ==", + "dev": true, 
+ "dependencies": { + "@babel/compat-data": "^7.23.5", + "@babel/helper-validator-option": "^7.23.5", + "browserslist": "^4.22.2", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-environment-visitor": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-function-name": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", + "dev": true, + "dependencies": { + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-hoist-variables": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz", + "integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.23.3.tgz", + "integrity": "sha512-7bBs4ED9OmswdfDzpz4MpWgSrV7FXlc3zIagvLFjS5H+Mk7Snr21vQ6QwrsoCGMfNC4e4LQPdoULEt4ykz0SRQ==", + "dev": true, + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-module-imports": "^7.22.15", + "@babel/helper-simple-access": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/helper-validator-identifier": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.0.tgz", + "integrity": "sha512-9cUznXMG0+FxRuJfvL82QlTqIzhVW9sL0KjMPHhAOOvpQGL8QtdxnBKILjBqxlHyliz0yCa1G903ZXI/FuHy2w==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-simple-access": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz", + "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==", + "dev": true, + "dependencies": { 
+ "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-split-export-declaration": { + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz", + "integrity": "sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz", + "integrity": "sha512-85ttAOMLsr53VgXkTbkx8oA6YTfT4q7/HzXSLEYmjcSTJPMPQtvq1BD79Byep5xMUYbGRzEpDsjUf3dyp54IKw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.9.tgz", + "integrity": "sha512-87ICKgU5t5SzOT7sBMfCOZQ2rHjRU+Pcb9BoILMYz600W6DkVRLFBPwQ18gwUVvggqXivaUakpnxWQGbpywbBQ==", + "dev": true, + "dependencies": { + "@babel/template": "^7.23.9", + "@babel/traverse": "^7.23.9", + "@babel/types": "^7.23.9" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.23.4.tgz", + "integrity": "sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A==", + "dev": true, + "dependencies": { + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": 
"sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/highlight/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true + }, + "node_modules/@babel/highlight/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/highlight/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/parser": { + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.9.tgz", + "integrity": "sha512-9tcKgqKbs3xGJ+NtKF2ndOBBLVwPjl1SHxPQkd36r3Dlirw3xWUeGaTbqr7uGZcTaxkVNwc+03SVP7aCdWrTlA==", + "dev": true, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.23.3.tgz", + "integrity": "sha512-qXRvbeKDSfwnlJnanVRp0SfuWE5DQhwQr5xtLBzp56Wabyo+4CMosF6Kfp+eOD/4FYpql64XVJ2W0pVLlJZxOQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.23.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.23.3.tgz", + "integrity": "sha512-91RS0MDnAWDNvGC6Wio5XYkyWI39FMFO+JK9+4AlgaTH+yWwVTsw7/sn6LK0lH7c5F+TFkpv/3LfCJ1Ydwof/g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.23.9.tgz", + "integrity": "sha512-+xrD2BWLpvHKNmX2QbpdpsBaWnRxahMwJjO+KZk2JOElj5nSmKezyS1B4u+QbHMTX69t4ukm6hh9lsYQ7GHCKA==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.23.5", + "@babel/parser": "^7.23.9", + "@babel/types": "^7.23.9" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.9.tgz", + "integrity": 
"sha512-I/4UJ9vs90OkBtY6iiiTORVMyIhJ4kAVmsKo9KFc8UOxMeUfi2hvtIBsET5u9GizXE6/GFSuKCTNfgCswuEjRg==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.23.5", + "@babel/generator": "^7.23.6", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.9", + "@babel/types": "^7.23.9", + "debug": "^4.3.1", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.23.9", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.9.tgz", + "integrity": "sha512-dQjSq/7HaSjRM43FFGnv5keM2HsxpmyV1PfaSVm0nzzjwwTmjOe6J4bC8e3+pTEIgHaHj+1ZlLThRJ2auc/w1Q==", + "dev": true, + "dependencies": { + "@babel/helper-string-parser": "^7.23.4", + "@babel/helper-validator-identifier": "^7.22.20", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@databases/connection-pool": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@databases/connection-pool/-/connection-pool-1.1.0.tgz", + "integrity": "sha512-/12/SNgl0V77mJTo5SX3yGPz4c9XGQwAlCfA0vlfs/0HcaErNpYXpmhj0StET07w6TmTJTnaUgX2EPcQK9ez5A==", + "dev": true, + "dependencies": { + "@databases/queue": "^1.0.0", + "is-promise": "^4.0.0" + } + }, + "node_modules/@databases/escape-identifier": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@databases/escape-identifier/-/escape-identifier-1.0.3.tgz", + "integrity": "sha512-Su36iSVzaHxpVdISVMViUX/32sLvzxVgjZpYhzhotxZUuLo11GVWsiHwqkvUZijTLUxcDmUqEwGJO3O/soLuZA==", + "dev": true, + "dependencies": { + "@databases/validate-unicode": "^1.0.0" + } + }, + "node_modules/@databases/lock": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@databases/lock/-/lock-2.1.0.tgz", + "integrity": "sha512-ReWnFE5qeCuO2SA5h5fDh/hE/vMolA+Epe6xkAQP1FL2nhnsTCYwN2JACk/kWctR4OQoh0njBjPZ0yfIptclcA==", + "dev": true, + "dependencies": { + "@databases/queue": "^1.0.0" + } + }, + "node_modules/@databases/migrations-base": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@databases/migrations-base/-/migrations-base-3.0.1.tgz", + "integrity": "sha512-CutCQ1AjsEWqSuXInD8KwaZYa3/InYGFu3uZ/2pu0Ku4MHRab14+sKNXLk/dxHaJLplngLtCraBo8rL7/21Vhg==", + "dev": true, + "dependencies": { + "assert-never": "^1.2.1", + "chalk": "^4.1.0", + "deep-equal": "^2.0.4", + "interrogator": "^2.0.0", + "is-interactive": "^1.0.0", + "parameter-reducers": "^2.0.0", + "semver": "^7.3.2" + } + }, + "node_modules/@databases/pg": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/@databases/pg/-/pg-5.5.0.tgz", + "integrity": "sha512-WIojK9AYIlNi5YRfc5YUOow3PQ82ClmwT9HG3nEsKLUERYieoVmHMYDQLS0ry6FjgJx+2yFs7LCw4kZpWu1TBw==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.0.0", + "@databases/escape-identifier": "^1.0.3", + "@databases/pg-config": "^3.2.0", + "@databases/pg-connection-string": "^1.0.0", + "@databases/pg-data-type-id": "^3.0.0", + "@databases/pg-errors": "^1.0.0", + "@databases/push-to-async-iterable": "^3.0.0", + "@databases/shared": "^3.1.0", + "@databases/split-sql-query": "^1.0.4", + "@databases/sql": "^3.3.0", + "assert-never": "^1.2.1", + "pg": "^8.4.2", + "pg-cursor": "^2.4.2" + } + }, + "node_modules/@databases/pg-config": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@databases/pg-config/-/pg-config-3.2.0.tgz", + "integrity": 
"sha512-hoPAK/F8gLcLgEJ8mLSnNvRlKqShwx5+GglDHIIfQhKF+Zz6M6QceiOefckS4WSjA0x2HClPvpercnXp9i24ag==", + "dev": true, + "dependencies": { + "cosmiconfig": "^8.1.0", + "funtypes": "^4.1.0" + } + }, + "node_modules/@databases/pg-connection-string": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@databases/pg-connection-string/-/pg-connection-string-1.0.0.tgz", + "integrity": "sha512-8czOF9jlv7PlS7BPjnL82ynpDs1t8cu+C2jvdtMr37e8daPKMS7n1KfNE9xtr2Gq4QYKjynep097eYa5yIwcLA==", + "dev": true + }, + "node_modules/@databases/pg-data-type-id": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@databases/pg-data-type-id/-/pg-data-type-id-3.0.0.tgz", + "integrity": "sha512-VqW1csN8pRsWJxjPsGIC9FQ8wyenfmGv0P//BaeDMAu/giM3IXKxKM8fkScUSQ00uqFK/L1iHS5g6dgodF3XzA==", + "dev": true + }, + "node_modules/@databases/pg-errors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@databases/pg-errors/-/pg-errors-1.0.0.tgz", + "integrity": "sha512-Yz3exbptZwOn4ZD/MSwY6z++XVyOFsMh5DERvSw3awRwJFnfdaqdeiIxxX0MVjM6KPihF0xxp8lPO7vTc5ydpw==", + "dev": true + }, + "node_modules/@databases/pg-migrations": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/@databases/pg-migrations/-/pg-migrations-5.0.3.tgz", + "integrity": "sha512-mUKbVYiACRZJOhE96Y5Fr2IIJ2iBNPVM6L3LgBypH9zIQMOLixGpJ6ZS0oabtNIFJhjncXl5FVEtYaa8x5KkoA==", + "dev": true, + "dependencies": { + "@databases/migrations-base": "^3.0.1", + "@databases/pg": "^5.5.0", + "@databases/pg-config": "^3.2.0", + "assert-never": "^1.2.1", + "chalk": "^4.1.0", + "interrogator": "^2.0.0", + "is-interactive": "^1.0.0", + "parameter-reducers": "^2.0.0", + "semver": "^7.3.2", + "sucrase": "^3.16.0" + }, + "bin": { + "pg-migrations": "lib/cli.js" + } + }, + "node_modules/@databases/push-to-async-iterable": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@databases/push-to-async-iterable/-/push-to-async-iterable-3.0.0.tgz", + "integrity": "sha512-xwu/yNgINdMU+fn6UwFsxh+pa6UrVPafY+0qm0RK0/nKyjllfDqSbwK4gSmdmLEwPYxKwch9CAE3P8NxN1hPSg==", + "dev": true, + "dependencies": { + "@databases/queue": "^1.0.0" + } + }, + "node_modules/@databases/queue": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@databases/queue/-/queue-1.0.1.tgz", + "integrity": "sha512-dqRU+/aQ4lhFzjPIkIhjB0+UEKMb76FoBgHOJUTcEblgatr/IhdhHliT3VVwcImXh35Mz297PAXE4yFM4eYWUQ==", + "dev": true + }, + "node_modules/@databases/shared": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@databases/shared/-/shared-3.1.0.tgz", + "integrity": "sha512-bO1DIYAYDiWOCqVPvBio1JqZQYh4dph2M1av2w/REeFT6WBd64mTrOFlcxKV0CUAYT0UiJsDfPqEfw0/APRzWg==", + "dev": true, + "dependencies": { + "@databases/connection-pool": "^1.1.0", + "@databases/lock": "^2.1.0", + "@databases/queue": "^1.0.1", + "@databases/split-sql-query": "^1.0.4", + "@databases/sql": "^3.3.0" + } + }, + "node_modules/@databases/split-sql-query": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@databases/split-sql-query/-/split-sql-query-1.0.4.tgz", + "integrity": "sha512-lDqDQvH34NNjLs0knaDvL6HKgPtishQlDYHfOkvbAd5VQOEhcDvvmG2zbBuFvS2HQAz5NsyLj5erGaxibkxhvQ==", + "dev": true, + "peerDependencies": { + "@databases/sql": "*" + } + }, + "node_modules/@databases/sql": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/@databases/sql/-/sql-3.3.0.tgz", + "integrity": "sha512-vj9huEy4mjJ48GS1Z8yvtMm4BYAnFYACUds25ym6Gd/gsnngkJ17fo62a6mmbNNwCBS/8467PmZR01Zs/06TjA==", + "dev": true + }, + "node_modules/@databases/validate-unicode": { + 
"version": "1.0.0", + "resolved": "https://registry.npmjs.org/@databases/validate-unicode/-/validate-unicode-1.0.0.tgz", + "integrity": "sha512-dLKqxGcymeVwEb/6c44KjOnzaAafFf0Wxa8xcfEjx/qOl3rdijsKYBAtIGhtVtOlpPf/PFKfgTuFurSPn/3B/g==", + "dev": true + }, + "node_modules/@electric-sql/pglite": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/@electric-sql/pglite/-/pglite-0.1.4.tgz", + "integrity": "sha512-B1mWjMVHCjrqqdRXCk3a9v9XmtAg+i/GpH/fdlmkukFY2Ofm7lF+nJu5AjIk84NFdcG8IdvzS9fvQBXY5sXonQ==" + }, + "node_modules/@electric-sql/prisma-generator": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@electric-sql/prisma-generator/-/prisma-generator-1.1.4.tgz", + "integrity": "sha512-zZQ88uBEKIhf6lKmnzWjOs65V5uVj/i2/N9aU4jpbJIf63n9LUgUh6abv7weFkvJsGddY/6I0bgE4a400FAkxA==", + "dependencies": { + "@prisma/generator-helper": "^4.11.0", + "code-block-writer": "^11.0.3", + "lodash": "^4.17.21", + "zod": "3.21.1" + }, + "bin": { + "electric-sql-prisma-generator": "dist/bin.js" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.19.12.tgz", + "integrity": "sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.19.12.tgz", + "integrity": "sha512-qg/Lj1mu3CdQlDEEiWrlC4eaPZ1KztwGJ9B6J+/6G+/4ewxJg7gqj8eVYWvao1bXrqGiW2rsBZFSX3q2lcW05w==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.19.12.tgz", + "integrity": "sha512-P0UVNGIienjZv3f5zq0DP3Nt2IE/3plFzuaS96vihvD0Hd6H/q4WXUGpCxD/E8YrSXfNyRPbpTq+T8ZQioSuPA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.19.12.tgz", + "integrity": "sha512-3k7ZoUW6Q6YqhdhIaq/WZ7HwBpnFBlW905Fa4s4qWJyiNOgT1dOqDiVAQFwBH7gBRZr17gLrlFCRzF6jFh7Kew==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.19.12.tgz", + "integrity": "sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.19.12.tgz", + "integrity": "sha512-hKoVkKzFiToTgn+41qGhsUJXFlIjxI/jSYeZf3ugemDYZldIXIxhvwN6erJGlX4t5h417iFuheZ7l+YVn05N3A==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.19.12", + "resolved": 
"https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.12.tgz", + "integrity": "sha512-4aRvFIXmwAcDBw9AueDQ2YnGmz5L6obe5kmPT8Vd+/+x/JMVKCgdcRwH6APrbpNXsPz+K653Qg8HB/oXvXVukA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.19.12.tgz", + "integrity": "sha512-EYoXZ4d8xtBoVN7CEwWY2IN4ho76xjYXqSXMNccFSx2lgqOG/1TBPW0yPx1bJZk94qu3tX0fycJeeQsKovA8gg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.19.12.tgz", + "integrity": "sha512-J5jPms//KhSNv+LO1S1TX1UWp1ucM6N6XuL6ITdKWElCu8wXP72l9MM0zDTzzeikVyqFE6U8YAV9/tFyj0ti+w==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.19.12.tgz", + "integrity": "sha512-EoTjyYyLuVPfdPLsGVVVC8a0p1BFFvtpQDB/YLEhaXyf/5bczaGeN15QkR+O4S5LeJ92Tqotve7i1jn35qwvdA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.19.12.tgz", + "integrity": "sha512-Thsa42rrP1+UIGaWz47uydHSBOgTUnwBwNq59khgIwktK6x60Hivfbux9iNR0eHCHzOLjLMLfUMLCypBkZXMHA==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.19.12.tgz", + "integrity": "sha512-LiXdXA0s3IqRRjm6rV6XaWATScKAXjI4R4LoDlvO7+yQqFdlr1Bax62sRwkVvRIrwXxvtYEHHI4dm50jAXkuAA==", + "cpu": [ + "loong64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.19.12.tgz", + "integrity": "sha512-fEnAuj5VGTanfJ07ff0gOA6IPsvrVHLVb6Lyd1g2/ed67oU1eFzL0r9WL7ZzscD+/N6i3dWumGE1Un4f7Amf+w==", + "cpu": [ + "mips64el" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.19.12.tgz", + "integrity": "sha512-nYJA2/QPimDQOh1rKWedNOe3Gfc8PabU7HT3iXWtNUbRzXS9+vgB0Fjaqr//XNbd82mCxHzik2qotuI89cfixg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.19.12.tgz", + "integrity": "sha512-2MueBrlPQCw5dVJJpQdUYgeqIzDQgw3QtiAHUC4RBz9FXPrskyyU3VI1hw7C0BSKB9OduwSJ79FTCqtGMWqJHg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": 
{ + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.19.12.tgz", + "integrity": "sha512-+Pil1Nv3Umes4m3AZKqA2anfhJiVmNCYkPchwFJNEJN5QxmTs1uzyy4TvmDrCRNT2ApwSari7ZIgrPeUx4UZDg==", + "cpu": [ + "s390x" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.19.12.tgz", + "integrity": "sha512-B71g1QpxfwBvNrfyJdVDexenDIt1CiDN1TIXLbhOw0KhJzE78KIFGX6OJ9MrtC0oOqMWf+0xop4qEU8JrJTwCg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.19.12.tgz", + "integrity": "sha512-3ltjQ7n1owJgFbuC61Oj++XhtzmymoCihNFgT84UAmJnxJfm4sYCiSLTXZtE00VWYpPMYc+ZQmB6xbSdVh0JWA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.19.12.tgz", + "integrity": "sha512-RbrfTB9SWsr0kWmb9srfF+L933uMDdu9BIzdA7os2t0TXhCRjrQyCeOt6wVxr79CKD4c+p+YhCj31HBkYcXebw==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.19.12.tgz", + "integrity": "sha512-HKjJwRrW8uWtCQnQOz9qcU3mUZhTUQvi56Q8DPTLLB+DawoiQdjsYq+j+D3s9I8VFtDr+F9CjgXKKC4ss89IeA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.19.12.tgz", + "integrity": "sha512-URgtR1dJnmGvX864pn1B2YUYNzjmXkuJOIqG2HdU62MVS4EHpU2946OZoTMnRUHklGtJdJZ33QfzdjGACXhn1A==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.19.12.tgz", + "integrity": "sha512-+ZOE6pUkMOJfmxmBZElNOx72NKpIa/HFOMGzu8fqzQJ5kgf6aTGrcJaFsNiVMH4JKpMipyK+7k0n2UXN7a8YKQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.19.12.tgz", + "integrity": "sha512-T1QyPSDCyMXaO3pzBkF96E8xMkiRYbUEZADd29SyPGabqxMViNoii+NcK7eWJAEoU6RZyEm5lVSIjTmcdoB9HA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", + "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", + "dev": true, + "dependencies": { + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { 
+ "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.10.0", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.10.0.tgz", + "integrity": "sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA==", + "dev": true, + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "dev": true, + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/eslintrc/node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/eslintrc/node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@eslint/js": { + "version": "8.56.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.56.0.tgz", + "integrity": "sha512-gMsVel9D7f2HLkBma9VbtzZRehRogVRfbr++f06nL2vnCGCNlzOD+/MUov/F4p8myyAHspEhVobgjpX64q5m6A==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.11.14", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", + "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", + "dev": true, + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.2", + "debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/brace-expansion": { + "version": 
"1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.2.tgz", + "integrity": "sha512-6EwiSjwWYP7pTckG6I5eyFANjPhmPjUX9JRLUSfNPC7FX7zK9gyZAfUEaECL6ALTpGX5AjnBq3C9XmVWPitNpw==", + "dev": true + }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true + }, + "node_modules/@isaacs/cliui/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@isaacs/cliui/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "dev": true, + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/source-map": { + "version": "0.3.6", + "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.6.tgz", + "integrity": "sha512-1ZJTZebgqllO79ue2bm3rIGud/bOe0pP5BjSRCRxxYkEZS8STV7zN84UBbiYu7jy+eCKSnVIUgoWWE/tt+shMQ==", + "dev": true, + "optional": true, + "peer": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==", + "dev": true + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@ljharb/through": { + "version": "2.3.12", + "resolved": "https://registry.npmjs.org/@ljharb/through/-/through-2.3.12.tgz", + "integrity": "sha512-ajo/heTlG3QgC8EGP6APIejksVAYt4ayz4tqoP3MolFELzcH1x1fzwEYRJTPO0IELutZ5HQ0c26/GqAYy79u3g==", + "dev": 
true, + "dependencies": { + "call-bind": "^1.0.5" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "dev": true, + "optional": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/@prisma/client": { + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/@prisma/client/-/client-4.8.1.tgz", + "integrity": "sha512-d4xhZhETmeXK/yZ7K0KcVOzEfI5YKGGEr4F5SBV04/MU4ncN/HcE28sy3e4Yt8UFW0ZuImKFQJE+9rWt9WbGSQ==", + "hasInstallScript": true, + "dependencies": { + "@prisma/engines-version": "4.8.0-61.d6e67a83f971b175a593ccc12e15c4a757f93ffe" + }, + "engines": { + "node": ">=14.17" + }, + "peerDependencies": { + "prisma": "*" + }, + "peerDependenciesMeta": { + "prisma": { + "optional": true + } + } + }, + "node_modules/@prisma/debug": { + "version": "4.16.2", + "resolved": "https://registry.npmjs.org/@prisma/debug/-/debug-4.16.2.tgz", + "integrity": "sha512-7L7WbG0qNNZYgLpsVB8rCHCXEyHFyIycRlRDNwkVfjQmACC2OW6AWCYCbfdjQhkF/t7+S3njj8wAWAocSs+Brw==", + "dependencies": { + "@types/debug": "4.1.8", + "debug": "4.3.4", + "strip-ansi": "6.0.1" + } + }, + "node_modules/@prisma/engines": { + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/@prisma/engines/-/engines-4.8.1.tgz", + "integrity": "sha512-93tctjNXcIS+i/e552IO6tqw17sX8liivv8WX9lDMCpEEe3ci+nT9F+1oHtAafqruXLepKF80i/D20Mm+ESlOw==", + "hasInstallScript": true + }, + "node_modules/@prisma/engines-version": { + "version": "4.8.0-61.d6e67a83f971b175a593ccc12e15c4a757f93ffe", + "resolved": "https://registry.npmjs.org/@prisma/engines-version/-/engines-version-4.8.0-61.d6e67a83f971b175a593ccc12e15c4a757f93ffe.tgz", + "integrity": "sha512-MHSOSexomRMom8QN4t7bu87wPPD+pa+hW9+71JnVcF3DqyyO/ycCLhRL1we3EojRpZxKvuyGho2REQsMCvxcJw==" + }, + "node_modules/@prisma/generator-helper": { + "version": "4.16.2", + "resolved": "https://registry.npmjs.org/@prisma/generator-helper/-/generator-helper-4.16.2.tgz", + "integrity": "sha512-bMOH7y73Ui7gpQrioFeavMQA+Tf8ksaVf8Nhs9rQNzuSg8SSV6E9baczob0L5KGZTSgYoqnrRxuo03kVJYrnIg==", + "dependencies": { + "@prisma/debug": "4.16.2", + "@types/cross-spawn": "6.0.2", + "cross-spawn": "7.0.3", + "kleur": "4.1.5" + } + }, + "node_modules/@protobufjs/aspromise": { + "version": 
"1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", + "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==" + }, + "node_modules/@protobufjs/base64": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", + "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==" + }, + "node_modules/@protobufjs/codegen": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", + "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==" + }, + "node_modules/@protobufjs/eventemitter": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", + "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==" + }, + "node_modules/@protobufjs/fetch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", + "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", + "dependencies": { + "@protobufjs/aspromise": "^1.1.1", + "@protobufjs/inquire": "^1.1.0" + } + }, + "node_modules/@protobufjs/float": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", + "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==" + }, + "node_modules/@protobufjs/inquire": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", + "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==" + }, + "node_modules/@protobufjs/path": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", + "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==" + }, + "node_modules/@protobufjs/pool": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", + "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==" + }, + "node_modules/@protobufjs/utf8": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", + "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.12.0.tgz", + "integrity": "sha512-+ac02NL/2TCKRrJu2wffk1kZ+RyqxVUlbjSagNgPm94frxtr+XDL12E5Ll1enWskLrtrZ2r8L3wED1orIibV/w==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.12.0.tgz", + "integrity": "sha512-OBqcX2BMe6nvjQ0Nyp7cC90cnumt8PXmO7Dp3gfAju/6YwG0Tj74z1vKrfRz7qAv23nBcYM8BCbhrsWqO7PzQQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.12.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.12.0.tgz", + "integrity": "sha512-X64tZd8dRE/QTrBIEs63kaOBG0b5GVEd3ccoLtyf6IdXtHdh8h+I56C2yC3PtC9Ucnv0CpNFJLqKFVgCYe0lOQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.12.0.tgz", + "integrity": "sha512-cc71KUZoVbUJmGP2cOuiZ9HSOP14AzBAThn3OU+9LcA1+IUqswJyR1cAJj3Mg55HbjZP6OLAIscbQsQLrpgTOg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.12.0.tgz", + "integrity": "sha512-a6w/Y3hyyO6GlpKL2xJ4IOh/7d+APaqLYdMf86xnczU3nurFTaVN9s9jOXQg97BE4nYm/7Ga51rjec5nfRdrvA==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.12.0.tgz", + "integrity": "sha512-0fZBq27b+D7Ar5CQMofVN8sggOVhEtzFUwOwPppQt0k+VR+7UHMZZY4y+64WJ06XOhBTKXtQB/Sv0NwQMXyNAA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.12.0.tgz", + "integrity": "sha512-eTvzUS3hhhlgeAv6bfigekzWZjaEX9xP9HhxB0Dvrdbkk5w/b+1Sxct2ZuDxNJKzsRStSq1EaEkVSEe7A7ipgQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.12.0.tgz", + "integrity": "sha512-ix+qAB9qmrCRiaO71VFfY8rkiAZJL8zQRXveS27HS+pKdjwUfEhqo2+YF2oI+H/22Xsiski+qqwIBxVewLK7sw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.12.0.tgz", + "integrity": "sha512-TenQhZVOtw/3qKOPa7d+QgkeM6xY0LtwzR8OplmyL5LrgTWIXpTQg2Q2ycBf8jm+SFW2Wt/DTn1gf7nFp3ssVA==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.12.0.tgz", + "integrity": "sha512-LfFdRhNnW0zdMvdCb5FNuWlls2WbbSridJvxOvYWgSBOYZtgBfW9UGNJG//rwMqTX1xQE9BAodvMH9tAusKDUw==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.12.0.tgz", + "integrity": "sha512-JPDxovheWNp6d7AHCgsUlkuCKvtu3RB55iNEkaQcf0ttsDU/JZF+iQnYcQJSk/7PtT4mjjVG8N1kpwnI9SLYaw==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.12.0", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.12.0.tgz", + "integrity": "sha512-fjtuvMWRGJn1oZacG8IPnzIV6GF2/XG+h71FKn76OYFqySXInJtseAqdprVTDTyqPxQOG9Exak5/E9Z3+EJ8ZA==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.12.0.tgz", + "integrity": "sha512-ZYmr5mS2wd4Dew/JjT0Fqi2NPB/ZhZ2VvPp7SmvPZb4Y1CG/LRcS6tcRo2cYU7zLK5A7cdbhWnnWmUjoI4qapg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@tauri-apps/api": { + "version": "2.0.0-alpha.13", + "resolved": "https://registry.npmjs.org/@tauri-apps/api/-/api-2.0.0-alpha.13.tgz", + "integrity": "sha512-sGgCkFahF3OZAHoGN5Ozt9WK7wJlbVZSgWpPQKNag4nSOX1+Py6VDRTEWriiJHDiV+gg31CWHnNXRy6TFoZmdA==", + "peer": true, + "engines": { + "node": ">= 18", + "npm": ">= 6.6.0", + "yarn": ">= 1.19.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/tauri" + } + }, + "node_modules/@tauri-apps/plugin-sql": { + "version": "2.0.0-alpha.5", + "resolved": "https://registry.npmjs.org/@tauri-apps/plugin-sql/-/plugin-sql-2.0.0-alpha.5.tgz", + "integrity": "sha512-u4rJZM357zWP+WmE/W1t1iu0/INQ9uqO8+dOFWzLSGqg2zgYcohTeV66hZoeCTRTzsV3qrKI3suWueLu1YKT7A==", + "peer": true, + "dependencies": { + "@tauri-apps/api": "2.0.0-alpha.13" + } + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.6.8", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.8.tgz", + "integrity": "sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==", + "dev": true, + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.5.tgz", + "integrity": "sha512-WXCyOcRtH37HAUkpXhUduaxdm82b4GSlyTqajXviN4EfiuPgNYR109xMCKvpl6zPIpua0DGlMEDCq+g8EdoheQ==", + "dev": true, + "dependencies": { + "@babel/types": "^7.20.7" + } + }, + "node_modules/@types/cross-spawn": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/@types/cross-spawn/-/cross-spawn-6.0.2.tgz", + "integrity": "sha512-KuwNhp3eza+Rhu8IFI5HUXRP0LIhqH5cAjubUvGXXthh4YYBuP2ntwEX+Cz8GJoZUHlKo247wPWOfA9LYEq4cw==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/debug": { + "version": "4.1.8", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.8.tgz", + "integrity": 
"sha512-/vPO1EPOs306Cvhwv7KfVfYvOJqA/S/AXjaHQiJboCZzcNDb+TIJFN9/2C9DZ//ijSKWioNyUxD792QmDJ+HKQ==", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==", + "dev": true + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true + }, + "node_modules/@types/ms": { + "version": "0.7.34", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.34.tgz", + "integrity": "sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==" + }, + "node_modules/@types/node": { + "version": "20.11.19", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.19.tgz", + "integrity": "sha512-7xMnVEcZFu0DikYjWOlRq7NTPETrm7teqUT2WkQjrTIkEgUyyGdWsj/Zg8bEJt5TNklzbPD1X3fqfsHw3SpapQ==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@types/prop-types": { + "version": "15.7.11", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.11.tgz", + "integrity": "sha512-ga8y9v9uyeiLdpKddhxYQkxNDrfvuPrlFb0N1qnZZByvcElJaXthF1UhvCh9TLWJBEHeNtdnbysW7Y6Uq8CVng==", + "dev": true + }, + "node_modules/@types/react": { + "version": "18.2.57", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.57.tgz", + "integrity": "sha512-ZvQsktJgSYrQiMirAN60y4O/LRevIV8hUzSOSNB6gfR3/o3wCBFQx3sPwIYtuDMeiVgsSS3UzCV26tEzgnfvQw==", + "dev": true, + "dependencies": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-dom": { + "version": "18.2.19", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.19.tgz", + "integrity": "sha512-aZvQL6uUbIJpjZk4U8JZGbau9KDeAwMfmhyWorxgBkqDIEf6ROjRozcmPIicqsUwPUjbkDfHKgGee1Lq65APcA==", + "dev": true, + "dependencies": { + "@types/react": "*" + } + }, + "node_modules/@types/scheduler": { + "version": "0.16.8", + "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.8.tgz", + "integrity": "sha512-WZLiwShhwLRmeV6zH+GkbOFT6Z6VklCItrDioxUnv+u4Ll+8vKeFySoFyK/0ctcRpOmwAicELfmys1sDc/Rw+A==", + "dev": true + }, + "node_modules/@types/semver": { + "version": "7.5.7", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.7.tgz", + "integrity": "sha512-/wdoPq1QqkSj9/QOeKkFquEuPzQbHTWAMPH/PaUMB+JuR31lXhlWXRZ52IpfDYVlDOUBvX09uBrPwxGT1hjNBg==", + "dev": true + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz", + "integrity": "sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA==", + "dev": true, + "dependencies": { + "@eslint-community/regexpp": "^4.5.1", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/type-utils": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4", + "graphemer": "^1.4.0", + "ignore": "^5.2.4", + "natural-compare": "^1.4.0", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": 
"opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^6.0.0 || ^6.0.0-alpha", + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.21.0.tgz", + "integrity": "sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz", + "integrity": "sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz", + "integrity": "sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==", + "dev": true, + "dependencies": { + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + "debug": "^4.3.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", + "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", + "dev": true, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz", + "integrity": "sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "9.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.21.0.tgz", + "integrity": "sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@types/json-schema": "^7.0.12", + "@types/semver": "^7.5.0", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "semver": "^7.5.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz", + "integrity": "sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", + "dev": true + }, + "node_modules/@vitejs/plugin-react": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.2.1.tgz", + "integrity": "sha512-oojO9IDc4nCUUi8qIR11KoQm0XFFLIwsRBwHRR4d/88IWghn1y6ckz/bJ8GHDCsYEJee8mDzqtJxh15/cisJNQ==", + "dev": true, + "dependencies": { + "@babel/core": "^7.23.5", + "@babel/plugin-transform-react-jsx-self": "^7.23.3", + "@babel/plugin-transform-react-jsx-source": "^7.23.3", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.14.0" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0" + } + }, + "node_modules/acorn": { + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", + "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": 
"https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "dev": true + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true + }, + "node_modules/array-buffer-byte-length": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.1.tgz", + "integrity": "sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg==", + "dependencies": { + "call-bind": "^1.0.5", + "is-array-buffer": "^3.0.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/arraybuffer.prototype.slice": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.3.tgz", + "integrity": "sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A==", + "dependencies": { + "array-buffer-byte-length": "^1.0.1", + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "es-abstract": "^1.22.3", + "es-errors": "^1.2.1", + "get-intrinsic": "^1.2.3", + "is-array-buffer": "^3.0.4", + "is-shared-array-buffer": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/assert-never": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/assert-never/-/assert-never-1.2.1.tgz", + "integrity": "sha512-TaTivMB6pYI1kXwrFlEhLeGfOqoDNdTxjCdwRfFFkEA30Eu+k48W34nlok2EYWJfFFzqaEmichdNM7th6M5HNw==", + "dev": true + }, + "node_modules/async-mutex": { + "version": "0.4.1", + "resolved": 
"https://registry.npmjs.org/async-mutex/-/async-mutex-0.4.1.tgz", + "integrity": "sha512-WfoBo4E/TbCX1G95XTjbWTE3X2XLG0m1Xbv2cwOtuPdyH9CZvnaA5nCt1ucjaKEgW2A5IF71hxrRhr83Je5xjA==", + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/available-typed-arrays": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", + "dependencies": { + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/base-64": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/base-64/-/base-64-1.0.0.tgz", + "integrity": "sha512-kwDPIFCGx0NZHog36dj+tHiwP4QMzsZ3AgMViUBKI0+V5n4U0ufTCUMhnQ04diaRI8EX/QcPfql7zlhZ7j4zgg==" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/better-sqlite3": { + "version": "8.7.0", + "resolved": "https://registry.npmjs.org/better-sqlite3/-/better-sqlite3-8.7.0.tgz", + "integrity": "sha512-99jZU4le+f3G6aIl6PmmV0cxUIWqKieHxsiF7G34CVFiE+/UabpYqkU0NJIkY/96mQKikHeBjtR27vFfs5JpEw==", + "hasInstallScript": true, + "dependencies": { + "bindings": "^1.5.0", + "prebuild-install": "^7.1.1" + } + }, + "node_modules/bindings": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz", + "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==", + "dependencies": { + "file-uri-to-path": "1.0.0" + } + }, + "node_modules/bl": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", + "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.23.0", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.0.tgz", + "integrity": 
"sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "caniuse-lite": "^1.0.30001587", + "electron-to-chromium": "^1.4.668", + "node-releases": "^2.0.14", + "update-browserslist-db": "^1.0.13" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, + "node_modules/buffer-alloc": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/buffer-alloc/-/buffer-alloc-1.2.0.tgz", + "integrity": "sha512-CFsHQgjtW1UChdXgbyJGtnm+O/uLQeZdtbDo8mfUgYXCHSM1wgrVxXm6bSyrUuErEb+4sYVGCzASBRot7zyrow==", + "dependencies": { + "buffer-alloc-unsafe": "^1.1.0", + "buffer-fill": "^1.0.0" + } + }, + "node_modules/buffer-alloc-unsafe": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/buffer-alloc-unsafe/-/buffer-alloc-unsafe-1.1.0.tgz", + "integrity": "sha512-TEM2iMIEQdJ2yjPJoSIsldnleVaAk1oW3DBVUykyOLsEsFmEc9kn+SFFPz+gl54KQNxlDnAwCXosOS9Okx2xAg==" + }, + "node_modules/buffer-crc32": { + "version": "0.2.13", + "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", + "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==", + "engines": { + "node": "*" + } + }, + "node_modules/buffer-fill": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/buffer-fill/-/buffer-fill-1.0.0.tgz", + "integrity": "sha512-T7zexNBwiiaCOGDg9xNX9PBmjrubblRkENuptryuI64URkXDFum9il/JGL8Lm8wYfAXpredVXXZz7eMHilimiQ==" + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true, + "optional": true, + "peer": true + }, + "node_modules/buffer-writer": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/buffer-writer/-/buffer-writer-2.0.0.tgz", + "integrity": "sha512-a7ZpuTZU1TRtnwyCNW3I5dc0wWNC3VR9S++Ewyk2HHZdrO3CQJqSpd+95Us590V6AL7JqUAH2IwZ/398PmNFgw==", + "devOptional": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/call-bind": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + 
"node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001588", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001588.tgz", + "integrity": "sha512-+hVY9jE44uKLkH0SrUTqxjxqNTOWHsbnQDIKjwkZ3lNTzUUVdBLBGXtj/q5Mp5u98r3droaZAewQuEDzjQdZlQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ] + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chardet": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", + "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==", + "dev": true + }, + "node_modules/chownr": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", + "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==" + }, + "node_modules/cli-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", + "dev": true, + "dependencies": { + "restore-cursor": "^3.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cli-spinners": { + "version": "2.9.2", + "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz", + "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", + "dev": true, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-width": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-4.1.0.tgz", + "integrity": "sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==", + "dev": true, + "engines": { + "node": ">= 12" + } + }, + "node_modules/clone": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", + "integrity": "sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==", + "dev": true, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/code-block-writer": { + "version": "11.0.3", + "resolved": "https://registry.npmjs.org/code-block-writer/-/code-block-writer-11.0.3.tgz", + "integrity": "sha512-NiujjUFB4SwScJq2bwbYUtXbZhBSlY6vYzm++3Q6oC+U+injTqfPYFK8wS9COOmb2lueqp0ZRB4nK1VYeHgNyw==" + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": 
"sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/commander": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-11.1.0.tgz", + "integrity": "sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ==", + "engines": { + "node": ">=16" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, + "node_modules/cosmiconfig": { + "version": "8.3.6", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", + "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==", + "dev": true, + "dependencies": { + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0", + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + }, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/cross-fetch": { + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.8.tgz", + "integrity": "sha512-cvA+JwZoU0Xq+h6WkMvAUqPEYy92Obet6UdKLfW60qn99ftItKjB5T+BkyWOFWe2pUyfQ+IJHmpOTznqk1M6Kg==", + "dependencies": { + "node-fetch": "^2.6.12" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", + "dev": true + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + 
"node_modules/decompress": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/decompress/-/decompress-4.2.1.tgz", + "integrity": "sha512-e48kc2IjU+2Zw8cTb6VZcJQ3lgVbS4uuB1TfCHbiZIP/haNXm+SVyhu+87jts5/3ROpd82GSVCoNs/z8l4ZOaQ==", + "dependencies": { + "decompress-tar": "^4.0.0", + "decompress-tarbz2": "^4.0.0", + "decompress-targz": "^4.0.0", + "decompress-unzip": "^4.0.1", + "graceful-fs": "^4.1.10", + "make-dir": "^1.0.0", + "pify": "^2.3.0", + "strip-dirs": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/decompress-tar": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/decompress-tar/-/decompress-tar-4.1.1.tgz", + "integrity": "sha512-JdJMaCrGpB5fESVyxwpCx4Jdj2AagLmv3y58Qy4GE6HMVjWz1FeVQk1Ct4Kye7PftcdOo/7U7UKzYBJgqnGeUQ==", + "dependencies": { + "file-type": "^5.2.0", + "is-stream": "^1.1.0", + "tar-stream": "^1.5.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress-tarbz2": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/decompress-tarbz2/-/decompress-tarbz2-4.1.1.tgz", + "integrity": "sha512-s88xLzf1r81ICXLAVQVzaN6ZmX4A6U4z2nMbOwobxkLoIIfjVMBg7TeguTUXkKeXni795B6y5rnvDw7rxhAq9A==", + "dependencies": { + "decompress-tar": "^4.1.0", + "file-type": "^6.1.0", + "is-stream": "^1.1.0", + "seek-bzip": "^1.0.5", + "unbzip2-stream": "^1.0.9" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress-tarbz2/node_modules/file-type": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-6.2.0.tgz", + "integrity": "sha512-YPcTBDV+2Tm0VqjybVd32MHdlEGAtuxS3VAYsumFokDSMG+ROT5wawGlnHDoz7bfMcMDt9hxuXvXwoKUx2fkOg==", + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress-targz": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/decompress-targz/-/decompress-targz-4.1.1.tgz", + "integrity": "sha512-4z81Znfr6chWnRDNfFNqLwPvm4db3WuZkqV+UgXQzSngG3CEKdBkw5jrv3axjjL96glyiiKjsxJG3X6WBZwX3w==", + "dependencies": { + "decompress-tar": "^4.1.1", + "file-type": "^5.2.0", + "is-stream": "^1.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress-unzip": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/decompress-unzip/-/decompress-unzip-4.0.1.tgz", + "integrity": "sha512-1fqeluvxgnn86MOh66u8FjbtJpAFv5wgCT9Iw8rcBqQcCo5tO8eiJw7NNTrvt9n4CRBVq7CstiS922oPgyGLrw==", + "dependencies": { + "file-type": "^3.8.0", + "get-stream": "^2.2.0", + "pify": "^2.3.0", + "yauzl": "^2.4.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/decompress-unzip/node_modules/file-type": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-3.9.0.tgz", + "integrity": "sha512-RLoqTXE8/vPmMuTI88DAzhMYC99I8BWv7zYP4A1puo5HIjEJ5EX48ighy4ZyKMG9EDXxBgW6e++cn7d1xuFghA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/deep-equal": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-2.2.3.tgz", + "integrity": "sha512-ZIwpnevOurS8bpT4192sqAowWM76JDKSHYzMLty3BZGSswgq6pBaH3DhCSW5xVAZICZyKdOBPjwww5wfgT/6PA==", + "dev": true, + 
"dependencies": { + "array-buffer-byte-length": "^1.0.0", + "call-bind": "^1.0.5", + "es-get-iterator": "^1.1.3", + "get-intrinsic": "^1.2.2", + "is-arguments": "^1.1.1", + "is-array-buffer": "^3.0.2", + "is-date-object": "^1.0.5", + "is-regex": "^1.1.4", + "is-shared-array-buffer": "^1.0.2", + "isarray": "^2.0.5", + "object-is": "^1.1.5", + "object-keys": "^1.1.1", + "object.assign": "^4.1.4", + "regexp.prototype.flags": "^1.5.1", + "side-channel": "^1.0.4", + "which-boxed-primitive": "^1.0.2", + "which-collection": "^1.0.1", + "which-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==" + }, + "node_modules/defaults": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.4.tgz", + "integrity": "sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==", + "dev": true, + "dependencies": { + "clone": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/detect-libc": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.2.tgz", + "integrity": "sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==", + "engines": { + "node": ">=8" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + 
"node_modules/dotenv": { + "version": "16.4.5", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.5.tgz", + "integrity": "sha512-ZmdL2rui+eB2YwhsWzjInR8LldtZHGDoQ1ugH85ppHKwpUHL7j7rN0Ti9NCnGiQbhaZ11FpR+7ao1dNsmduNUg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/dotenv-flow": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/dotenv-flow/-/dotenv-flow-4.1.0.tgz", + "integrity": "sha512-0cwP9jpQBQfyHwvE0cRhraZMkdV45TQedA8AAUZMsFzvmLcQyc1HPv+oX0OOYwLFjIlvgVepQ+WuQHbqDaHJZg==", + "dependencies": { + "dotenv": "^16.0.0" + }, + "engines": { + "node": ">= 12.0.0" + } + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true + }, + "node_modules/electric-sql": { + "version": "0.10.1", + "resolved": "file:../../clients/typescript/electric-sql-0.10.1.tgz", + "integrity": "sha512-AnbgtjlnI7E6bPw6bCsQui9Ub2K56PdO0z1I89BfPTfqwjbbIYC3104GXmcRqT+dfu34acOG+g7mtK2YaZA7KQ==", + "license": "Apache-2.0", + "dependencies": { + "@electric-sql/prisma-generator": "1.1.4", + "@prisma/client": "4.8.1", + "@tauri-apps/api": "^1.5.3", + "async-mutex": "^0.4.0", + "base-64": "^1.0.0", + "better-sqlite3": "^8.4.0", + "commander": "^11.1.0", + "cross-fetch": "^3.1.5", + "decompress": "^4.2.1", + "dotenv-flow": "^4.1.0", + "events": "^3.3.0", + "exponential-backoff": "^3.1.0", + "frame-stream": "^3.0.1", + "get-port": "^7.0.0", + "jose": "^4.14.4", + "kysely": "^0.27.2", + "lodash.flow": "^3.5.0", + "lodash.groupby": "^4.6.0", + "lodash.isequal": "^4.5.0", + "lodash.mapvalues": "^4.6.0", + "lodash.omitby": "^4.6.0", + "lodash.partition": "^4.6.0", + "lodash.pick": "^4.4.0", + "lodash.throttle": "^4.1.1", + "lodash.uniqwith": "^4.5.0", + "loglevel": "^1.8.1", + "long": "^5.2.0", + "object.hasown": "^1.1.2", + "ohash": "^1.1.2", + "prisma": "4.8.1", + "prompts": "^2.4.2", + "protobufjs": "^7.1.1", + "squel": "^5.13.0", + "tcp-port-used": "^1.0.2", + "text-encoder-lite": "^2.0.0", + "ts-dedent": "^2.2.0", + "ws": "^8.8.1", + "zod": "3.21.1" + }, + "bin": { + "electric-sql": "dist/cli/main.js" + }, + "peerDependencies": { + "@capacitor-community/sqlite": ">= 5.6.2", + "@electric-sql/pglite": ">= 0.1.4", + "@op-engineering/op-sqlite": ">= 2.0.16", + "@tauri-apps/plugin-sql": "2.0.0-alpha.5", + "embedded-postgres": "16.1.1-beta.9", + "expo-sqlite": ">= 13.0.0", + "pg": "^8.11.3", + "react": ">= 16.8.0", + "react-dom": ">= 16.8.0", + "react-native": ">= 0.68.0", + "typeorm": ">=0.3.0", + "vue": ">=3.0.0", + "wa-sqlite": "rhashimoto/wa-sqlite#semver:^0.9.8" + }, + "peerDependenciesMeta": { + "@capacitor-community/sqlite": { + "optional": true + }, + "@op-engineering/op-sqlite": { + "optional": true + }, + "embedded-postgres": { + "optional": true + }, + "expo-sqlite": { + "optional": true + }, + "pg": { + "optional": true + }, + "react": { + "optional": true + }, + "react-native": { + "optional": true + }, + "typeorm": { + "optional": true + }, + "vue": { + "optional": true + }, + "wa-sqlite": { + "optional": true + } + } + }, + "node_modules/electric-sql/node_modules/@tauri-apps/api": { + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/@tauri-apps/api/-/api-1.5.3.tgz", + "integrity": "sha512-zxnDjHHKjOsrIzZm6nO5Xapb/BxqUq1tc7cGkFXsFkGTsSWgCPH1D8mm0XS9weJY2OaR73I3k3S+b7eSzJDfqA==", + "engines": { + "node": 
">= 14.6.0", + "npm": ">= 6.6.0", + "yarn": ">= 1.19.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/tauri" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.4.677", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.677.tgz", + "integrity": "sha512-erDa3CaDzwJOpyvfKhOiJjBVNnMM0qxHq47RheVVwsSQrgBA9ZSGV9kdaOfZDPXcHzhG7lBxhj6A7KvfLJBd6Q==", + "dev": true + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "dependencies": { + "once": "^1.4.0" + } + }, + "node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dev": true, + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-abstract": { + "version": "1.22.4", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.22.4.tgz", + "integrity": "sha512-vZYJlk2u6qHYxBOTjAeg7qUxHdNfih64Uu2J8QqWgXZ2cri0ZpJAkzDUK/q593+mvKwlxyaxr6F1Q+3LKoQRgg==", + "dependencies": { + "array-buffer-byte-length": "^1.0.1", + "arraybuffer.prototype.slice": "^1.0.3", + "available-typed-arrays": "^1.0.6", + "call-bind": "^1.0.7", + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "es-set-tostringtag": "^2.0.2", + "es-to-primitive": "^1.2.1", + "function.prototype.name": "^1.1.6", + "get-intrinsic": "^1.2.4", + "get-symbol-description": "^1.0.2", + "globalthis": "^1.0.3", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.1", + "internal-slot": "^1.0.7", + "is-array-buffer": "^3.0.4", + "is-callable": "^1.2.7", + "is-negative-zero": "^2.0.2", + "is-regex": "^1.1.4", + "is-shared-array-buffer": "^1.0.2", + "is-string": "^1.0.7", + "is-typed-array": "^1.1.13", + "is-weakref": "^1.0.2", + "object-inspect": "^1.13.1", + "object-keys": "^1.1.1", + "object.assign": "^4.1.5", + "regexp.prototype.flags": "^1.5.2", + "safe-array-concat": "^1.1.0", + "safe-regex-test": "^1.0.3", + "string.prototype.trim": "^1.2.8", + "string.prototype.trimend": "^1.0.7", + "string.prototype.trimstart": "^1.0.7", + "typed-array-buffer": "^1.0.1", + "typed-array-byte-length": "^1.0.0", + "typed-array-byte-offset": "^1.0.0", + "typed-array-length": "^1.0.4", + "unbox-primitive": "^1.0.2", + "which-typed-array": "^1.1.14" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": 
"sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-get-iterator": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/es-get-iterator/-/es-get-iterator-1.1.3.tgz", + "integrity": "sha512-sPZmqHBe6JIiTfN5q2pEi//TwxmAFHwj/XEuYjTuse78i8KxaqMTTzxPoFKuzRpDpTJ+0NAbpfenkmH2rePtuw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.3", + "has-symbols": "^1.0.3", + "is-arguments": "^1.1.1", + "is-map": "^2.0.2", + "is-set": "^2.0.2", + "is-string": "^1.0.7", + "isarray": "^2.0.5", + "stop-iteration-iterator": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.3.tgz", + "integrity": "sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ==", + "dependencies": { + "get-intrinsic": "^1.2.4", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-to-primitive": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", + "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", + "dependencies": { + "is-callable": "^1.1.4", + "is-date-object": "^1.0.1", + "is-symbol": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/esbuild": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.19.12.tgz", + "integrity": "sha512-aARqgq8roFBj054KvQr5f1sFu0D65G+miZRCuJyJ0G13Zwx7vRar5Zhn2tkQNzIXcBrNVsv/8stehpj+GAjgbg==", + "dev": true, + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.19.12", + "@esbuild/android-arm": "0.19.12", + "@esbuild/android-arm64": "0.19.12", + "@esbuild/android-x64": "0.19.12", + "@esbuild/darwin-arm64": "0.19.12", + "@esbuild/darwin-x64": "0.19.12", + "@esbuild/freebsd-arm64": "0.19.12", + "@esbuild/freebsd-x64": "0.19.12", + "@esbuild/linux-arm": "0.19.12", + "@esbuild/linux-arm64": "0.19.12", + "@esbuild/linux-ia32": "0.19.12", + "@esbuild/linux-loong64": "0.19.12", + "@esbuild/linux-mips64el": "0.19.12", + "@esbuild/linux-ppc64": "0.19.12", + "@esbuild/linux-riscv64": "0.19.12", + "@esbuild/linux-s390x": "0.19.12", + "@esbuild/linux-x64": "0.19.12", + "@esbuild/netbsd-x64": "0.19.12", + "@esbuild/openbsd-x64": "0.19.12", + "@esbuild/sunos-x64": "0.19.12", + "@esbuild/win32-arm64": "0.19.12", + "@esbuild/win32-ia32": "0.19.12", + "@esbuild/win32-x64": "0.19.12" + } + }, + "node_modules/escalade": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "8.56.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.56.0.tgz", + "integrity": "sha512-Go19xM6T9puCOWntie1/P997aXxFsOi37JIHRWI514Hc6ZnaHGKY9xFhrU65RT6CcBEzZoGG1e6Nq+DT04ZtZQ==", + "dev": true, + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.56.0", + "@humanwhocodes/config-array": "^0.11.13", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz", + "integrity": "sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==", + "dev": true, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0" + } + }, + "node_modules/eslint-plugin-react-refresh": { + "version": "0.4.5", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.5.tgz", + "integrity": "sha512-D53FYKJa+fDmZMtriODxvhwrO+IOqrxoEo21gMA0sjHdU6dPVH4OhyFip9ypl8HOF5RV5KdTo+rBQLvnY2cO8w==", + "dev": true, + "peerDependencies": { + "eslint": ">=7" + } + }, + "node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": 
"sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/eslint/node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", + "dev": true, + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/expand-template": { + "version": "2.0.3", + "resolved": 
"https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz", + "integrity": "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/exponential-backoff": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/exponential-backoff/-/exponential-backoff-3.1.1.tgz", + "integrity": "sha512-dX7e/LHVJ6W3DE1MHWi9S1EYzDESENfLrYohG2G++ovZrYOkm4Knwa0mc1cn84xJOR4KEU0WSchhLbd0UklbHw==" + }, + "node_modules/external-editor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz", + "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", + "dev": true, + "dependencies": { + "chardet": "^0.7.0", + "iconv-lite": "^0.4.24", + "tmp": "^0.0.33" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "dev": true, + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true + }, + "node_modules/fastq": { + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", + "dev": true, + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fd-slicer": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", + "integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==", + "dependencies": { + "pend": "~1.2.0" + } + }, + "node_modules/figures": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", + "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", + "dev": true, + "dependencies": { + "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=8" 
+ }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/figures/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/file-type": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/file-type/-/file-type-5.2.0.tgz", + "integrity": "sha512-Iq1nJ6D2+yIO4c8HHg4fyVb8mAJieo1Oloy1mLLaB2PvezNedhBVm+QU7g0qM42aiMbRXTxKKwGD17rjKNJYVQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/file-uri-to-path": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", + "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==" + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz", + "integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==", + "dev": true + }, + "node_modules/for-each": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.3.tgz", + "integrity": "sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==", + "dependencies": { + "is-callable": "^1.1.3" + } + }, + "node_modules/foreground-child": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz", + "integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": 
"https://github.com/sponsors/isaacs" + } + }, + "node_modules/foreground-child/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/frame-stream": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/frame-stream/-/frame-stream-3.0.1.tgz", + "integrity": "sha512-Fu8Cdbt2hHfb7wp2HBG5AOfMO5qaglHoJuoiEoQKHS+mZtO/IsMiac3wEQtBVDmOLVmCmDeoutXbrfPlpwMiqg==", + "engines": { + "node": ">=14" + } + }, + "node_modules/fs-constants": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", + "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==" + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/function.prototype.name": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.6.tgz", + "integrity": "sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "functions-have-names": "^1.2.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/functions-have-names": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz", + "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/funtypes": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/funtypes/-/funtypes-4.2.0.tgz", + "integrity": "sha512-DvOtjiKvkeuXGV0O8LQh9quUP3bSOTEQPGv537Sao8kDq2rDbg48UsSJ7wlBLPzR2Mn0pV7cyAiq5pYG1oUyCQ==", + "dev": true + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": 
"https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-port": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/get-port/-/get-port-7.0.0.tgz", + "integrity": "sha512-mDHFgApoQd+azgMdwylJrv2DX47ywGq1i5VFJE7fZ0dttNq3iQMfsU4IvEgBHojA3KqEudyu7Vq+oN8kNaNkWw==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/get-stream": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-2.3.1.tgz", + "integrity": "sha512-AUGhbbemXxrZJRD5cDvKtQxLuYaIbNtDTK8YqupCI393Q2KSTreEsLUN3ZxAWFGiKTzL6nKuzfcIvieflUX9qA==", + "dependencies": { + "object-assign": "^4.0.1", + "pinkie-promise": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/get-symbol-description": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.2.tgz", + "integrity": "sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg==", + "dependencies": { + "call-bind": "^1.0.5", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/github-from-package": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz", + "integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==" + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + 
}, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/globalthis": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.3.tgz", + "integrity": "sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA==", + "dependencies": { + "define-properties": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true + }, + "node_modules/has-bigints": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz", + "integrity": "sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": 
"https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.1.tgz", + "integrity": "sha512-1/th4MHjnwncwXsIW6QMzlvYL9kG5e/CpVvLRZe4XPa8TOUNbCELqmvhDmnkNsAjwaG4+I8gJJL0JBvTTLO9qA==", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dev": true, + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/ignore": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", + "dev": true, + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dev": true, + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dev": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ini": { + "version": "1.3.8", + 
"resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==" + }, + "node_modules/inquirer": { + "version": "9.2.15", + "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-9.2.15.tgz", + "integrity": "sha512-vI2w4zl/mDluHt9YEQ/543VTCwPKWiHzKtm9dM2V0NdFcqEexDAjUHzO1oA60HRNaVifGXXM1tRRNluLVHa0Kg==", + "dev": true, + "dependencies": { + "@ljharb/through": "^2.3.12", + "ansi-escapes": "^4.3.2", + "chalk": "^5.3.0", + "cli-cursor": "^3.1.0", + "cli-width": "^4.1.0", + "external-editor": "^3.1.0", + "figures": "^3.2.0", + "lodash": "^4.17.21", + "mute-stream": "1.0.0", + "ora": "^5.4.1", + "run-async": "^3.0.0", + "rxjs": "^7.8.1", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^6.2.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/inquirer/node_modules/chalk": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.3.0.tgz", + "integrity": "sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==", + "dev": true, + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/internal-slot": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.7.tgz", + "integrity": "sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g==", + "dependencies": { + "es-errors": "^1.3.0", + "hasown": "^2.0.0", + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/interrogator": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/interrogator/-/interrogator-2.0.0.tgz", + "integrity": "sha512-1qxXpxXznMEpBz4SwV6H16jlCdzDhj2Oww2IEpecZ1ouu3Hr34JOibSRmKe+8fdWZiicaAH80hUispXEuCb4Jw==", + "dev": true, + "dependencies": { + "inquirer": "^9.1.4" + } + }, + "node_modules/ip-regex": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ip-regex/-/ip-regex-4.3.0.tgz", + "integrity": "sha512-B9ZWJxHHOHUhUjCPrMpLD4xEq35bUTClHM1S6CBU5ixQnkZmwipwgc96vAd7AAGM9TGHvJR+Uss+/Ak6UphK+Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-arguments": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz", + "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-array-buffer": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.4.tgz", + "integrity": "sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw==", + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true + }, + "node_modules/is-bigint": { + "version": "1.0.4", + "resolved": 
"https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", + "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==", + "dependencies": { + "has-bigints": "^1.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-boolean-object": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz", + "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==", + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-callable": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-date-object": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz", + "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-interactive": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-1.0.0.tgz", + "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-map": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.2.tgz", + "integrity": "sha512-cOZFQQozTha1f4MxLFzlgKYPTyj26picdZTx82hbc/Xf4K/tZOOXSCkMvU4pKioRXGDLJRn0GM7Upe7kR721yg==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-natural-number": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-natural-number/-/is-natural-number-4.0.1.tgz", + "integrity": "sha512-Y4LTamMe0DDQIIAlaer9eKebAlDSV6huy+TWhJVPlzZh2o4tRP5SQWFlLn5N0To4mDD22/qdOq+veo1cSISLgQ==" + }, + "node_modules/is-negative-zero": { + "version": "2.0.3", + "resolved": 
"https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.3.tgz", + "integrity": "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-number-object": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.7.tgz", + "integrity": "sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-promise": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-4.0.0.tgz", + "integrity": "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==", + "dev": true + }, + "node_modules/is-regex": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", + "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==", + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-set": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.2.tgz", + "integrity": "sha512-+2cnTEZeY5z/iXGbLhPrOAaK/Mau5k5eXq9j14CpRTftq0pAJu2MwVRSZhyZWBzx3o6X795Lz6Bpb6R0GKf37g==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-shared-array-buffer": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.3.tgz", + "integrity": "sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg==", + "dependencies": { + "call-bind": "^1.0.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-stream": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", + "integrity": "sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-string": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz", + "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-symbol": { + "version": "1.0.4", + "resolved": 
"https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz", + "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==", + "dependencies": { + "has-symbols": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-typed-array": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.13.tgz", + "integrity": "sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==", + "dependencies": { + "which-typed-array": "^1.1.14" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-url": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/is-url/-/is-url-1.2.4.tgz", + "integrity": "sha512-ITvGim8FhRiYe4IQ5uHSkj7pVaPDrCTkNd3yq3cV7iZAcJdHTUMPMEHcqSOy9xZ9qFenQCvi+2wjH9a1nXqHww==" + }, + "node_modules/is-weakmap": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.1.tgz", + "integrity": "sha512-NSBR4kH5oVj1Uwvv970ruUkCV7O1mzgVFO4/rev2cLRda9Tm9HrL70ZPut4rOHgY0FNrUu9BCbXA2sdQ+x0chA==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakref": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz", + "integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==", + "dependencies": { + "call-bind": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-weakset": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.2.tgz", + "integrity": "sha512-t2yVvttHkQktwnNNmBQ98AhENLdPUTDTE21uPqAQ0ARwQfGeQKRVS0NNurH7bTf7RrvcVn1OOge45CnBeHCSmg==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "get-intrinsic": "^1.1.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is2": { + "version": "2.0.9", + "resolved": "https://registry.npmjs.org/is2/-/is2-2.0.9.tgz", + "integrity": "sha512-rZkHeBn9Zzq52sd9IUIV3a5mfwBY+o2HePMh0wkGBM4z4qjvy2GwVxQ6nNXSfw6MmVP6gf1QIlWjiOavhM3x5g==", + "dependencies": { + "deep-is": "^0.1.3", + "ip-regex": "^4.1.0", + "is-url": "^1.2.4" + }, + "engines": { + "node": ">=v0.10.0" + } + }, + "node_modules/isarray": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", + "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/jackspeak": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz", + "integrity": 
"sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==", + "dev": true, + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/jose": { + "version": "4.15.4", + "resolved": "https://registry.npmjs.org/jose/-/jose-4.15.4.tgz", + "integrity": "sha512-W+oqK4H+r5sITxfxpSU+MMdr/YSWGvgZMQDIsNoBDGGy4i7GBPTtvFKibQzW06n3U3TqHjhvBJsirShsEJ6eeQ==", + "funding": { + "url": "https://github.com/sponsors/panva" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "dev": true, + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kleur": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz", + "integrity": 
"sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/kysely": { + "version": "0.27.3", + "resolved": "https://registry.npmjs.org/kysely/-/kysely-0.27.3.tgz", + "integrity": "sha512-lG03Ru+XyOJFsjH3OMY6R/9U38IjDPfnOfDgO3ynhbDr+Dz8fak+X6L62vqu3iybQnj+lG84OttBuU9KY3L9kA==", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lodash.flow": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/lodash.flow/-/lodash.flow-3.5.0.tgz", + "integrity": "sha512-ff3BX/tSioo+XojX4MOsOMhJw0nZoUEF011LX8g8d3gvjVbxd89cCio4BCXronjxcTUIJUoqKEUA+n4CqvvRPw==" + }, + "node_modules/lodash.groupby": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/lodash.groupby/-/lodash.groupby-4.6.0.tgz", + "integrity": "sha512-5dcWxm23+VAoz+awKmBaiBvzox8+RqMgFhi7UvX9DHZr2HdxHXM/Wrf8cfKpsW37RNrvtPn6hSwNqurSILbmJw==" + }, + "node_modules/lodash.isequal": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.isequal/-/lodash.isequal-4.5.0.tgz", + "integrity": "sha512-pDo3lu8Jhfjqls6GkMgpahsF9kCyayhgykjyLMNFTKWrpVdAQtYyB4muAMWozBB4ig/dtWAmsMxLEI8wuz+DYQ==" + }, + "node_modules/lodash.mapvalues": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/lodash.mapvalues/-/lodash.mapvalues-4.6.0.tgz", + "integrity": "sha512-JPFqXFeZQ7BfS00H58kClY7SPVeHertPE0lNuCyZ26/XlN8TvakYD7b9bGyNmXbT/D3BbtPAAmq90gPWqLkxlQ==" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true + }, + "node_modules/lodash.omitby": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/lodash.omitby/-/lodash.omitby-4.6.0.tgz", + "integrity": "sha512-5OrRcIVR75M288p4nbI2WLAf3ndw2GD9fyNv3Bc15+WCxJDdZ4lYndSxGd7hnG6PVjiJTeJE2dHEGhIuKGicIQ==" + }, + "node_modules/lodash.partition": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/lodash.partition/-/lodash.partition-4.6.0.tgz", + "integrity": 
"sha512-35L3dSF3Q6V1w5j6V3NhNlQjzsRDC/pYKCTdYTmwqSib+Q8ponkAmt/PwEOq3EmI38DSCl+SkIVwLd+uSlVdrg==" + }, + "node_modules/lodash.pick": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/lodash.pick/-/lodash.pick-4.4.0.tgz", + "integrity": "sha512-hXt6Ul/5yWjfklSGvLQl8vM//l3FtyHZeuelpzK6mm99pNvN9yTDruNZPEJZD1oWrqo+izBmB7oUfWgcCX7s4Q==" + }, + "node_modules/lodash.throttle": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/lodash.throttle/-/lodash.throttle-4.1.1.tgz", + "integrity": "sha512-wIkUCfVKpVsWo3JSZlc+8MB5it+2AN5W8J7YVMST30UrvcQNZ1Okbj+rbVniijTWE6FGYy4XJq/rHkas8qJMLQ==" + }, + "node_modules/lodash.uniqwith": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.uniqwith/-/lodash.uniqwith-4.5.0.tgz", + "integrity": "sha512-7lYL8bLopMoy4CTICbxygAUq6CdRJ36vFc80DucPueUee+d5NBRxz3FdT9Pes/HEx5mPoT9jwnsEJWz1N7uq7Q==" + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/loglevel": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.9.1.tgz", + "integrity": "sha512-hP3I3kCrDIMuRwAwHltphhDM1r8i55H33GgqjXbrisuJhF4kRhW1dNuxsRklp4bXl8DSdLaNLuiL4A/LWRfxvg==", + "engines": { + "node": ">= 0.6.0" + }, + "funding": { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/loglevel" + } + }, + "node_modules/long": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/long/-/long-5.2.3.tgz", + "integrity": "sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==" + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/make-dir": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-1.3.0.tgz", + "integrity": "sha512-2w31R7SJtieJJnQtGc7RVL2StM2vGYVfqUOvUDxH6bC6aJTxPxTF0GnIgCyu7tjockiUWAYQRbxa7vKn34s5sQ==", + "dependencies": { + "pify": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/make-dir/node_modules/pify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": "sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg==", + "engines": { + "node": ">=4" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { 
+ "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dev": true, + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/minimatch": { + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", + "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/minipass": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.0.4.tgz", + "integrity": "sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==", + "dev": true, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/mkdirp-classic": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", + "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==" + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/mute-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-1.0.0.tgz", + "integrity": "sha512-avsJQhyd+680gKXyG/sQc0nXaC6rBkPOfyHYcFb9+hdkqQkR9bdnkJ0AMZhke0oesPqIO+mFFJ+IdBc7mst4IA==", + "dev": true, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "dev": true, + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } 
+ ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/napi-build-utils": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-1.0.2.tgz", + "integrity": "sha512-ONmRUqK7zj7DWX0D9ADe03wbwOBZxNAfF20PlGfCWQcD3+/MakShIHrMqx9YwPTfxDdF1zLeL+RGZiR9kGMLdg==" + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true + }, + "node_modules/node-abi": { + "version": "3.55.0", + "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.55.0.tgz", + "integrity": "sha512-uPEjtyh2tFEvWYt4Jw7McOD5FPcHkcxm/tHZc5PWaDB3JYq0rGFUbgaAK+CT5pYpQddBfsZVWI08OwoRfdfbcQ==", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-releases": { + "version": "2.0.14", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz", + "integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==", + "dev": true + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-is": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.5.tgz", + "integrity": "sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.5.tgz", + "integrity": "sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==", + "dependencies": { + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "has-symbols": "^1.0.3", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { 
+ "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object.hasown": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.3.tgz", + "integrity": "sha512-fFI4VcYpRHvSLXxP7yiZOMAd331cPfd2p7PFDVbgUsYOfCT3tICVqXWngbjr4m49OvsBwUBQ6O2uQoJvy3RexA==", + "dependencies": { + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ohash": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/ohash/-/ohash-1.1.3.tgz", + "integrity": "sha512-zuHHiGTYTA1sYJ/wZN+t5HKZaH23i4yI1HMwbuXm24Nid7Dv0KcuRlKoNKS9UNfAVSBlnGLcuQrnOKWOZoEGaw==" + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/optionator": { + "version": "0.9.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.3.tgz", + "integrity": "sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==", + "dev": true, + "dependencies": { + "@aashutoshrathi/word-wrap": "^1.2.3", + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/ora": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/ora/-/ora-5.4.1.tgz", + "integrity": "sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==", + "dev": true, + "dependencies": { + "bl": "^4.1.0", + "chalk": "^4.1.0", + "cli-cursor": "^3.1.0", + "cli-spinners": "^2.5.0", + "is-interactive": "^1.0.0", + "is-unicode-supported": "^0.1.0", + "log-symbols": "^4.1.0", + "strip-ansi": "^6.0.0", + "wcwidth": "^1.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/os-tmpdir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + 
"funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/packet-reader": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/packet-reader/-/packet-reader-1.0.0.tgz", + "integrity": "sha512-HAKu/fG3HpHFO0AA8WE8q2g+gBJaZ9MG7fcKk+IJPLTGAD6Psw4443l+9DGRbOIh3/aXr7Phy0TjilYivJo5XQ==", + "devOptional": true + }, + "node_modules/parameter-reducers": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/parameter-reducers/-/parameter-reducers-2.1.0.tgz", + "integrity": "sha512-aj9V6DUnNbj4YEmVxloPLX9duhklIC+SIOVUrVdaT3WfgEownET+TYg/JsjANQUNGe46dmOCHEKiuycL36cOnw==", + "dev": true + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-scurry": { + "version": "1.10.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.10.1.tgz", + "integrity": "sha512-MkhCqzzBEpPvxxQ71Md0b1Kk51W01lrYvlMzSUaIzNsODdd7mqhiimSZlr+VegAz5Z6Vzt9Xg2ttE//XBhH3EQ==", + "dev": true, + "dependencies": { + "lru-cache": "^9.1.1 || ^10.0.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/lru-cache": { + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.2.0.tgz", + "integrity": "sha512-2bIM8x+VAf6JT4bKAljS1qUWgMsqZRPGJS6FSahIMPVvctcNhyVp7AJu7quxOW9jwkryBReKZY5tY5JYv2n/7Q==", + "dev": true, + "engines": { + "node": "14 || >=16.14" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "engines": { + "node": ">=8" + } + 
}, + "node_modules/pend": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", + "integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==" + }, + "node_modules/pg": { + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/pg/-/pg-8.11.3.tgz", + "integrity": "sha512-+9iuvG8QfaaUrrph+kpF24cXkH1YOOUeArRNYIxq1viYHZagBxrTno7cecY1Fa44tJeZvaoG+Djpkc3JwehN5g==", + "devOptional": true, + "dependencies": { + "buffer-writer": "2.0.0", + "packet-reader": "1.0.0", + "pg-connection-string": "^2.6.2", + "pg-pool": "^3.6.1", + "pg-protocol": "^1.6.0", + "pg-types": "^2.1.0", + "pgpass": "1.x" + }, + "engines": { + "node": ">= 8.0.0" + }, + "optionalDependencies": { + "pg-cloudflare": "^1.1.1" + }, + "peerDependencies": { + "pg-native": ">=3.0.1" + }, + "peerDependenciesMeta": { + "pg-native": { + "optional": true + } + } + }, + "node_modules/pg-cloudflare": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/pg-cloudflare/-/pg-cloudflare-1.1.1.tgz", + "integrity": "sha512-xWPagP/4B6BgFO+EKz3JONXv3YDgvkbVrGw2mTo3D6tVDQRh1e7cqVGvyR3BE+eQgAvx1XhW/iEASj4/jCWl3Q==", + "dev": true, + "optional": true + }, + "node_modules/pg-connection-string": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/pg-connection-string/-/pg-connection-string-2.6.2.tgz", + "integrity": "sha512-ch6OwaeaPYcova4kKZ15sbJ2hKb/VP48ZD2gE7i1J+L4MspCtBMAx8nMgz7bksc7IojCIIWuEhHibSMFH8m8oA==", + "devOptional": true + }, + "node_modules/pg-cursor": { + "version": "2.10.3", + "resolved": "https://registry.npmjs.org/pg-cursor/-/pg-cursor-2.10.3.tgz", + "integrity": "sha512-rDyBVoqPVnx/PTmnwQAYgusSeAKlTL++gmpf5klVK+mYMFEqsOc6VHHZnPKc/4lOvr4r6fiMuoxSFuBF1dx4FQ==", + "dev": true, + "peerDependencies": { + "pg": "^8" + } + }, + "node_modules/pg-int8": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/pg-int8/-/pg-int8-1.0.1.tgz", + "integrity": "sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==", + "devOptional": true, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/pg-pool": { + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/pg-pool/-/pg-pool-3.6.1.tgz", + "integrity": "sha512-jizsIzhkIitxCGfPRzJn1ZdcosIt3pz9Sh3V01fm1vZnbnCMgmGl5wvGGdNN2EL9Rmb0EcFoCkixH4Pu+sP9Og==", + "devOptional": true, + "peerDependencies": { + "pg": ">=8.0" + } + }, + "node_modules/pg-protocol": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/pg-protocol/-/pg-protocol-1.6.0.tgz", + "integrity": "sha512-M+PDm637OY5WM307051+bsDia5Xej6d9IR4GwJse1qA1DIhiKlksvrneZOYQq42OM+spubpcNYEo2FcKQrDk+Q==", + "devOptional": true + }, + "node_modules/pg-types": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/pg-types/-/pg-types-2.2.0.tgz", + "integrity": "sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==", + "devOptional": true, + "dependencies": { + "pg-int8": "1.0.1", + "postgres-array": "~2.0.0", + "postgres-bytea": "~1.0.0", + "postgres-date": "~1.0.4", + "postgres-interval": "^1.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/pgpass": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/pgpass/-/pgpass-1.0.5.tgz", + "integrity": "sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==", + "devOptional": true, + "dependencies": { + "split2": "^4.1.0" + } + }, + "node_modules/picocolors": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", + "dev": true + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pinkie": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz", + "integrity": "sha512-MnUuEycAemtSaeFSjXKW/aroV7akBbY+Sv+RkyqFjgAe73F+MR0TBWKBRDkmfWq/HiFmdavfZ1G7h4SPZXaCSg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pinkie-promise": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", + "integrity": "sha512-0Gni6D4UcLTbv9c57DfxDGdr41XfgUjqWZu492f0cIGr16zDU06BWP/RAEvOuo7CQ0CNjHaLlM59YJJFm3NWlw==", + "dependencies": { + "pinkie": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pirates": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", + "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/possible-typed-array-names": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz", + "integrity": "sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/postcss": { + "version": "8.4.35", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.35.tgz", + "integrity": "sha512-u5U8qYpBCpN13BsiEB0CbR1Hhh4Gc0zLFuedrHJKMctHCHAGrMdG0PRM/KErzAL3CU6/eckEtmHNB3x6e3c0vA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.0.0", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postgres-array": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/postgres-array/-/postgres-array-2.0.0.tgz", + "integrity": "sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==", + "devOptional": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/postgres-bytea": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/postgres-bytea/-/postgres-bytea-1.0.0.tgz", + "integrity": "sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==", + "devOptional": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/postgres-date": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/postgres-date/-/postgres-date-1.0.7.tgz", + 
"integrity": "sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==", + "devOptional": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/postgres-interval": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/postgres-interval/-/postgres-interval-1.2.0.tgz", + "integrity": "sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==", + "devOptional": true, + "dependencies": { + "xtend": "^4.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/prebuild-install": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.1.tgz", + "integrity": "sha512-jAXscXWMcCK8GgCoHOfIr0ODh5ai8mj63L2nWrjuAgXE6tDyYGnx4/8o/rCgU+B4JSyZBKbeZqzhtwtC3ovxjw==", + "dependencies": { + "detect-libc": "^2.0.0", + "expand-template": "^2.0.3", + "github-from-package": "0.0.0", + "minimist": "^1.2.3", + "mkdirp-classic": "^0.5.3", + "napi-build-utils": "^1.0.1", + "node-abi": "^3.3.0", + "pump": "^3.0.0", + "rc": "^1.2.7", + "simple-get": "^4.0.0", + "tar-fs": "^2.0.0", + "tunnel-agent": "^0.6.0" + }, + "bin": { + "prebuild-install": "bin.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prisma": { + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/prisma/-/prisma-4.8.1.tgz", + "integrity": "sha512-ZMLnSjwulIeYfaU1O6/LF6PEJzxN5par5weykxMykS9Z6ara/j76JH3Yo2AH3bgJbPN4Z6NeCK9s5fDkzf33cg==", + "hasInstallScript": true, + "dependencies": { + "@prisma/engines": "4.8.1" + }, + "bin": { + "prisma": "build/index.js", + "prisma2": "build/index.js" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/prompts/node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "engines": { + "node": ">=6" + } + }, + "node_modules/protobufjs": { + "version": "7.2.6", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.2.6.tgz", + "integrity": "sha512-dgJaEDDL6x8ASUZ1YqWciTRrdOuYNzoOf27oHNfdyvKqHr5i0FV7FSLU+aIeFjyFgVxrpTOtQUi0BLLBymZaBw==", + "hasInstallScript": true, + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + 
"@types/node": ">=13.7.0", + "long": "^5.0.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/pump": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/rc/node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", + "integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", + "integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.0" + }, + "peerDependencies": { + "react": "^18.2.0" + } + }, + "node_modules/react-refresh": { + "version": "0.14.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.14.0.tgz", + "integrity": "sha512-wViHqhAd8OHeLS/IRMJjTSDHF3U9eWi62F/MledQGPdJGDhodXJ9PBLNGr6WWL7qlH12Mt3TyTpbS+hGXMjCzQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/regexp.prototype.flags": { + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.2.tgz", + "integrity": 
"sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw==", + "dependencies": { + "call-bind": "^1.0.6", + "define-properties": "^1.2.1", + "es-errors": "^1.3.0", + "set-function-name": "^2.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/restore-cursor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", + "dev": true, + "dependencies": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true, + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dev": true, + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rollup": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.12.0.tgz", + "integrity": "sha512-wz66wn4t1OHIJw3+XU7mJJQV/2NAfw5OAk6G6Hoo3zcvz/XOfQ52Vgi+AN4Uxoxi0KBBwk2g8zPrTDA4btSB/Q==", + "dev": true, + "dependencies": { + "@types/estree": "1.0.5" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.12.0", + "@rollup/rollup-android-arm64": "4.12.0", + "@rollup/rollup-darwin-arm64": "4.12.0", + "@rollup/rollup-darwin-x64": "4.12.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.12.0", + "@rollup/rollup-linux-arm64-gnu": "4.12.0", + "@rollup/rollup-linux-arm64-musl": "4.12.0", + "@rollup/rollup-linux-riscv64-gnu": "4.12.0", + "@rollup/rollup-linux-x64-gnu": "4.12.0", + "@rollup/rollup-linux-x64-musl": "4.12.0", + "@rollup/rollup-win32-arm64-msvc": "4.12.0", + "@rollup/rollup-win32-ia32-msvc": "4.12.0", + "@rollup/rollup-win32-x64-msvc": "4.12.0", + "fsevents": "~2.3.2" + } + }, + "node_modules/run-async": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/run-async/-/run-async-3.0.0.tgz", + "integrity": "sha512-540WwVDOMxA6dN6We19EcT9sc3hkXPw5mzRNGM3FkdN/vtE9NFvj5lFAPNwUDmJjXidm3v7TC1cTE7t17Ulm1Q==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + 
"type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/rxjs": { + "version": "7.8.1", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", + "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", + "dev": true, + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/safe-array-concat": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.0.tgz", + "integrity": "sha512-ZdQ0Jeb9Ofti4hbt5lX3T2JcAamT9hfzYU1MNB+z/jaEbB6wfFfPIR/zEORmZqobkCCJhSjodobH6WHNmJ97dg==", + "dependencies": { + "call-bind": "^1.0.5", + "get-intrinsic": "^1.2.2", + "has-symbols": "^1.0.3", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">=0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safe-regex-test": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.3.tgz", + "integrity": "sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw==", + "dependencies": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "is-regex": "^1.1.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true + }, + "node_modules/scheduler": { + "version": "0.23.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", + "integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/seek-bzip": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/seek-bzip/-/seek-bzip-1.0.6.tgz", + "integrity": "sha512-e1QtP3YL5tWww8uKaOCQ18UxIT2laNBXHjV/S2WYCiK4udiv8lkG89KRIoCjUagnAmCBurjF4zEVX2ByBbnCjQ==", + "dependencies": { + "commander": "^2.8.1" + }, + "bin": { + "seek-bunzip": "bin/seek-bunzip", + "seek-table": "bin/seek-bzip-table" + } + }, + "node_modules/seek-bzip/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + }, + "node_modules/semver": { + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver/node_modules/lru-cache": { + "version": "6.0.0", + 
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/semver/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, + "node_modules/set-function-length": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.1.tgz", + "integrity": "sha512-j4t6ccc+VsKwYHso+kElc5neZpjtq9EnRICFZtWyBsLojhmeF/ZBd/elqm22WJh/BziDe/SBiOeAt0m2mfLD0g==", + "dependencies": { + "define-data-property": "^1.1.2", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.3", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-function-name": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.2.tgz", + "integrity": "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "functions-have-names": "^1.2.3", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.5.tgz", + "integrity": "sha512-QcgiIWV4WV7qWExbN5llt6frQB/lBven9pqliLXfGPB+K9ZYXxDozp0wLkHS24kWCm+6YXH/f0HhnObZnZOBnQ==", + "dependencies": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true + }, + "node_modules/simple-concat": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", + "integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/simple-get": { + "version": "4.0.1", + "resolved": 
"https://registry.npmjs.org/simple-get/-/simple-get-4.0.1.tgz", + "integrity": "sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "dependencies": { + "decompress-response": "^6.0.0", + "once": "^1.3.1", + "simple-concat": "^1.0.0" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map-js": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", + "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dev": true, + "optional": true, + "peer": true, + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/source-map-support/node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "optional": true, + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/split2": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/split2/-/split2-4.2.0.tgz", + "integrity": "sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==", + "devOptional": true, + "engines": { + "node": ">= 10.x" + } + }, + "node_modules/squel": { + "version": "5.13.0", + "resolved": "https://registry.npmjs.org/squel/-/squel-5.13.0.tgz", + "integrity": "sha512-Fzd8zqbuqNwzodO3yO6MkX8qiDoVBuwqAaa3eKNz4idhBf24IQHbatBhLUiHAGGl962eGvPVRxzRuFWZlSf49w==", + "deprecated": "No longer maintained", + "engines": { + "node": ">= 0.12.0" + } + }, + "node_modules/stop-iteration-iterator": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.0.0.tgz", + "integrity": "sha512-iCGQj+0l0HOdZ2AEeBADlsRC+vsnDsZsbdSiH1yNSjcfKM7fdpCMfqAL/dwF5BLiw/XhRft/Wax6zQbhq2BcjQ==", + "dev": true, + "dependencies": { + "internal-slot": "^1.0.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": 
"https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string.prototype.trim": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.8.tgz", + "integrity": "sha512-lfjY4HcixfQXOfaqCvcBuOIapyaroTXhbkfJN3gcB1OtyupngWK4sEET9Knd0cXd28kTUqu/kHoV4HKSJdnjiQ==", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimend": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.7.tgz", + "integrity": "sha512-Ni79DqeB72ZFq1uH/L6zJ+DKZTkOtPIHovb3YZHQViE+HDouuU4mBrLOLDn5Dde3RF8qw5qVETEjhu9locMLvA==", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimstart": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.7.tgz", + "integrity": "sha512-NGhtDFu3jCEm7B4Fy0DpLewdJQOZcQ0rGbwQ/+stjnrp2i+rlKeCvos9hOIeCmqwratM47OBxY7uFZzjxHXmrg==", + "dependencies": { + "call-bind": "^1.0.2", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-dirs": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/strip-dirs/-/strip-dirs-2.1.0.tgz", + "integrity": "sha512-JOCxOeKLm2CAS73y/U4ZeZPTkE+gNVCzKt7Eox84Iej1LT/2pTWYpZKJuxwQpvX1LiZb1xokNR7RLfuBAa7T3g==", + "dependencies": { + "is-natural-number": "^4.0.1" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + 
"engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/sucrase": { + "version": "3.35.0", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz", + "integrity": "sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==", + "dev": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "glob": "^10.3.10", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/sucrase/node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/sucrase/node_modules/glob": { + "version": "10.3.10", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.10.tgz", + "integrity": "sha512-fa46+tv1Ak0UPK1TOy/pZrIybNNt4HCv7SDzwyfiOZkvZLEbjsZkJBPtDHVshZjbecAoAGSC20MjLDG/qr679g==", + "dev": true, + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^2.3.5", + "minimatch": "^9.0.1", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0", + "path-scurry": "^1.10.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tar-fs": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz", + "integrity": "sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==", + "dependencies": { + "chownr": "^1.1.1", + "mkdirp-classic": "^0.5.2", + "pump": "^3.0.0", + "tar-stream": "^2.1.4" + } + }, + "node_modules/tar-fs/node_modules/tar-stream": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", + "integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", + "dependencies": { + "bl": "^4.0.3", + "end-of-stream": "^1.4.1", + "fs-constants": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.1.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/tar-stream": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-1.6.2.tgz", + "integrity": "sha512-rzS0heiNf8Xn7/mpdSVVSMAWAoy9bfb1WOTYC78Z0UQKeKa/CWS8FOq0lKGNa8DWKAn9gxjCvMLYc5PGXYlK2A==", + "dependencies": { + "bl": "^1.0.0", + "buffer-alloc": "^1.2.0", + "end-of-stream": "^1.0.0", + "fs-constants": "^1.0.0", + "readable-stream": "^2.3.0", + "to-buffer": "^1.1.1", + "xtend": "^4.0.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/tar-stream/node_modules/bl": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/bl/-/bl-1.2.3.tgz", + "integrity": 
"sha512-pvcNpa0UU69UT341rO6AYy4FVAIkUHuZXRIWbq+zHnsVcRzDDjIAhGuuYoi0d//cwIwtt4pkpKycWEfjdV+vww==", + "dependencies": { + "readable-stream": "^2.3.5", + "safe-buffer": "^5.1.1" + } + }, + "node_modules/tar-stream/node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + }, + "node_modules/tar-stream/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/tar-stream/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/tar-stream/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/tcp-port-used": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/tcp-port-used/-/tcp-port-used-1.0.2.tgz", + "integrity": "sha512-l7ar8lLUD3XS1V2lfoJlCBaeoaWo/2xfYt81hM7VlvR4RrMVFqfmzfhLVk40hAb368uitje5gPtBRL1m/DGvLA==", + "dependencies": { + "debug": "4.3.1", + "is2": "^2.0.6" + } + }, + "node_modules/tcp-port-used/node_modules/debug": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz", + "integrity": "sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/terser": { + "version": "5.29.1", + "resolved": "https://registry.npmjs.org/terser/-/terser-5.29.1.tgz", + "integrity": "sha512-lZQ/fyaIGxsbGxApKmoPTODIzELy3++mXhS5hOqaAWZjQtpq/hFHAc+rm29NND1rYRxRWKcjuARNwULNXa5RtQ==", + "dev": true, + "optional": true, + "peer": true, + "dependencies": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.8.2", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/terser/node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "dev": true, + "optional": true, + "peer": true + }, + "node_modules/text-encoder-lite": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/text-encoder-lite/-/text-encoder-lite-2.0.0.tgz", + "integrity": "sha512-bo08ND8LlBwPeU23EluRUcO3p2Rsb/eN5EIfOVqfRmblNDEVKK5IzM9Qfidvo+odT0hhV8mpXQcP/M5MMzABXw==" + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + 
"integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true + }, + "node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "dev": true, + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "dev": true, + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==" + }, + "node_modules/tmp": { + "version": "0.0.33", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", + "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", + "dev": true, + "dependencies": { + "os-tmpdir": "~1.0.2" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/to-buffer": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/to-buffer/-/to-buffer-1.1.1.tgz", + "integrity": "sha512-lx9B5iv7msuFYE3dytT+KE5tap+rNYw+K4jVkb9R/asAb+pbBSM17jtunHplhBe6RRJdZx3Pn2Jph24O32mOVg==" + }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/ts-api-utils": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.2.1.tgz", + "integrity": "sha512-RIYA36cJn2WiH9Hy77hdF9r7oEwxAtB/TS9/S4Qd90Ap4z5FSiin5zEiTL44OII1Y3IIlEvxwxFUVgrHSZ/UpA==", + "dev": true, + "engines": { + "node": ">=16" + }, + "peerDependencies": { + "typescript": ">=4.2.0" + } + }, + "node_modules/ts-dedent": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/ts-dedent/-/ts-dedent-2.2.0.tgz", + "integrity": "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==", + "engines": { + "node": ">=6.10" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "dev": true + }, + "node_modules/tslib": { + "version": "2.6.2", + "resolved": 
"https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + }, + "node_modules/tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", + "dependencies": { + "safe-buffer": "^5.0.1" + }, + "engines": { + "node": "*" + } + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typed-array-buffer": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.2.tgz", + "integrity": "sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ==", + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/typed-array-byte-length": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.1.tgz", + "integrity": "sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw==", + "dependencies": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-byte-offset": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.2.tgz", + "integrity": "sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA==", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-length": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.5.tgz", + "integrity": "sha512-yMi0PlwuznKHxKmcpoOdeLwxBoVPkqZxd7q2FgMkmD3bNwvF5VW0+UlUQ1k1vmktTu4Yu13Q0RIxEP8+B+wloA==", + "dependencies": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13", + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typescript": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.3.3.tgz", + "integrity": 
"sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw==", + "dev": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/unbox-primitive": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.2.tgz", + "integrity": "sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==", + "dependencies": { + "call-bind": "^1.0.2", + "has-bigints": "^1.0.2", + "has-symbols": "^1.0.3", + "which-boxed-primitive": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/unbzip2-stream": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/unbzip2-stream/-/unbzip2-stream-1.4.3.tgz", + "integrity": "sha512-mlExGW4w71ebDJviH16lQLtZS32VKqsSfk80GCfUlwT/4/hNRFsoscrF/c++9xinkMzECL1uL9DDwXqFWkruPg==", + "dependencies": { + "buffer": "^5.2.1", + "through": "^2.3.8" + } + }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, + "node_modules/update-browserslist-db": { + "version": "1.0.13", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz", + "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, + "node_modules/vite": { + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.1.4.tgz", + "integrity": "sha512-n+MPqzq+d9nMVTKyewqw6kSt+R3CkvF9QAKY8obiQn8g1fwTscKxyfaYnC632HtBXAQGc1Yjomphwn1dtwGAHg==", + "dev": true, + "dependencies": { + "esbuild": "^0.19.3", + "postcss": "^8.4.35", + "rollup": "^4.2.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "stylus": { + 
"optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/wa-sqlite": { + "version": "0.9.11", + "resolved": "git+ssh://git@github.com/rhashimoto/wa-sqlite.git#390744d41c61aa0bbd53d3c738abef5e23f71cc4", + "optional": true, + "peer": true + }, + "node_modules/wcwidth": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", + "integrity": "sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==", + "dev": true, + "dependencies": { + "defaults": "^1.0.3" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-boxed-primitive": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz", + "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==", + "dependencies": { + "is-bigint": "^1.0.1", + "is-boolean-object": "^1.1.0", + "is-number-object": "^1.0.4", + "is-string": "^1.0.5", + "is-symbol": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-collection": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.1.tgz", + "integrity": "sha512-W8xeTUwaln8i3K/cY1nGXzdnVZlidBcagyNFtBdD5kxnb4TvGKR7FfSIS3mYpwWS1QUCutfKz8IY8RjftB0+1A==", + "dev": true, + "dependencies": { + "is-map": "^2.0.1", + "is-set": "^2.0.1", + "is-weakmap": "^2.0.1", + "is-weakset": "^2.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/which-typed-array": { + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.14.tgz", + "integrity": "sha512-VnXFiIW8yNn9kIHN88xvZ4yOWchftKDsRJ8fEPacX/wl1lOvBrhsJ/OeJCXq7B0AaijRuqgzSKalJoPk+D8MPg==", + "dependencies": { + "available-typed-arrays": "^1.0.6", + "call-bind": "^1.0.5", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-tostringtag": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", 
+ "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + }, + "node_modules/ws": { + "version": "8.16.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.16.0.tgz", + "integrity": "sha512-HS0c//TP7Ina87TfiPUz1rQzMhHrl/SG2guqRcTOIUYD2q8uhUdNHZYJUaQ8aTGPzCh+c6oawMKW35nFl1dxyQ==", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "engines": { + "node": ">=0.4" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true + }, + "node_modules/yauzl": { + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", + "integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==", + "dependencies": { + "buffer-crc32": "~0.2.3", + "fd-slicer": "~1.1.0" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zod": { + "version": "3.21.1", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.21.1.tgz", + "integrity": "sha512-+dTu2m6gmCbO9Ahm4ZBDapx2O6ZY9QSPXst2WXjcznPMwf2YNpn3RevLx4KkZp1OPW/ouFcoBtBzFz/LeY69oA==", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + } + } +} diff --git a/examples/web-pglite/package.json b/examples/web-pglite/package.json new file mode 100644 index 0000000000..bbb56a756b --- /dev/null +++ b/examples/web-pglite/package.json @@ -0,0 +1,41 @@ +{ + "name": "electric-sql-pglite-example", + "version": "0.9.3", + "author": "ElectricSQL", + "license": "Apache-2.0", + "type": "module", + "scripts": { + "backend:start": "npx electric-sql start --with-postgres", + "backend:stop": "npx electric-sql stop", + "backend:up": "npx electric-sql start --with-postgres --detach", + "backend:down": "npx electric-sql stop --remove", + "client:generate": "npx electric-sql generate", + "client:watch": "npx electric-sql generate --watch", + "db:migrate": "npx electric-sql with-config \"npx pg-migrations apply --database {{ELECTRIC_PROXY}} --directory ./db/migrations\"", + "db:psql": "npx electric-sql psql", + 
"electric:start": "npx electric-sql start", + "dev": "vite", + "build": "tsc && vite build", + "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0", + "preview": "vite preview" + }, + "dependencies": { + "@electric-sql/pglite": "^0.1.4", + "electric-sql": "file:../../clients/typescript/electric-sql-0.10.1.tgz", + "react": "^18.2.0", + "react-dom": "^18.2.0" + }, + "devDependencies": { + "@databases/pg-migrations": "^5.0.3", + "@types/react": "^18.2.57", + "@types/react-dom": "^18.2.19", + "@typescript-eslint/eslint-plugin": "^6.21.0", + "@typescript-eslint/parser": "^6.21.0", + "@vitejs/plugin-react": "^4.2.1", + "eslint": "^8.56.0", + "eslint-plugin-react-hooks": "^4.6.0", + "eslint-plugin-react-refresh": "^0.4.5", + "typescript": "^5.3.3", + "vite": "^5.1.4" + } +} diff --git a/examples/web-pglite/public/favicon.ico b/examples/web-pglite/public/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..e55095b98323d3dd569e75945b47d612e02b620b GIT binary patch literal 1659 zcmXw32~<;88on=!frKpNJxJIR8d(Y=fdLIrlxPvehQ%o=ZNV5Ug31u2NIV1Py}AHu z6;q@rq6DNv1&^p$gSDC*sdaTbL*lx`(xzXgWV;9&!JbO!v|NNO!( ziI=jLg_BzLfq@3lUk@Ir!9YFfLK$oM65_>j=Eo7lO9xm{e!CW0p{uf5PR>4SK zLH-^i{Ct!x3?w%y8L6@C#Q~HP3i^gPgu`VA`cbNPvIG1m-xh%TYJ#qkzCMo7Sw;V1 z4Sk~o;kqF%Tte4(Y~O{HihSsH4Q*>G!gYsz7djlu1%^7%ucqa$M?5^h{W|DQ4J~&A z%;!7o%Yp8mggrfBKF_fv6XE(!iNy|%v7YZmx z@(_-jQ)V*Mdx9O{OV5zdH^dWeRyln04~8U$c=;gIbDYpoNv_}RBu#|9`7BWgGwvf+ zLh(8^1k|mQHl#srbx-)~mE{?W+ox|QO zu&;n}yol7imlY98I$z3M8A1H<5cA_mMv|D3vVyTbj{ezd$K9C@<=fc)K2zag+4DCk zFGxy-`Wh590FX8(#I8u)_3z1{><#c*WdA61rXh(Gq|>iUQ3ZPMDffvx$|@(nOl6RN+E$g#S4A=ZP zo@U9gy_u*Tq<3p}^l#zy#R#=x@-N9_V_6}labTV9WX9N)yjz>ql8efm{{n=1?coFH zM=CTvl&MAi4{IBb^}LNi-KvK7^Ocs-GQM&LUf_oxT;q<;2J`UdZL6NOK3P0y)V-s# zE8g$FPN^naTZR`6^07Z#Ec8D-W^d1mrZ`h_YUzlbyzI8kC9-Rq-#I9O1=U%TXi ztFK9;XpgprzDrw1tDh@6WAV=TU6u;o8Ixj*SN5p!)zsQ>e4`(}{Lh*)>2a+F6<>*I zf#0**@!@hbrClq}NxP!GM*9mj|A6l6=IbLnI*lIRn|cKPktM(|nIrd`Dck?{i}}ec zqnsD=&Hpjqw-CP$b;S*qTRw-iI6^}&oK^Kod%9ZR7nMnCQ0wI!?K8=#`sqc%xan*u zAxn96Z@=WU?a5b_V|wGwu1~^(mV+shkR$!z@x$$T(P*#UTY70|XdF3Y+uYtM%6lq! zs&7^+hDC!nK2>VIE;3KWOy>g4lkO9oIi*1>kAo3!!%Fq$&`Iv@gMg zwUjvh`XpUvkcWeK?QIY=w}3ePzmqiGfIJZU=i0md@(;1*^O>(iFm`cxHMpmQp|rPj zlA+4wo4J=1LzeO$ewmnWZt}pc$LN7|?j_kEl)g?U(1fYURVP5#K)EwbY_axAZ{13eVd)m2Gf<&Sm54VIA3slCitWUGh%Yq8 zpx6E!arMs%uMVg%VQa6m=cY&Uq1VZN*eU!L@x=^t=^L1#Zi|UJG6JHmb*nse8_`TC zXx1OmZ9xUlxmkafF3&at1(}5&x*}Tvbj~b<#Uf>a55;gevE%3nU|T;Svqq%?j6Eui z#%gN0oDpd_wk^+m^~fM-!x_%_DnhTRhw0tR7h$hFe@`&=(9}_FaknhbNFy_S)n^F~ zuWc^k8dL^$;AH5(e~eLBFX}kr1~d<-@JMJt9c!xt&GzT2wkg0;zSxKA{m8ZmOSkob zZ6j@|whD0nakbl7+z6!xv0caO=<2lH;6Vvi^hD|3mK7EB#Mq9vHq5n=( +
+ logo + + + +
+ + ) +} diff --git a/examples/web-pglite/src/ElectricProvider.tsx b/examples/web-pglite/src/ElectricProvider.tsx new file mode 100644 index 0000000000..7d63217344 --- /dev/null +++ b/examples/web-pglite/src/ElectricProvider.tsx @@ -0,0 +1,72 @@ +import { useEffect, useState } from 'react' + +import { LIB_VERSION } from 'electric-sql/version' +import { makeElectricContext } from 'electric-sql/react' +import { uniqueTabId } from 'electric-sql/util' +import { electrify } from 'electric-sql/pglite' +import { PGlite } from '@electric-sql/pglite' + +import { authToken } from './auth' +import { Electric, schema } from './generated/client' + +const { ElectricProvider, useElectric } = makeElectricContext() + +// We use a global database instance to avoid reinitializing the database +// when the component re-renders under React strict mode. +let db: PGlite + +const ElectricProviderComponent = ({ + children, +}: { + children: React.ReactNode +}) => { + const [electric, setElectric] = useState() + + useEffect(() => { + let isMounted = true + + const init = async () => { + const config = { + debug: import.meta.env.DEV, + url: import.meta.env.ELECTRIC_SERVICE, + } + + const { tabId } = uniqueTabId() + const scopedDbName = `idb://basic-${LIB_VERSION}-${tabId}.db` + + db ??= new PGlite(scopedDbName, { + relaxedDurability: true, + }) + const client = await electrify(db, schema, config) + await client.connect(authToken()) + + if (!isMounted) { + return + } + + setElectric(client) + } + + const cleanup = async () => { + if (electric) { + await electric.close() + } + } + + init() + + return () => { + cleanup() + isMounted = false + } + }, []) + + if (electric === undefined) { + return null + } + + return {children} +} + +// eslint-disable-next-line react-refresh/only-export-components +export { ElectricProviderComponent as ElectricProvider, useElectric } diff --git a/examples/web-pglite/src/Example.css b/examples/web-pglite/src/Example.css new file mode 100644 index 0000000000..201f902c58 --- /dev/null +++ b/examples/web-pglite/src/Example.css @@ -0,0 +1,41 @@ +.controls { + margin-bottom: 1.5rem; +} + +.button { + display: inline-block; + line-height: 1.3; + text-align: center; + text-decoration: none; + vertical-align: middle; + cursor: pointer; + user-select: none; + width: calc(15vw + 100px); + margin-right: .5rem!important; + margin-left: .5rem!important; + border-radius: 32px; + text-shadow: 2px 6px 20px rgba(0,0,0,0.4); + box-shadow: rgba(0,0,0,0.5) 1px 2px 8px 0px; + background: #1e2123; + border: 2px solid #229089; + color: #f9fdff; + font-size: 16px; + font-weight: 500; + padding: 10px 18px; +} + +.item { + display: block; + line-height: 1.3; + text-align: center; + vertical-align: middle; + width: calc(30vw - 1.5rem + 200px); + margin-right: auto; + margin-left: auto; + border-radius: 32px; + border: 1.5px solid #bbb; + box-shadow: rgba(0,0,0,0.3) 1px 2px 8px 0px; + color: #f9fdff; + font-size: 13px; + padding: 10px 18px; +} diff --git a/examples/web-pglite/src/Example.tsx b/examples/web-pglite/src/Example.tsx new file mode 100644 index 0000000000..177ca18c11 --- /dev/null +++ b/examples/web-pglite/src/Example.tsx @@ -0,0 +1,56 @@ +import { useEffect } from 'react' +import { useLiveQuery } from 'electric-sql/react' +import { genUUID } from 'electric-sql/util' +import { Items as Item } from './generated/client' +import { useElectric } from './ElectricProvider' + +import './Example.css' + +export const Example = () => { + const { db } = useElectric()! 
+  const { results } = useLiveQuery(db.items.liveMany())
+
+  useEffect(() => {
+    const syncItems = async () => {
+      // Resolves when the shape subscription has been established.
+      const shape = await db.items.sync()
+
+      // Resolves when the data has been synced into the local database.
+      await shape.synced
+    }
+
+    syncItems()
+  }, [])
+
+  const addItem = async () => {
+    await db.items.create({
+      data: {
+        value: genUUID(),
+      },
+    })
+  }
+
+  const clearItems = async () => {
+    await db.items.deleteMany()
+  }
+
+  const items: Item[] = results ?? []
+
+  return (
+    <div>
+      <div className="controls">
+        <button className="button" onClick={addItem}>
+          Add
+        </button>
+        <button className="button" onClick={clearItems}>
+          Clear
+        </button>
+      </div>
+      {items.map((item: Item, index: number) => (
+        <p key={index} className="item">
+          <code>{item.value}</code>
+        </p>
+      ))}
+    </div>
+ ) +} diff --git a/examples/web-pglite/src/assets/logo.svg b/examples/web-pglite/src/assets/logo.svg new file mode 100644 index 0000000000..f5ec440a63 --- /dev/null +++ b/examples/web-pglite/src/assets/logo.svg @@ -0,0 +1,11 @@ + + + + diff --git a/examples/web-pglite/src/auth.ts b/examples/web-pglite/src/auth.ts new file mode 100644 index 0000000000..c28f71f8ad --- /dev/null +++ b/examples/web-pglite/src/auth.ts @@ -0,0 +1,17 @@ +import { insecureAuthToken } from 'electric-sql/auth' +import { genUUID } from 'electric-sql/util' + +// Generate an insecure authentication JWT. +// See https://electric-sql.com/docs/usage/auth for more details. +export const authToken = () => { + const subKey = '__electric_sub' + let sub = window.sessionStorage.getItem(subKey) + if (!sub) { + // This is just a demo. In a real app, the user ID would + // usually come from somewhere else :) + sub = genUUID() + window.sessionStorage.setItem(subKey, sub) + } + const claims = { sub } + return insecureAuthToken(claims) +} diff --git a/examples/web-pglite/src/main.tsx b/examples/web-pglite/src/main.tsx new file mode 100644 index 0000000000..99e75acb71 --- /dev/null +++ b/examples/web-pglite/src/main.tsx @@ -0,0 +1,10 @@ +import React from 'react' +import ReactDOM from 'react-dom/client' +import App from './App' +import './style.css' + +ReactDOM.createRoot(document.getElementById('root')!).render( + + + , +) diff --git a/examples/web-pglite/src/style.css b/examples/web-pglite/src/style.css new file mode 100644 index 0000000000..abc5cdbc38 --- /dev/null +++ b/examples/web-pglite/src/style.css @@ -0,0 +1,12 @@ +body { + margin: 0; + font-family: 'Helvetica Neue', Helvetica, sans-serif; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + background: #1c1e20; + min-width: 360px; +} + +code { + font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', monospace; +} diff --git a/examples/web-pglite/src/vite-env.d.ts b/examples/web-pglite/src/vite-env.d.ts new file mode 100644 index 0000000000..11f02fe2a0 --- /dev/null +++ b/examples/web-pglite/src/vite-env.d.ts @@ -0,0 +1 @@ +/// diff --git a/examples/web-pglite/tsconfig.json b/examples/web-pglite/tsconfig.json new file mode 100644 index 0000000000..a7fc6fbf23 --- /dev/null +++ b/examples/web-pglite/tsconfig.json @@ -0,0 +1,25 @@ +{ + "compilerOptions": { + "target": "ES2020", + "useDefineForClassFields": true, + "lib": ["ES2020", "DOM", "DOM.Iterable"], + "module": "ESNext", + "skipLibCheck": true, + + /* Bundler mode */ + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + "jsx": "react-jsx", + + /* Linting */ + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true + }, + "include": ["src"], + "references": [{ "path": "./tsconfig.node.json" }] +} diff --git a/examples/web-pglite/tsconfig.node.json b/examples/web-pglite/tsconfig.node.json new file mode 100644 index 0000000000..42872c59f5 --- /dev/null +++ b/examples/web-pglite/tsconfig.node.json @@ -0,0 +1,10 @@ +{ + "compilerOptions": { + "composite": true, + "skipLibCheck": true, + "module": "ESNext", + "moduleResolution": "bundler", + "allowSyntheticDefaultImports": true + }, + "include": ["vite.config.ts"] +} diff --git a/examples/web-pglite/vite.config.ts b/examples/web-pglite/vite.config.ts new file mode 100644 index 0000000000..baed928f57 --- /dev/null +++ b/examples/web-pglite/vite.config.ts @@ -0,0 +1,11 @@ +import { defineConfig 
} from 'vite' +import react from '@vitejs/plugin-react' + +// https://vitejs.dev/config/ +export default defineConfig({ + plugins: [react()], + envPrefix: 'ELECTRIC_', + optimizeDeps: { + exclude: ['@electric-sql/pglite'], + }, +}) From 5dc8fccb749ca3ff74075063850693c9343c54ec Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 18 Apr 2024 15:54:40 +0200 Subject: [PATCH 081/156] Added PGlite and tauri sqlite to optional peer deps --- clients/typescript/package.json | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/clients/typescript/package.json b/clients/typescript/package.json index 8fa557e144..dfb2e63dcd 100644 --- a/clients/typescript/package.json +++ b/clients/typescript/package.json @@ -289,10 +289,16 @@ "zod": "3.21.1" }, "peerDependenciesMeta": { + "@capacitor-community/sqlite": { + "optional": true + }, + "@electric-sql/pglite": { + "optional": true + }, "@op-engineering/op-sqlite": { "optional": true }, - "@capacitor-community/sqlite": { + "@tauri-apps/plugin-sql": { "optional": true }, "@tauri-apps/plugin-sql": { From 7f20115f0244d9f70b5958f0d6c0049e9f6657be Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 22 Apr 2024 08:58:06 +0200 Subject: [PATCH 082/156] PGlite unit tests --- .../typescript/src/drivers/pglite/database.ts | 4 +- .../test/client/model/pglite/datatype.test.ts | 52 ++++ .../test/migrators/pglite/builder.test.ts | 58 ++++ .../test/migrators/pglite/bundle.test.ts | 35 +++ .../test/migrators/pglite/schema.test.ts | 60 ++++ .../test/migrators/pglite/triggers.test.ts | 262 ++++++++++++++++++ clients/typescript/test/satellite/common.ts | 16 ++ .../pglite/process.migration.test.ts | 22 ++ .../satellite/pglite/process.tags.test.ts | 16 ++ .../test/satellite/pglite/process.test.ts | 28 ++ .../satellite/pglite/process.timing.test.ts | 16 ++ .../satellite/pglite/serialization.test.ts | 26 ++ 12 files changed, 592 insertions(+), 3 deletions(-) create mode 100644 clients/typescript/test/client/model/pglite/datatype.test.ts create mode 100644 clients/typescript/test/migrators/pglite/builder.test.ts create mode 100644 clients/typescript/test/migrators/pglite/bundle.test.ts create mode 100644 clients/typescript/test/migrators/pglite/schema.test.ts create mode 100644 clients/typescript/test/migrators/pglite/triggers.test.ts create mode 100644 clients/typescript/test/satellite/pglite/process.migration.test.ts create mode 100644 clients/typescript/test/satellite/pglite/process.tags.test.ts create mode 100644 clients/typescript/test/satellite/pglite/process.test.ts create mode 100644 clients/typescript/test/satellite/pglite/process.timing.test.ts create mode 100644 clients/typescript/test/satellite/pglite/serialization.test.ts diff --git a/clients/typescript/src/drivers/pglite/database.ts b/clients/typescript/src/drivers/pglite/database.ts index b106d59514..0ecc780adf 100644 --- a/clients/typescript/src/drivers/pglite/database.ts +++ b/clients/typescript/src/drivers/pglite/database.ts @@ -2,6 +2,4 @@ import type { PGlite } from '@electric-sql/pglite' // The relevant subset of the SQLitePlugin database client API // that we need to ensure the client we're electrifying provides. 
-export interface Database - extends Pick { -} +export interface Database extends Pick {} diff --git a/clients/typescript/test/client/model/pglite/datatype.test.ts b/clients/typescript/test/client/model/pglite/datatype.test.ts new file mode 100644 index 0000000000..7e2c48e1d5 --- /dev/null +++ b/clients/typescript/test/client/model/pglite/datatype.test.ts @@ -0,0 +1,52 @@ +import anyTest, { TestFn } from 'ava' + +import { MockRegistry } from '../../../../src/satellite/mock' + +import { electrify } from '../../../../src/drivers/pglite' +import { + _NOT_UNIQUE_, + _RECORD_NOT_FOUND_, +} from '../../../../src/client/validation/errors/messages' +import { schema } from '../../generated' +import { ContextType, datatypeTests } from '../datatype' +import { PGlite } from '@electric-sql/pglite' + +// Run all tests in this file serially +// because there are a lot of tests +// and it would lead to PG running out of shared memory +const test = anyTest.serial as TestFn< + ContextType & { + stop: () => Promise + } +> + +test.beforeEach(async (t) => { + const db = new PGlite() + const electric = await electrify( + db, + schema, + {}, + { registry: new MockRegistry() } + ) + + const tbl = electric.db.DataTypes + + // Sync all shapes such that we don't get warnings on every query + await tbl.sync() + + await db.query( + `CREATE TABLE "DataTypes"("id" INT4 PRIMARY KEY, "date" DATE, "time" TIME, "timetz" TIMETZ, "timestamp" TIMESTAMP, "timestamptz" TIMESTAMPTZ, "bool" BOOL, "uuid" UUID, "int2" INT2, "int4" INT4, "int8" INT8, "float4" FLOAT4, "float8" FLOAT8, "json" JSONB, "bytea" BYTEA, "relatedId" INT4);` + ) + + t.context = { + tbl, + stop: () => db.close(), + dialect: 'Postgres', + } +}) + +test.afterEach.always(async (t) => { + await t.context.stop() +}) + +datatypeTests(test as unknown as TestFn) diff --git a/clients/typescript/test/migrators/pglite/builder.test.ts b/clients/typescript/test/migrators/pglite/builder.test.ts new file mode 100644 index 0000000000..c628fb25ee --- /dev/null +++ b/clients/typescript/test/migrators/pglite/builder.test.ts @@ -0,0 +1,58 @@ +import anyTest, { TestFn } from 'ava' +import { makeMigration, parseMetadata } from '../../../src/migrators/builder' +import { ContextType, makeMigrationMetaData } from '../builder.test' +import { PGlite } from '@electric-sql/pglite' +import { DatabaseAdapter } from '../../../src/drivers/pglite' +import { PgBundleMigrator } from '../../../src/migrators' +import { pgBuilder } from '../../../src/migrators/query-builder' + +const test = anyTest as TestFn + +test.beforeEach(async (t) => { + const builder = pgBuilder + const migrationMetaData = makeMigrationMetaData(builder) + + t.context = { + migrationMetaData, + builder, + } +}) + +// No need to run the bundleTests because +// they are already ran by `../postgres/builder.test.ts` +// and the tests do not use an actual PG database +//bundleTests(test) + +test('load migration from meta data', async (t) => { + const { migrationMetaData, builder } = t.context + const migration = makeMigration(parseMetadata(migrationMetaData), builder) + const db = new PGlite() + const stop = () => db.close() + const adapter = new DatabaseAdapter(db) + const migrator = new PgBundleMigrator(adapter, [migration]) + + // Apply the migration + await migrator.up() + + // Check that the DB is initialized with the stars table + const tables = await adapter.query({ + sql: ` + SELECT table_name + FROM information_schema.tables + WHERE table_schema = 'public' AND table_name = 'stars';`, + }) + + const starIdx = 
tables.findIndex((tbl) => tbl.table_name === 'stars') + t.assert(starIdx >= 0) // must exist + + const columns = await adapter + .query({ + sql: `SELECT column_name + FROM information_schema.columns + WHERE table_name = 'stars';`, + }) + .then((columns) => columns.map((column) => column.column_name)) + + t.deepEqual(columns, ['id', 'avatar_url', 'name', 'starred_at', 'username']) + await stop() +}) diff --git a/clients/typescript/test/migrators/pglite/bundle.test.ts b/clients/typescript/test/migrators/pglite/bundle.test.ts new file mode 100644 index 0000000000..3f7a65e441 --- /dev/null +++ b/clients/typescript/test/migrators/pglite/bundle.test.ts @@ -0,0 +1,35 @@ +import anyTest, { TestFn } from 'ava' + +import { DatabaseAdapter } from '../../../src/drivers/pglite' +import { PgBundleMigrator as BundleMigrator } from '../../../src/migrators/bundle' + +import { randomValue } from '../../../src/util/random' + +import { PGlite } from '@electric-sql/pglite' +import { ContextType, bundleTests } from '../bundle.test' + +import migrations from '../../support/migrations/pg-migrations.js' + +const test = anyTest as TestFn + +test.beforeEach(async (t) => { + const dbName = `bundle-migrator-${randomValue()}` + const db = new PGlite() + const stop = () => db.close() + const adapter = new DatabaseAdapter(db) + + t.context = { + dbName, + adapter, + migrations, + BundleMigrator, + stop, + } +}) + +test.afterEach.always(async (t) => { + const { stop } = t.context as ContextType + await stop() +}) + +bundleTests(test) diff --git a/clients/typescript/test/migrators/pglite/schema.test.ts b/clients/typescript/test/migrators/pglite/schema.test.ts new file mode 100644 index 0000000000..e296631b3e --- /dev/null +++ b/clients/typescript/test/migrators/pglite/schema.test.ts @@ -0,0 +1,60 @@ +import test from 'ava' + +import { Database, DatabaseAdapter } from '../../../src/drivers/pglite' +import { PgBundleMigrator as BundleMigrator } from '../../../src/migrators/bundle' +import { satelliteDefaults } from '../../../src/satellite/config' + +import { randomValue } from '../../../src/util/random' + +import migrations from '../../support/migrations/pg-migrations.js' +import { PGlite } from '@electric-sql/pglite' + +type Context = { + dbName: string + adapter: DatabaseAdapter + db: Database + stopPG: () => Promise +} + +test.beforeEach(async (t) => { + const dbName = `schema-migrations-${randomValue()}` + const db = new PGlite() + const stop = () => db.close() + const adapter = new DatabaseAdapter(db) + + t.context = { + adapter, + dbName, + stopPG: stop, + } +}) + +test.afterEach.always(async (t) => { + const { stopPG } = t.context as Context + await stopPG() +}) + +test('check schema keys are unique', async (t) => { + const { adapter } = t.context as Context + + const migrator = new BundleMigrator(adapter, migrations) + await migrator.up() + const defaults = satelliteDefaults( + migrator.electricQueryBuilder.defaultNamespace + ) + const metaTable = `"${defaults.metaTable.namespace}"."${defaults.metaTable.tablename}"` + + await adapter.run({ + sql: `INSERT INTO ${metaTable} (key, value) values ('key', 'value')`, + }) + try { + await adapter.run({ + sql: `INSERT INTO ${metaTable} (key, value) values ('key', 'value')`, + }) + t.fail() + } catch (err) { + const castError = err as { code: string; detail: string } + t.is(castError.code, '23505') + t.is(castError.detail, 'Key (key)=(key) already exists.') + } +}) diff --git a/clients/typescript/test/migrators/pglite/triggers.test.ts 
b/clients/typescript/test/migrators/pglite/triggers.test.ts new file mode 100644 index 0000000000..2cd976a604 --- /dev/null +++ b/clients/typescript/test/migrators/pglite/triggers.test.ts @@ -0,0 +1,262 @@ +import { dedent } from 'ts-dedent' +import testAny, { TestFn } from 'ava' +import { generateTableTriggers } from '../../../src/migrators/triggers' +import { satelliteDefaults } from '../../../src/satellite/config' +import { + migrateDb, + personTable as getPersonTable, +} from '../../satellite/common' +import { pgBuilder } from '../../../src/migrators/query-builder' +import { PGlite } from '@electric-sql/pglite' +import { Database, DatabaseAdapter } from '../../../src/drivers/pglite' +import { ContextType, triggerTests } from '../triggers' + +type Context = ContextType & { + db: Database +} + +const test = testAny as TestFn +const defaults = satelliteDefaults('public') +const oplogTable = `"${defaults.oplogTable.namespace}"."${defaults.oplogTable.tablename}"` + +const personTable = getPersonTable('public') +const personNamespace = personTable.namespace +const personTableName = personTable.tableName +const qualifiedPersonTable = `"${personNamespace}"."${personTableName}"` + +test.beforeEach(async (t) => { + const db = new PGlite() + const stop = () => db.close() + const adapter = new DatabaseAdapter(db) + + t.context = { + db, + adapter, + defaults, + personTable, + dialect: 'Postgres', + migrateDb: migrateDb.bind(null, adapter, personTable, pgBuilder), + stopDb: stop, + } +}) + +test.afterEach.always(async (t) => { + const { stopDb } = t.context as any + await stopDb() +}) + +test('generateTableTriggers should create correct triggers for a table', (t) => { + // Generate the oplog triggers + const triggers = generateTableTriggers(personTable, pgBuilder) + + // Check that the oplog triggers are correct + const triggersSQL = triggers.map((t) => t.sql).join('\n') + t.assert( + triggersSQL.includes( + dedent` + CREATE TRIGGER insert_public_personTable_into_oplog + AFTER INSERT ON "public"."personTable" + FOR EACH ROW + EXECUTE FUNCTION insert_public_personTable_into_oplog_function(); + ` + ) + ) + + t.assert( + triggersSQL.includes( + dedent` + CREATE OR REPLACE FUNCTION insert_public_personTable_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM "public"._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'personTable'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO "public"._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ( + 'public', + 'personTable', + 'INSERT', + json_strip_nulls(json_build_object('id', cast(new."id" as TEXT))), + jsonb_build_object('age', new."age", 'blob', CASE WHEN new."blob" IS NOT NULL THEN encode(new."blob"::bytea, 'hex') ELSE NULL END, 'bmi', cast(new."bmi" as TEXT), 'id', cast(new."id" as TEXT), 'int8', cast(new."int8" as TEXT), 'name', new."name"), + NULL, + NULL + ); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + ` + ) + ) + + t.assert( + triggersSQL.includes( + dedent` + CREATE TRIGGER update_public_personTable_into_oplog + AFTER UPDATE ON "public"."personTable" + FOR EACH ROW + EXECUTE FUNCTION update_public_personTable_into_oplog_function(); + ` + ) + ) + + t.assert( + triggersSQL.includes( + dedent` + CREATE OR REPLACE FUNCTION update_public_personTable_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + 
DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM "public"._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'personTable'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO "public"._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ( + 'public', + 'personTable', + 'UPDATE', + json_strip_nulls(json_build_object('id', cast(new."id" as TEXT))), + jsonb_build_object('age', new."age", 'blob', CASE WHEN new."blob" IS NOT NULL THEN encode(new."blob"::bytea, 'hex') ELSE NULL END, 'bmi', cast(new."bmi" as TEXT), 'id', cast(new."id" as TEXT), 'int8', cast(new."int8" as TEXT), 'name', new."name"), + jsonb_build_object('age', old."age", 'blob', CASE WHEN old."blob" IS NOT NULL THEN encode(old."blob"::bytea, 'hex') ELSE NULL END, 'bmi', cast(old."bmi" as TEXT), 'id', cast(old."id" as TEXT), 'int8', cast(old."int8" as TEXT), 'name', old."name"), + NULL + ); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + ` + ) + ) + + t.assert( + triggersSQL.includes( + dedent` + CREATE TRIGGER delete_public_personTable_into_oplog + AFTER DELETE ON "public"."personTable" + FOR EACH ROW + EXECUTE FUNCTION delete_public_personTable_into_oplog_function(); + ` + ) + ) + + t.assert( + triggersSQL.includes( + dedent` + CREATE OR REPLACE FUNCTION delete_public_personTable_into_oplog_function() + RETURNS TRIGGER AS $$ + BEGIN + DECLARE + flag_value INTEGER; + BEGIN + -- Get the flag value from _electric_trigger_settings + SELECT flag INTO flag_value FROM "public"._electric_trigger_settings WHERE namespace = 'public' AND tablename = 'personTable'; + + IF flag_value = 1 THEN + -- Insert into _electric_oplog + INSERT INTO "public"._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) + VALUES ( + 'public', + 'personTable', + 'DELETE', + json_strip_nulls(json_build_object('id', cast(old."id" as TEXT))), + NULL, + jsonb_build_object('age', old."age", 'blob', CASE WHEN old."blob" IS NOT NULL THEN encode(old."blob"::bytea, 'hex') ELSE NULL END, 'bmi', cast(old."bmi" as TEXT), 'id', cast(old."id" as TEXT), 'int8', cast(old."int8" as TEXT), 'name', old."name"), + NULL + ); + END IF; + + RETURN NEW; + END; + END; + $$ LANGUAGE plpgsql; + ` + ) + ) +}) + +test('oplog insertion trigger should insert row into oplog table', async (t) => { + const { db, migrateDb } = t.context + + // Migrate the DB with the necessary tables and triggers + await migrateDb() + + // Insert a row in the table + const insertRowSQL = `INSERT INTO ${qualifiedPersonTable} (id, name, age, bmi, int8, blob) VALUES (1, 'John Doe', 30, 25.5, 7, '\\x0001ff')` + await db.query(insertRowSQL) + + // Check that the oplog table contains an entry for the inserted row + const { rows: oplogRows } = await db.query(`SELECT * FROM ${oplogTable}`) + t.is(oplogRows.length, 1) + t.deepEqual(oplogRows[0], { + namespace: 'public', + tablename: personTableName, + optype: 'INSERT', + // `id` and `bmi` values are stored as strings + // because we cast REAL values to text in the trigger + // to circumvent SQLite's bug in the `json_object` function + // that is used in the triggers. + // cf. 
`joinColsForJSON` function in `src/migrators/triggers.ts` + // These strings are then parsed back into real numbers + // by the `deserialiseRow` function in `src/satellite/oplog.ts` + primaryKey: '{"id":"1"}', + newRow: + '{"id": "1", "age": 30, "bmi": "25.5", "blob": "0001ff", "int8": "7", "name": "John Doe"}', // BigInts are serialized as strings in the oplog + oldRow: null, + timestamp: null, + rowid: 1, + clearTags: '[]', + }) +}) + +test('oplog trigger should handle Infinity values correctly', async (t) => { + const { db, migrateDb } = t.context + const tableName = personTable.tableName + + // Migrate the DB with the necessary tables and triggers + await migrateDb() + + // Insert a row in the table + const insertRowSQL = `INSERT INTO ${qualifiedPersonTable} (id, name, age, bmi, int8) VALUES ('-Infinity', 'John Doe', 30, 'Infinity', 7)` + await db.query(insertRowSQL) + + // Check that the oplog table contains an entry for the inserted row + const { rows: oplogRows } = await db.query(`SELECT * FROM ${oplogTable}`) + t.is(oplogRows.length, 1) + t.deepEqual(oplogRows[0], { + namespace: 'public', + tablename: tableName, + optype: 'INSERT', + // `id` and `bmi` values are stored as strings + // because we cast REAL values to text in the trigger + // to circumvent SQLite's bug in the `json_object` function + // that is used in the triggers. + // cf. `joinColsForJSON` function in `src/migrators/triggers.ts` + // These strings are then parsed back into real numbers + // by the `deserialiseRow` function in `src/satellite/oplog.ts` + primaryKey: '{"id":"-Infinity"}', + newRow: + '{"id": "-Infinity", "age": 30, "bmi": "Infinity", "blob": null, "int8": "7", "name": "John Doe"}', // BigInts are serialized as strings in the oplog + oldRow: null, + timestamp: null, + rowid: 1, + clearTags: '[]', + }) +}) + +// even though `Context` is a subtype of `ContextType`, +// we have to cast `test` which is of type `TestFn` to `TestFn` +// because `TestFn` does not declare its type parameter to be covariant +triggerTests(test as unknown as TestFn) diff --git a/clients/typescript/test/satellite/common.ts b/clients/typescript/test/satellite/common.ts index e00a6604ab..f9b5845686 100644 --- a/clients/typescript/test/satellite/common.ts +++ b/clients/typescript/test/satellite/common.ts @@ -25,6 +25,8 @@ import { QueryBuilder } from '../../src/migrators/query-builder' import { BundleMigratorBase } from '../../src/migrators/bundle' import { makePgDatabase } from '../support/node-postgres' import { DatabaseAdapter as PgDatabaseAdapter } from '../../src/drivers/node-postgres/adapter' +import { PGlite } from '@electric-sql/pglite' +import { DatabaseAdapter as PgliteDatabaseAdapter } from '../../src/drivers/pglite/adapter' import { DatabaseAdapter } from '../../src/electric/adapter' export const dbDescription = new DbSchema( @@ -328,6 +330,20 @@ export const makePgContext = async ( t.context.stop = stop } +export const makePgliteContext = async ( + t: ExecutionContext, + namespace: string, + options: Opts = opts(namespace) +) => { + const dbName = `test-${randomValue()}` + const db = new PGlite() + const stop = () => db.close() + const adapter = new PgliteDatabaseAdapter(db) + const migrator = new PgBundleMigrator(adapter, pgMigrations) + makeContextInternal(t, dbName, adapter, migrator, namespace, options) + t.context.stop = stop +} + export const mockElectricClient = async ( db: SqliteDB, registry: Registry | GlobalRegistry, diff --git a/clients/typescript/test/satellite/pglite/process.migration.test.ts 
b/clients/typescript/test/satellite/pglite/process.migration.test.ts new file mode 100644 index 0000000000..6dbc9216eb --- /dev/null +++ b/clients/typescript/test/satellite/pglite/process.migration.test.ts @@ -0,0 +1,22 @@ +import testAny, { TestFn } from 'ava' +import { cleanAndStopSatellite, makePgliteContext } from '../common' +import { getPgMatchingShadowEntries } from '../../support/satellite-helpers' +import { pgBuilder } from '../../../src/migrators/query-builder' +import { + commonSetup, + ContextType, + processMigrationTests, +} from '../process.migration.test' + +const test = testAny as TestFn + +test.beforeEach(async (t) => { + const namespace = 'public' + await makePgliteContext(t, namespace) + t.context.getMatchingShadowEntries = getPgMatchingShadowEntries + t.context.builder = pgBuilder + await commonSetup(t) +}) +test.afterEach.always(cleanAndStopSatellite) + +processMigrationTests(test) diff --git a/clients/typescript/test/satellite/pglite/process.tags.test.ts b/clients/typescript/test/satellite/pglite/process.tags.test.ts new file mode 100644 index 0000000000..9c075150b5 --- /dev/null +++ b/clients/typescript/test/satellite/pglite/process.tags.test.ts @@ -0,0 +1,16 @@ +import anyTest, { TestFn } from 'ava' + +import { makePgliteContext, cleanAndStopSatellite } from '../common' + +import { getPgMatchingShadowEntries } from '../../support/satellite-helpers' +import { processTagsTests, ContextType } from '../process.tags.test' + +const test = anyTest as TestFn +test.beforeEach(async (t) => { + const namespace = 'public' + await makePgliteContext(t, namespace) + t.context.getMatchingShadowEntries = getPgMatchingShadowEntries +}) +test.afterEach.always(cleanAndStopSatellite) + +processTagsTests(test) diff --git a/clients/typescript/test/satellite/pglite/process.test.ts b/clients/typescript/test/satellite/pglite/process.test.ts new file mode 100644 index 0000000000..fdbc7d48d0 --- /dev/null +++ b/clients/typescript/test/satellite/pglite/process.test.ts @@ -0,0 +1,28 @@ +import anyTest, { TestFn } from 'ava' + +import { getPgMatchingShadowEntries } from '../../support/satellite-helpers' + +import { makePgliteContext, cleanAndStopSatellite } from '../common' + +import { pgBuilder } from '../../../src/migrators/query-builder' +import { processTests, ContextType } from '../process.test' +import { QualifiedTablename } from '../../../src/util' + +// Run all tests in this file serially +// because there are a lot of tests +// and it would lead to PG running out of shared memory +const test = anyTest.serial as TestFn +test.serial = test // because the common test file uses `test.serial` for some tests (but for PG all tests are serial) +test.beforeEach(async (t) => { + const namespace = 'public' + await makePgliteContext(t, namespace) + t.context.builder = pgBuilder + t.context.getMatchingShadowEntries = getPgMatchingShadowEntries + t.context.qualifiedParentTableName = new QualifiedTablename( + namespace, + 'parent' + ).toString() +}) +test.afterEach.always(cleanAndStopSatellite) + +processTests(test) diff --git a/clients/typescript/test/satellite/pglite/process.timing.test.ts b/clients/typescript/test/satellite/pglite/process.timing.test.ts new file mode 100644 index 0000000000..c11e6eb052 --- /dev/null +++ b/clients/typescript/test/satellite/pglite/process.timing.test.ts @@ -0,0 +1,16 @@ +import anyTest, { TestFn } from 'ava' +import { processTimingTests } from '../process.timing.test' +import { + makePgliteContext, + cleanAndStopSatellite, + ContextType, +} from '../common' + +const 
test = anyTest as TestFn +test.beforeEach(async (t) => { + const namespace = 'public' + await makePgliteContext(t, namespace) +}) +test.afterEach.always(cleanAndStopSatellite) + +processTimingTests(test) diff --git a/clients/typescript/test/satellite/pglite/serialization.test.ts b/clients/typescript/test/satellite/pglite/serialization.test.ts new file mode 100644 index 0000000000..db66d65e29 --- /dev/null +++ b/clients/typescript/test/satellite/pglite/serialization.test.ts @@ -0,0 +1,26 @@ +import anyTest, { ExecutionContext, TestFn } from 'ava' +import { PGlite } from '@electric-sql/pglite' +import { opts } from '../common' +import { ContextType, SetupFn, serializationTests } from '../serialization' +import { pgTypeDecoder, pgTypeEncoder } from '../../../src/util/encoders' +import { DatabaseAdapter as PgDatabaseAdapter } from '../../../src/drivers/pglite' +import { pgBuilder } from '../../../src/migrators/query-builder' + +const test = anyTest as TestFn + +const setupPG: SetupFn = async (t: ExecutionContext) => { + const db = new PGlite() + const stop = () => db.close() + t.teardown(async () => await stop()) + const namespace = 'public' + return [new PgDatabaseAdapter(db), pgBuilder, opts(namespace)] +} + +test.beforeEach(async (t) => { + t.context.dialect = 'Postgres' + t.context.encoder = pgTypeEncoder + t.context.decoder = pgTypeDecoder + t.context.setup = setupPG +}) + +serializationTests(test) From 6ed32f899976558afc948c2a384dc6df4a134314 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 22 Apr 2024 10:07:46 +0200 Subject: [PATCH 083/156] Fixed date and time support for PGlite --- .../typescript/src/client/conversions/postgres.ts | 14 +++++++++++--- .../src/client/execution/transactionalDB.ts | 6 +++++- .../src/drivers/node-postgres/database.ts | 4 +--- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/clients/typescript/src/client/conversions/postgres.ts b/clients/typescript/src/client/conversions/postgres.ts index 31df33a8b2..c220b374a0 100644 --- a/clients/typescript/src/client/conversions/postgres.ts +++ b/clients/typescript/src/client/conversions/postgres.ts @@ -1,8 +1,8 @@ import { InvalidArgumentError } from '../validation/errors/invalidArgumentError' import { Converter } from './converter' -import { serialiseDate } from './datatypes/date' +import { deserialiseDate, serialiseDate } from './datatypes/date' import { isJsonNull } from './datatypes/json' -import { PgBasicType, PgDateType, PgType } from './types' +import { PgBasicType, PgDateType, PgType, isPgDateType } from './types' /** * This module takes care of converting TypeScript values to a Postgres storeable value and back. @@ -17,7 +17,7 @@ function toPostgres(v: any, pgType: PgType): any { return v } - if (pgType === PgDateType.PG_TIME || pgType === PgDateType.PG_TIMETZ) { + if (isPgDateType(pgType)) { if (!(v instanceof Date)) throw new InvalidArgumentError( `Unexpected value ${v}. 
Expected a Date object.` @@ -81,6 +81,14 @@ function fromPostgres(v: any, pgType: PgType): any { return Math.fround(v) } + if (pgType === PgDateType.PG_TIME || pgType === PgDateType.PG_TIMETZ) { + // dates and timestamps are parsed into JS Date objects + // by the underlying PG driver we use + // But time and timetz values are returned as strings + // so we parse them into a JS Date object ourselves + return deserialiseDate(v, pgType as PgDateType) + } + return v } diff --git a/clients/typescript/src/client/execution/transactionalDB.ts b/clients/typescript/src/client/execution/transactionalDB.ts index 1dc97a757f..818608325b 100644 --- a/clients/typescript/src/client/execution/transactionalDB.ts +++ b/clients/typescript/src/client/execution/transactionalDB.ts @@ -58,7 +58,11 @@ export class TransactionalDB implements DB { this._converter, Transformation.Decode ) - return schema.parse(transformedRow) + try { + return schema.parse(transformedRow) + } catch (e) { + throw e + } }) successCallback( new TransactionalDB(tx, this._fields, this._converter), diff --git a/clients/typescript/src/drivers/node-postgres/database.ts b/clients/typescript/src/drivers/node-postgres/database.ts index de7a9675fa..76abb9e652 100644 --- a/clients/typescript/src/drivers/node-postgres/database.ts +++ b/clients/typescript/src/drivers/node-postgres/database.ts @@ -41,13 +41,11 @@ export class ElectricDatabase implements Database { } if ( - oid == pg.types.builtins.TIME || - oid == pg.types.builtins.TIMETZ || oid == pg.types.builtins.TIMESTAMP || oid == pg.types.builtins.TIMESTAMPTZ || oid == pg.types.builtins.DATE ) { - // Parse time, timestamp, and date values ourselves + // Parse timestamps and date values ourselves // because the pg parser parses them differently from what we expect const pgTypes = new Map([ [pg.types.builtins.TIME, PgDateType.PG_TIME], From 4b8ad758e04e62b29abda2d8485d9b3d29b0658b Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 22 Apr 2024 10:27:44 +0200 Subject: [PATCH 084/156] Remove obsolete conversions from JSON (they are only needed once we support top-level JSON null values) --- clients/typescript/src/client/conversions/postgres.ts | 10 ++++++---- .../typescript/src/drivers/node-postgres/database.ts | 8 +++++--- clients/typescript/src/drivers/pglite/mock.ts | 8 ++++---- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/clients/typescript/src/client/conversions/postgres.ts b/clients/typescript/src/client/conversions/postgres.ts index c220b374a0..0fa3d267f0 100644 --- a/clients/typescript/src/client/conversions/postgres.ts +++ b/clients/typescript/src/client/conversions/postgres.ts @@ -56,10 +56,8 @@ function fromPostgres(v: any, pgType: PgType): any { return v } - // no need to convert dates, times, or timestamps - // because we modified the parser in the node-pg driver - // to parse them how we want - + /* + // FIXME: the specialised conversions below are needed when adding support for top-level JSON null values if (pgType === PgBasicType.PG_JSON || pgType === PgBasicType.PG_JSONB) { if (v === null) { // DB null @@ -69,8 +67,12 @@ function fromPostgres(v: any, pgType: PgType): any { // JSON null value return { __is_electric_json_null__: true } } + if (typeof v === 'object') { + return v + } return JSON.parse(v) } + */ if (pgType === PgBasicType.PG_INT8) { return BigInt(v) // needed because the node-pg driver returns bigints as strings diff --git a/clients/typescript/src/drivers/node-postgres/database.ts b/clients/typescript/src/drivers/node-postgres/database.ts index 
76abb9e652..069f6197d3 100644 --- a/clients/typescript/src/drivers/node-postgres/database.ts +++ b/clients/typescript/src/drivers/node-postgres/database.ts @@ -29,16 +29,18 @@ export class ElectricDatabase implements Database { text: statement.sql, values: statement.args, types: { - // Modify the parser to not parse JSON values - // Instead, return them as strings - // our conversions will correctly parse them getTypeParser: ((oid: number) => { + /* + // Modify the parser to not parse JSON values + // Instead, return them as strings + // our conversions will correctly parse them if ( oid === pg.types.builtins.JSON || oid === pg.types.builtins.JSONB ) { return (val) => val } + */ if ( oid == pg.types.builtins.TIMESTAMP || diff --git a/clients/typescript/src/drivers/pglite/mock.ts b/clients/typescript/src/drivers/pglite/mock.ts index 4b3910d3d5..2fd4f334ea 100644 --- a/clients/typescript/src/drivers/pglite/mock.ts +++ b/clients/typescript/src/drivers/pglite/mock.ts @@ -5,14 +5,14 @@ export class MockDatabase implements Database { dataDir?: string fail: Error | undefined - constructor(dataDir?: string, options?: PGliteOptions) { + constructor(dataDir?: string, _options?: PGliteOptions) { this.dataDir = dataDir } async query( - query: string, - params?: any[], - options?: QueryOptions + _query: string, + _params?: any[], + _options?: QueryOptions ): Promise> { if (typeof this.fail !== 'undefined') throw this.fail From 838a791cff81f68670e21d3c717b80f10c73b14f Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 22 Apr 2024 13:53:03 +0200 Subject: [PATCH 085/156] Added unit test for performSnapshot being stopped gracefully --- .../typescript/test/satellite/process.test.ts | 36 ++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index 5b35bd1d78..75bf62800e 100644 --- a/clients/typescript/test/satellite/process.test.ts +++ b/clients/typescript/test/satellite/process.test.ts @@ -35,7 +35,7 @@ import { SatelliteError, SatelliteErrorCode, } from '../../src/util/types' -import { relations, ContextType as CommonContextType } from './common' +import { relations, ContextType as CommonContextType, clean } from './common' import { numberToBytes, base64, blobToHexString } from '../../src/util/encoders' @@ -2482,4 +2482,38 @@ export const processTests = (test: TestFn) => { await satellite._performSnapshot() t.pass() }) + + test("don't leave a snapshot running when stopping", async (t) => { + const { adapter, runMigrations, satellite, authState } = t.context + await runMigrations() + await satellite._setAuthState(authState) + + // Make the adapter slower, to interleave stopping the process and closing the db with a snapshot + const transaction = satellite.adapter.transaction.bind(satellite.adapter) + satellite.adapter.transaction = (f) => + new Promise((res) => { + setTimeout(() => transaction(f).then(res), 500) + }) + + // Add something to the oplog + await adapter.run({ + sql: `INSERT INTO parent(id, value) VALUES (1,'val1')`, + }) + + // // Perform snapshot with the mutex, to emulate a real scenario + const snapshotPromise = satellite._mutexSnapshot() + // Give some time to start the "slow" snapshot + await sleepAsync(100) + + // Stop the process while the snapshot is being performed + await satellite.stop() + + // Remove/close the database connection + await clean(t) + + // Wait for the snapshot to finish to consider the test successful + await snapshotPromise + + 
t.pass() + }) } From ec294ee71a0037f5ac216174d64d84b0555e1b50 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 22 Apr 2024 13:58:13 +0200 Subject: [PATCH 086/156] Upgraded PGlite --- clients/typescript/package.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clients/typescript/package.json b/clients/typescript/package.json index dfb2e63dcd..dd8a5c95e1 100644 --- a/clients/typescript/package.json +++ b/clients/typescript/package.json @@ -216,7 +216,7 @@ "zod": "3.21.1" }, "devDependencies": { - "@electric-sql/pglite": "^0.1.4", + "@electric-sql/pglite": "^0.1.5", "@electric-sql/prisma-generator": "workspace:*", "@op-engineering/op-sqlite": ">= 2.0.16", "@tauri-apps/plugin-sql": "2.0.0-alpha.5", @@ -273,7 +273,7 @@ }, "peerDependencies": { "@capacitor-community/sqlite": ">= 5.6.2", - "@electric-sql/pglite": ">= 0.1.4", + "@electric-sql/pglite": ">= 0.1.5", "@op-engineering/op-sqlite": ">= 2.0.16", "@tauri-apps/plugin-sql": "2.0.0-alpha.5", "embedded-postgres": "16.1.1-beta.9", From b2f26240260c20427504464b689d0787b6ff47c3 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 22 Apr 2024 14:19:17 +0200 Subject: [PATCH 087/156] Move common query from migrator to query builder --- clients/typescript/src/migrators/bundle.ts | 37 ++----------------- .../src/migrators/query-builder/builder.ts | 5 +++ .../src/migrators/query-builder/pgBuilder.ts | 10 +++++ .../migrators/query-builder/sqliteBuilder.ts | 7 ++++ 4 files changed, 26 insertions(+), 33 deletions(-) diff --git a/clients/typescript/src/migrators/bundle.ts b/clients/typescript/src/migrators/bundle.ts index ead197a18c..753385b09f 100644 --- a/clients/typescript/src/migrators/bundle.ts +++ b/clients/typescript/src/migrators/bundle.ts @@ -8,12 +8,7 @@ import { import { DatabaseAdapter } from '../electric/adapter' import { buildInitialMigration as makeBaseMigration } from './schema' import Log from 'loglevel' -import { - SatelliteError, - SatelliteErrorCode, - SqlValue, - Statement, -} from '../util' +import { SatelliteError, SatelliteErrorCode, SqlValue } from '../util' import { ElectricSchema } from './schema' import { Kysely, @@ -62,16 +57,6 @@ export abstract class BundleMigratorBase implements Migrator { this.eb = expressionBuilder() } - /** - * Returns a SQL statement that checks if the given table exists. - * @param namespace The namespace where to check. - * @param tableName The name of the table to check for existence. - */ - abstract createTableExistsStatement( - namespace: string, - tableName: string - ): Statement - async up(): Promise { const existing = await this.queryApplied() const unapplied = await this.validateApplied(this.migrations, existing) @@ -90,9 +75,9 @@ export abstract class BundleMigratorBase implements Migrator { // If this is the first time we're running migrations, then the // migrations table won't exist. 
const namespace = this.electricQueryBuilder.defaultNamespace - const tableExists = this.createTableExistsStatement( - namespace, - this.tableName + const tableExists = this.electricQueryBuilder.tableExists( + this.tableName, + namespace ) const tables = await this.adapter.query(tableExists) return tables.length > 0 @@ -225,13 +210,6 @@ export class SqliteBundleMigrator extends BundleMigratorBase { } super(adapter, migrations, config, sqliteBuilder) } - - createTableExistsStatement(_namespace: string, tableName: string): Statement { - return { - sql: `SELECT 1 FROM sqlite_master WHERE type = 'table' AND name = ?`, - args: [tableName], - } - } } export class PgBundleMigrator extends BundleMigratorBase { @@ -246,11 +224,4 @@ export class PgBundleMigrator extends BundleMigratorBase { } super(adapter, migrations, config, pgBuilder) } - - createTableExistsStatement(namespace: string, tableName: string): Statement { - return { - sql: `SELECT 1 FROM information_schema.tables WHERE table_schema = $1 AND table_name = $2`, - args: [namespace, tableName], - } - } } diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index 64f4aaf3f6..ed1e7c7257 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -59,6 +59,11 @@ export abstract class QueryBuilder { */ abstract makePositionalParam(i: number): string + /** + * Checks if the given table exists. + */ + abstract tableExists(tableName: string, namespace?: string): Statement + /** * Counts tables whose name is included in `tables`. * The count is returned as `countName`. diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index a4eba8648c..017c88ab28 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -33,6 +33,16 @@ class PgBuilder extends QueryBuilder { return [] } + tableExists( + tableName: string, + namespace: string = this.defaultNamespace + ): Statement { + return { + sql: `SELECT 1 FROM information_schema.tables WHERE table_schema = $1 AND table_name = $2`, + args: [namespace, tableName], + } + } + countTablesIn(countName: string, tables: string[]): Statement { const sql = dedent` SELECT COUNT(table_name)::integer AS "${countName}" diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts index 5890e2535a..030c3a6ea6 100644 --- a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -36,6 +36,13 @@ class SqliteBuilder extends QueryBuilder { return [query] } + tableExists(tableName: string, _namespace?: string): Statement { + return { + sql: `SELECT 1 FROM sqlite_master WHERE type = 'table' AND name = ?`, + args: [tableName], + } + } + countTablesIn(countName: string, tables: string[]): Statement { const sql = dedent` SELECT count(name) as ${countName} FROM sqlite_master From cf7b2f1232aac9b3877af8ea9f7c1c9844b5cdb1 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 22 Apr 2024 15:46:42 +0200 Subject: [PATCH 088/156] Modified ava configuration to allow running tests for a specific dialect --- clients/typescript/ava.config.js | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/clients/typescript/ava.config.js 
b/clients/typescript/ava.config.js index c192d92d56..e7208b1d4e 100644 --- a/clients/typescript/ava.config.js +++ b/clients/typescript/ava.config.js @@ -1,4 +1,5 @@ const [major, minor, _patch] = process.versions.node.split('.').map(Number) +const testDialect = process.env.DIALECT let loaderArg if ( @@ -11,9 +12,26 @@ if ( loaderArg = '--loader=tsx' } +const files = ['test/**/*.test.ts', 'test/**/*.test.tsx'] +const ignorePostgres = ['!test/**/postgres/**'] +const ignorePglite = ['!test/**/pglite/**'] +const ignoreSqlite = ['!test/**/sqlite/**'] + +if (testDialect === 'postgres') { + files.push(...ignorePglite, ...ignoreSqlite) +} + +if (testDialect === 'pglite') { + files.push(...ignorePostgres, ...ignoreSqlite) +} + +if (testDialect === 'sqlite') { + files.push(...ignorePostgres, ...ignorePglite) +} + export default { timeout: '10m', - files: ['test/**/*.test.ts', 'test/**/*.test.tsx'], + files, extensions: { ts: 'module', tsx: 'module', From 5cc79b4a56a0ea63353d1b5e5db21fe4c61a74eb Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 22 Apr 2024 15:47:57 +0200 Subject: [PATCH 089/156] Remove obsolete comment --- clients/typescript/test/migrators/builder.test.ts | 6 ------ 1 file changed, 6 deletions(-) diff --git a/clients/typescript/test/migrators/builder.test.ts b/clients/typescript/test/migrators/builder.test.ts index ccfd361b7f..4a5887c79f 100644 --- a/clients/typescript/test/migrators/builder.test.ts +++ b/clients/typescript/test/migrators/builder.test.ts @@ -98,12 +98,6 @@ export const makeMigrationMetaData = (builder: QueryBuilder) => { } } -/* - How to make adapter for PG: - //const { db, stop } = await makePgDatabase('load-migration-meta-data', 5500) - //const adapter = new DatabaseAdapter(db) - */ - export type ContextType = { migrationMetaData: ReturnType builder: QueryBuilder From af2a1c9080c23c7637320f6fdf43a6691f7b4b29 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 22 Apr 2024 16:03:23 +0200 Subject: [PATCH 090/156] Small changes to tests --- .../test/client/model/pglite/builder.test.ts | 4 ++++ .../typescript/test/migrators/pglite/builder.test.ts | 8 ++++++-- clients/typescript/test/satellite/merge.test.ts | 11 +++++++++++ 3 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 clients/typescript/test/client/model/pglite/builder.test.ts diff --git a/clients/typescript/test/client/model/pglite/builder.test.ts b/clients/typescript/test/client/model/pglite/builder.test.ts new file mode 100644 index 0000000000..f44c534195 --- /dev/null +++ b/clients/typescript/test/client/model/pglite/builder.test.ts @@ -0,0 +1,4 @@ +import { builderTests } from '../builder' + +const dialect = 'Postgres' +builderTests(dialect) diff --git a/clients/typescript/test/migrators/pglite/builder.test.ts b/clients/typescript/test/migrators/pglite/builder.test.ts index c628fb25ee..306dba1ee5 100644 --- a/clients/typescript/test/migrators/pglite/builder.test.ts +++ b/clients/typescript/test/migrators/pglite/builder.test.ts @@ -1,6 +1,10 @@ import anyTest, { TestFn } from 'ava' import { makeMigration, parseMetadata } from '../../../src/migrators/builder' -import { ContextType, makeMigrationMetaData } from '../builder.test' +import { + ContextType, + bundleTests, + makeMigrationMetaData, +} from '../builder.test' import { PGlite } from '@electric-sql/pglite' import { DatabaseAdapter } from '../../../src/drivers/pglite' import { PgBundleMigrator } from '../../../src/migrators' @@ -21,7 +25,7 @@ test.beforeEach(async (t) => { // No need to run the bundleTests because // they are 
already ran by `../postgres/builder.test.ts` // and the tests do not use an actual PG database -//bundleTests(test) +bundleTests(test) test('load migration from meta data', async (t) => { const { migrationMetaData, builder } = t.context diff --git a/clients/typescript/test/satellite/merge.test.ts b/clients/typescript/test/satellite/merge.test.ts index 8a716cc147..64f9dba51d 100644 --- a/clients/typescript/test/satellite/merge.test.ts +++ b/clients/typescript/test/satellite/merge.test.ts @@ -22,9 +22,11 @@ import { } from '../../src/migrators/query-builder' import { DatabaseAdapter as SQLiteDatabaseAdapter } from '../../src/drivers/better-sqlite3' import { DatabaseAdapter as PgDatabaseAdapter } from '../../src/drivers/node-postgres/adapter' +import { DatabaseAdapter as PgliteDatabaseAdapter } from '../../src/drivers/pglite' import { DatabaseAdapter as DatabaseAdapterInterface } from '../../src/electric/adapter' import { makePgDatabase } from '../support/node-postgres' import { randomValue } from '../../src/util/random' +import { PGlite } from '@electric-sql/pglite' const qualifiedMergeTable = new QualifiedTablename( 'main', @@ -200,11 +202,20 @@ const setupPG: SetupFn = async (t: ExecutionContext) => { const defaults = satelliteDefaults(namespace) return [new PgDatabaseAdapter(db), pgBuilder, namespace, defaults] } + +const setupPglite: SetupFn = async (t: ExecutionContext) => { + const db = new PGlite() + t.teardown(async () => await db.close()) + const namespace = 'public' + const defaults = satelliteDefaults(namespace) + return [new PgliteDatabaseAdapter(db), pgBuilder, namespace, defaults] +} ;( [ ['SQLite', setupSqlite], ['Postgres', setupPG], + ['PGlite', setupPglite] ] as const ).forEach(([dialect, setup]) => { test(`(${dialect}) merge works on oplog entries`, async (t) => { From 7fa4806abcd346c00a7ab06b5fff12ac92b777b9 Mon Sep 17 00:00:00 2001 From: Oleksii Sholik Date: Thu, 15 Feb 2024 14:02:20 +0200 Subject: [PATCH 091/156] Add support for ?dialect=postgresql to /api/migrations Fix Elixir tests --- .../electric/test/electric/plug_test.exs | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/components/electric/test/electric/plug_test.exs b/components/electric/test/electric/plug_test.exs index 907622cd1d..ae1553ebbf 100644 --- a/components/electric/test/electric/plug_test.exs +++ b/components/electric/test/electric/plug_test.exs @@ -214,11 +214,25 @@ defmodule Electric.PlugTest do assert [ {~c"0003/migration.sql", "CREATE TABLE \"d\" (\n \"id\" TEXT NOT NULL,\n \"value\" TEXT NOT NULL,\n CONSTRAINT \"d_pkey\" PRIMARY KEY (\"id\")\n) WITHOUT ROWID;\n\n\nALTER TABLE \"d\" ADD COLUMN \"is_valid\" INTEGER;\n"}, - {~c"0003/metadata.json", _metadata_json_0003}, + {~c"0003/metadata.json", metadata_json_0003}, {~c"0004/migration.sql", "CREATE TABLE \"e\" (\n \"id\" TEXT NOT NULL,\n \"value\" TEXT NOT NULL,\n CONSTRAINT \"e_pkey\" PRIMARY KEY (\"id\")\n) WITHOUT ROWID;\n"}, - {~c"0004/metadata.json", _metadata_json_0004} + {~c"0004/metadata.json", metadata_json_0004} ] = file_list + + assert %{ + "format" => "SatOpMigrate", + "ops" => [_create_table, _alter_table], + "protocol_version" => "Electric.Satellite", + "version" => "0003" + } = Jason.decode!(metadata_json_0003) + + assert %{ + "format" => "SatOpMigrate", + "ops" => [_create_table], + "protocol_version" => "Electric.Satellite", + "version" => "0004" + } = Jason.decode!(metadata_json_0004) end test "returns error if dialect missing", _cxt do From 10f24491920fa73d0ff8fb5501fbb84bff01b340 Mon Sep 17 
00:00:00 2001 From: Kevin De Porre Date: Thu, 14 Mar 2024 09:43:24 +0100 Subject: [PATCH 092/156] Extend protocol with SQL dialect in start replication request. --- protocol/satellite.proto | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/protocol/satellite.proto b/protocol/satellite.proto index 7d91c9af26..412e306448 100644 --- a/protocol/satellite.proto +++ b/protocol/satellite.proto @@ -138,8 +138,10 @@ message SatInStartReplicationReq { */ repeated uint64 observed_transaction_data = 6; - // The SQL dialect used by the client - // Defaults to SQLite if not specified + /** + * The SQL dialect used by the client + * Defaults to SQLite if not specified + */ optional Dialect sql_dialect = 7; // Note: From 02f3758317e158d657105e3ac5dba459b69f31d3 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 14 Mar 2024 10:15:35 +0100 Subject: [PATCH 093/156] Modified SatelliteClient to specify the SQL dialect in the StartReplicationReq message. --- clients/typescript/src/satellite/client.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/clients/typescript/src/satellite/client.ts b/clients/typescript/src/satellite/client.ts index 837a413d50..426b62bd48 100644 --- a/clients/typescript/src/satellite/client.ts +++ b/clients/typescript/src/satellite/client.ts @@ -876,8 +876,7 @@ export class SatelliteClient implements Client { 'error', new SatelliteError( SatelliteErrorCode.UNEXPECTED_STATE, - `unexpected state ${ - ReplicationStatus[this.inbound.isReplicating] + `unexpected state ${ReplicationStatus[this.inbound.isReplicating] } handling 'relation' message` ) ) From ab664d514ea73b44c65e7a6ca0aa3d49962f09bb Mon Sep 17 00:00:00 2001 From: Oleksii Sholik Date: Thu, 14 Mar 2024 11:25:41 +0200 Subject: [PATCH 094/156] Support different dialects when encoding migrations for Satellite --- components/electric/lib/electric/satellite/protocol.ex | 3 ++- components/electric/lib/electric/satellite/protocol/state.ex | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/components/electric/lib/electric/satellite/protocol.ex b/components/electric/lib/electric/satellite/protocol.ex index 3bd0afe6cb..40a47e447a 100644 --- a/components/electric/lib/electric/satellite/protocol.ex +++ b/components/electric/lib/electric/satellite/protocol.ex @@ -125,7 +125,8 @@ defmodule Electric.Satellite.Protocol do %State{} = state ) do Logger.debug( - "Received start replication request lsn: #{inspect(client_lsn)} with options: #{inspect(opts)} and dialect: #{inspect(msg.sql_dialect)}" + "Received start replication request lsn: #{inspect(client_lsn)} " <> + "with options: #{inspect(opts)} and dialect: #{inspect(msg.sql_dialect)}" ) with :ok <- validate_schema_version(msg.schema_version), diff --git a/components/electric/lib/electric/satellite/protocol/state.ex b/components/electric/lib/electric/satellite/protocol/state.ex index 8eb9679e06..f5af6a67a8 100644 --- a/components/electric/lib/electric/satellite/protocol/state.ex +++ b/components/electric/lib/electric/satellite/protocol/state.ex @@ -16,6 +16,7 @@ defmodule Electric.Satellite.Protocol.State do origin: "", subscriptions: %{}, subscription_data_fun: nil, + sql_dialect: Electric.Postgres.Dialect.SQLite, move_in_data_fun: nil, sql_dialect: Electric.Postgres.Dialect.SQLite, telemetry: nil @@ -33,6 +34,7 @@ defmodule Electric.Satellite.Protocol.State do origin: Connectors.origin(), subscriptions: map(), subscription_data_fun: fun(), + sql_dialect: Electric.Postgres.Dialect.SQLite | Electric.Postgres.Dialect.Postgresql, 
move_in_data_fun: fun(), sql_dialect: Electric.Postgres.Dialect.SQLite | Electric.Postgres.Dialect.Postgresql, telemetry: Telemetry.t() | nil From bf14493ca7117a1df29defff16284032756ffd52 Mon Sep 17 00:00:00 2001 From: Oleksii Sholik Date: Thu, 25 Jan 2024 13:46:44 +0200 Subject: [PATCH 095/156] Extend the SatOpMigrate message with enum type support --- protocol/satellite.proto | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/protocol/satellite.proto b/protocol/satellite.proto index 412e306448..80dbce707e 100644 --- a/protocol/satellite.proto +++ b/protocol/satellite.proto @@ -399,6 +399,7 @@ message SatOpMigrate { enum Type { CREATE_TABLE = 0; CREATE_INDEX = 1; + CREATE_ENUM_TYPE = 2; ALTER_ADD_COLUMN = 6; } message Stmt { @@ -440,17 +441,34 @@ message SatOpMigrate { repeated ForeignKey fks = 3; repeated string pks = 4; } + message EnumType { + string name = 1; + repeated string values = 2; + } + // the migration version as specified by the developer and put into // the postgresql migration as an electric function call string version = 1; - // a list of sql ddl statements to apply, converted from the pg originals + + // A list of SQL DDL statements to apply, translated from Postgres to SQLite dialect. + // // The migration machinery converts an `ALTER TABLE action1, action2, action3;` // query into a set of 3: `ALTER TABLE action1; ALTER TABLE action2,` etc // so we need to support 1+ statements for every migration event. + // + // There is an exception for enum types. Since SQLite does not have a matching concept, + // the original Postgres DDL statement `CREATE TYPE ... AS ENUM (...)` is included as is, + // without translation. repeated Stmt stmts = 2; - // The resulting table definition after applying these migrations - // (a DDL statement can only affect one table at a time). - optional Table table = 3; + + oneof affected_entity { + // The resulting table definition after applying these migrations + // (a DDL statement can only affect one table at a time). + Table table = 3; + + // This field is set if stmts includes a single item which is an enum type definition. + EnumType enum_type = 4; + } } // (Consumer) Request for new subscriptions From 572ef358ebfef3038ef4ecdd6c2735dbc3da151d Mon Sep 17 00:00:00 2001 From: Oleksii Sholik Date: Thu, 25 Jan 2024 14:13:17 +0200 Subject: [PATCH 096/156] Translate enum type DDL for SatOpMigrate messages --- .../lib/electric/postgres/dialect/builder.ex | 16 ++ .../lib/electric/postgres/dialect/sqlite.ex | 19 +++ .../lib/electric/postgres/replication.ex | 29 +++- .../lib/electric/postgres/schema/update.ex | 2 +- .../lib/electric/postgres/types/array.ex | 4 +- .../electric/test/electric/plug_test.exs | 72 +++++---- .../electric/postgres/replication_test.exs | 153 ++++++++++++++++-- .../electric/satellite/serialization_test.exs | 42 ++--- .../satellite/ws_validations_test.exs | 56 +++---- .../test/support/satellite_helpers.ex | 6 +- 10 files changed, 295 insertions(+), 104 deletions(-) diff --git a/components/electric/lib/electric/postgres/dialect/builder.ex b/components/electric/lib/electric/postgres/dialect/builder.ex index f54dfa2d3b..216dd1b0ab 100644 --- a/components/electric/lib/electric/postgres/dialect/builder.ex +++ b/components/electric/lib/electric/postgres/dialect/builder.ex @@ -73,6 +73,14 @@ defmodule Electric.Postgres.Dialect.Builder do quote_name(name) end + def quote_name([schema, name]) do + quote_name(schema) <> "." 
<> quote_name(name) + end + + def quote_name([name]) do + quote_name(name) + end + def quote_name(name) when is_binary(name) do ~s("#{name}") end @@ -86,6 +94,14 @@ defmodule Electric.Postgres.Dialect.Builder do unquoted_name(name) end + def unquoted_name([_schema, name]) do + name + end + + def unquoted_name([name]) do + name + end + def unquoted_name(name) when is_binary(name) do name end diff --git a/components/electric/lib/electric/postgres/dialect/sqlite.ex b/components/electric/lib/electric/postgres/dialect/sqlite.ex index 7541a7d9a0..cbda84e6ae 100644 --- a/components/electric/lib/electric/postgres/dialect/sqlite.ex +++ b/components/electric/lib/electric/postgres/dialect/sqlite.ex @@ -119,6 +119,25 @@ defmodule Electric.Postgres.Dialect.SQLite do ]) <> ";\n" end + # SQLite does not have an equivalent for enum types in Postgres. + # We pass the original statement through unchanged. + def to_sql(%Pg.CreateEnumStmt{} = stmt, _opts) do + name = Schema.AST.map(stmt.type_name) + values = Schema.AST.map(stmt.vals) + + serialized_values = + values + |> Electric.Postgres.Types.Array.serialize(?') + |> String.slice(1..-2//1) + + stmt([ + "-- CREATE TYPE", + quote_name(name), + "AS ENUM", + paren(serialized_values) + ]) <> ";\n" + end + defp alter_table_cmd(%Pg.Node{node: {_, cmd}}, table, opts) do alter_table_cmd(cmd, table, opts) end diff --git a/components/electric/lib/electric/postgres/replication.ex b/components/electric/lib/electric/postgres/replication.ex index 0916ada59d..2c92bb1b4d 100644 --- a/components/electric/lib/electric/postgres/replication.ex +++ b/components/electric/lib/electric/postgres/replication.ex @@ -83,6 +83,10 @@ defmodule Electric.Postgres.Replication do :CREATE_INDEX end + def stmt_type(%Pg.CreateEnumStmt{}) do + :CREATE_ENUM_TYPE + end + def stmt_type(%Pg.AlterTableStmt{cmds: [cmd]}) do case cmd do %{node: {:alter_table_cmd, %Pg.AlterTableCmd{subtype: :AT_AddColumn}}} -> @@ -140,9 +144,29 @@ defmodule Electric.Postgres.Replication do } ) + enum_type = + ast + |> Enum.filter(&match?(%Pg.CreateEnumStmt{}, &1)) + |> Enum.map(fn enum_ast -> + name = AST.map(enum_ast.type_name) + values = AST.map(enum_ast.vals) + %SatOpMigrate.EnumType{name: Dialect.table_name(name, dialect), values: values} + end) + |> case do + [] -> nil + [enum] -> enum + end + + affected_entity = + case {table, enum_type} do + {%SatOpMigrate.Table{}, nil} -> {:table, table} + {nil, %SatOpMigrate.EnumType{}} -> {:enum_type, enum_type} + {nil, nil} -> nil + end + {%SatOpMigrate{ version: SchemaLoader.Version.version(schema_version), - table: table, + affected_entity: affected_entity, stmts: stmts }, relations} end @@ -163,6 +187,9 @@ defmodule Electric.Postgres.Replication do } -> true + %Pg.CreateEnumStmt{} -> + true + _else -> false end) diff --git a/components/electric/lib/electric/postgres/schema/update.ex b/components/electric/lib/electric/postgres/schema/update.ex index 99e18b1215..4e62233dca 100644 --- a/components/electric/lib/electric/postgres/schema/update.ex +++ b/components/electric/lib/electric/postgres/schema/update.ex @@ -376,7 +376,7 @@ defmodule Electric.Postgres.Schema.Update do {[], schema} end - defp do_update(%PgQuery.CreateEnumStmt{} = action, schema, opts) do + defp do_update(%Pg.CreateEnumStmt{} = action, schema, opts) do name = case AST.map(action.type_name, opts) do [schema, name] -> %Proto.RangeVar{schema: schema, name: name} diff --git a/components/electric/lib/electric/postgres/types/array.ex b/components/electric/lib/electric/postgres/types/array.ex index 
691d552a68..d6256bee2d 100644 --- a/components/electric/lib/electric/postgres/types/array.ex +++ b/components/electric/lib/electric/postgres/types/array.ex @@ -85,11 +85,11 @@ defmodule Electric.Postgres.Types.Array do iex> str |> parse() |> serialize() str """ - def serialize(array) when is_list(array) do + def serialize(array, quote_char \\ ?") when is_list(array) do array |> Enum.map_join(",", fn nil -> "null" - val when is_binary(val) -> val |> String.replace(~S|"|, ~S|\"|) |> enclose(~S|"|) + val when is_binary(val) -> val |> String.replace(~S|"|, ~S|\"|) |> enclose(<>) end) |> enclose("{", "}") end diff --git a/components/electric/test/electric/plug_test.exs b/components/electric/test/electric/plug_test.exs index ae1553ebbf..5bf9ee4a76 100644 --- a/components/electric/test/electric/plug_test.exs +++ b/components/electric/test/electric/plug_test.exs @@ -102,23 +102,25 @@ defmodule Electric.PlugTest do "CREATE TABLE \"a\" (\n \"id\" TEXT NOT NULL,\n \"value\" TEXT NOT NULL,\n CONSTRAINT \"a_pkey\" PRIMARY KEY (\"id\")\n) WITHOUT ROWID;\n" } ], - table: %SatOpMigrate.Table{ - name: "a", - columns: [ - %SatOpMigrate.Column{ - name: "id", - sqlite_type: "TEXT", - pg_type: %SatOpMigrate.PgColumnType{name: "uuid"} - }, - %SatOpMigrate.Column{ - name: "value", - sqlite_type: "TEXT", - pg_type: %SatOpMigrate.PgColumnType{name: "text"} - } - ], - fks: [], - pks: ["id"] - }, + affected_entity: + {:table, + %SatOpMigrate.Table{ + name: "a", + columns: [ + %SatOpMigrate.Column{ + name: "id", + sqlite_type: "TEXT", + pg_type: %SatOpMigrate.PgColumnType{name: "uuid"} + }, + %SatOpMigrate.Column{ + name: "value", + sqlite_type: "TEXT", + pg_type: %SatOpMigrate.PgColumnType{name: "text"} + } + ], + fks: [], + pks: ["id"] + }}, version: "0001" }} = op1 |> Base.decode64!() |> SatOpMigrate.decode() @@ -172,23 +174,25 @@ defmodule Electric.PlugTest do sql: "CREATE TABLE a (id uuid PRIMARY KEY, value text NOT NULL);" } ], - table: %SatOpMigrate.Table{ - name: "a", - columns: [ - %SatOpMigrate.Column{ - name: "id", - sqlite_type: "TEXT", - pg_type: %SatOpMigrate.PgColumnType{name: "uuid"} - }, - %SatOpMigrate.Column{ - name: "value", - sqlite_type: "TEXT", - pg_type: %SatOpMigrate.PgColumnType{name: "text"} - } - ], - fks: [], - pks: ["id"] - }, + affected_entity: + {:table, + %SatOpMigrate.Table{ + name: "a", + columns: [ + %SatOpMigrate.Column{ + name: "id", + sqlite_type: "TEXT", + pg_type: %SatOpMigrate.PgColumnType{name: "uuid"} + }, + %SatOpMigrate.Column{ + name: "value", + sqlite_type: "TEXT", + pg_type: %SatOpMigrate.PgColumnType{name: "text"} + } + ], + fks: [], + pks: ["id"] + }}, version: "0001" }} = op1 |> Base.decode64!() |> SatOpMigrate.decode() diff --git a/components/electric/test/electric/postgres/replication_test.exs b/components/electric/test/electric/postgres/replication_test.exs index a2c0de2bb1..c8b2a7935d 100644 --- a/components/electric/test/electric/postgres/replication_test.exs +++ b/components/electric/test/electric/postgres/replication_test.exs @@ -3,7 +3,7 @@ defmodule Electric.Postgres.ReplicationTest do use Electric.Satellite.Protobuf - alias Electric.Postgres.{Replication, Schema, Extension.SchemaLoader} + alias Electric.Postgres.{Dialect, Extension.SchemaLoader, Replication, Schema} def parse(sql) do Electric.Postgres.parse!(sql) @@ -13,7 +13,8 @@ defmodule Electric.Postgres.ReplicationTest do stmts = [ {"create table doorbel (id int8);", :CREATE_TABLE}, {"create index on frog (id asc);", :CREATE_INDEX}, - {"alter table public.fish add value text;", :ALTER_ADD_COLUMN} + 
{"alter table public.fish add value text;", :ALTER_ADD_COLUMN}, + {"create type foo as enum ('bar', 'baz');", :CREATE_ENUM_TYPE} ] for {sql, expected_type} <- stmts do @@ -117,7 +118,7 @@ defmodule Electric.Postgres.ReplicationTest do end describe "migrate/2" do - test "updates the schema and returns a valid protcol message" do + test "updates the schema and returns a valid protocol message" do stmt = "CREATE TABLE public.fish (id int8 PRIMARY KEY);" schema = schema_update(stmt) @@ -126,11 +127,10 @@ defmodule Electric.Postgres.ReplicationTest do assert {:ok, [msg], [{"public", "fish"}]} = Replication.migrate(schema_version, stmt) - # there are lots of tests that validate the schema is being properly updated - # assert Schema.table_names(schema) == [~s("public"."fish"), ~s("frog"), ~s("teeth"."front")] assert Schema.table_names(schema) == [~s("public"."fish")] - assert %SatOpMigrate{version: ^version} = msg - %{stmts: stmts, table: table} = msg + + assert %SatOpMigrate{version: ^version, stmts: stmts, affected_entity: {:table, table}} = + msg assert stmts == [ %SatOpMigrate.Stmt{ @@ -167,8 +167,9 @@ defmodule Electric.Postgres.ReplicationTest do assert {:ok, [msg], [{"teeth", "front"}]} = Replication.migrate(schema_version, stmt) assert Schema.table_names(schema) == [~s("public"."fish"), ~s("teeth"."front")] - assert %SatOpMigrate{version: ^version} = msg - %{stmts: stmts, table: table} = msg + + assert %SatOpMigrate{version: ^version, stmts: stmts, affected_entity: {:table, table}} = + msg assert stmts == [ %SatOpMigrate.Stmt{ @@ -223,9 +224,8 @@ defmodule Electric.Postgres.ReplicationTest do assert {:ok, [msg], [{"public", "fish"}]} = Replication.migrate(schema_version, stmt) - assert %SatOpMigrate{version: ^version} = msg - - %{stmts: stmts, table: table} = msg + assert %SatOpMigrate{version: ^version, stmts: stmts, affected_entity: {:table, table}} = + msg assert stmts == [ %SatOpMigrate.Stmt{ @@ -277,9 +277,9 @@ defmodule Electric.Postgres.ReplicationTest do version = "20230405134616" schema_version = SchemaLoader.Version.new(version, schema) assert {:ok, [msg], []} = Replication.migrate(schema_version, stmt) - assert %SatOpMigrate{version: ^version} = msg - %{stmts: stmts, table: table} = msg + assert %SatOpMigrate{version: ^version, stmts: stmts, affected_entity: nil} = + msg assert stmts == [ %SatOpMigrate.Stmt{ @@ -287,8 +287,6 @@ defmodule Electric.Postgres.ReplicationTest do type: :CREATE_INDEX } ] - - assert is_nil(table) end test "pg-only ddl statements don't generate a message" do @@ -313,6 +311,129 @@ defmodule Electric.Postgres.ReplicationTest do end end + test "updates the schema and returns a valid protocol message for an enum using sqlite dialect" do + stmt = "CREATE TYPE public.colour AS ENUM ('red', 'green', 'blue');" + schema = schema_update(stmt) + + version = "20240418002800" + schema_version = SchemaLoader.Version.new(version, schema) + + assert {:ok, [msg], []} = Replication.migrate(schema_version, stmt, Dialect.SQLite) + + assert %SatOpMigrate{version: ^version, stmts: stmts, affected_entity: {:enum_type, enum}} = + msg + + assert stmts == [ + %SatOpMigrate.Stmt{ + type: :CREATE_ENUM_TYPE, + sql: "-- CREATE TYPE \"public\".\"colour\" AS ENUM ('red','green','blue');\n" + } + ] + + assert enum == %SatOpMigrate.EnumType{name: "colour", values: ["red", "green", "blue"]} + + stmt = "CREATE TABLE public.wall (id int8 PRIMARY KEY, finish public.colour);" + schema = schema_update(stmt) + + version = "20240418002801" + schema_version = SchemaLoader.Version.new(version, 
schema) + + assert {:ok, [msg], [{"public", "wall"}]} = + Replication.migrate(schema_version, stmt, Dialect.SQLite) + + assert Schema.table_names(schema) == [~s("public"."wall")] + + assert %SatOpMigrate{version: ^version, stmts: stmts, affected_entity: {:table, table}} = + msg + + assert stmts == [ + %SatOpMigrate.Stmt{ + type: :CREATE_TABLE, + sql: + "CREATE TABLE \"wall\" (\n \"id\" INTEGER NOT NULL,\n \"finish\" TEXT,\n CONSTRAINT \"wall_pkey\" PRIMARY KEY (\"id\")\n) WITHOUT ROWID;\n" + } + ] + + assert table == %SatOpMigrate.Table{ + name: "wall", + columns: [ + %SatOpMigrate.Column{ + name: "id", + sqlite_type: "INTEGER", + pg_type: %SatOpMigrate.PgColumnType{name: "int8", array: [], size: []} + }, + %SatOpMigrate.Column{ + name: "finish", + sqlite_type: "TEXT", + pg_type: %SatOpMigrate.PgColumnType{name: "public.colour", array: [], size: []} + } + ], + fks: [], + pks: ["id"] + } + end + + test "updates the schema and returns a valid protocol message for an enum using postgresql dialect" do + stmt = "CREATE TYPE public.colour AS ENUM ('red', 'green', 'blue');" + schema = schema_update(stmt) + + version = "20240418002800" + schema_version = SchemaLoader.Version.new(version, schema) + + assert {:ok, [msg], []} = Replication.migrate(schema_version, stmt, Dialect.Postgresql) + + assert %SatOpMigrate{version: ^version, stmts: stmts, affected_entity: {:enum_type, enum}} = + msg + + assert stmts == [ + %SatOpMigrate.Stmt{ + type: :CREATE_ENUM_TYPE, + sql: "CREATE TYPE public.colour AS ENUM ('red', 'green', 'blue');" + } + ] + + assert enum == %SatOpMigrate.EnumType{name: "colour", values: ["red", "green", "blue"]} + + stmt = "CREATE TABLE public.wall (id int8 PRIMARY KEY,finish public.colour);" + schema = schema_update(stmt) + + version = "20240418002801" + schema_version = SchemaLoader.Version.new(version, schema) + + assert {:ok, [msg], [{"public", "wall"}]} = + Replication.migrate(schema_version, stmt, Dialect.Postgresql) + + assert Schema.table_names(schema) == [~s("public"."wall")] + + assert %SatOpMigrate{version: ^version, stmts: stmts, affected_entity: {:table, table}} = + msg + + assert stmts == [ + %SatOpMigrate.Stmt{ + type: :CREATE_TABLE, + sql: "CREATE TABLE public.wall (id int8 PRIMARY KEY,finish public.colour);" + } + ] + + assert table == %SatOpMigrate.Table{ + name: "wall", + columns: [ + %SatOpMigrate.Column{ + name: "id", + sqlite_type: "INTEGER", + pg_type: %SatOpMigrate.PgColumnType{name: "int8", array: [], size: []} + }, + %SatOpMigrate.Column{ + name: "finish", + sqlite_type: "TEXT", + pg_type: %SatOpMigrate.PgColumnType{name: "public.colour", array: [], size: []} + } + ], + fks: [], + pks: ["id"] + } + end + # TODO: actually I think this is a situation we *MUST* avoid by # checking for unsupported migrations in the pg event trigger # function. 
by the time it reaches this point it would be too late diff --git a/components/electric/test/electric/satellite/serialization_test.exs b/components/electric/test/electric/satellite/serialization_test.exs index 6ac5ed8432..7b4e10c704 100644 --- a/components/electric/test/electric/satellite/serialization_test.exs +++ b/components/electric/test/electric/satellite/serialization_test.exs @@ -461,12 +461,14 @@ defmodule Electric.Satellite.SerializationTest do stmts: [ %SatOpMigrate.Stmt{type: :CREATE_TABLE, sql: sql1} ], - table: %SatOpMigrate.Table{ - name: "something_else", - columns: [%SatOpMigrate.Column{name: "id", sqlite_type: "TEXT"}], - fks: [], - pks: ["id"] - } + affected_entity: + {:table, + %SatOpMigrate.Table{ + name: "something_else", + columns: [%SatOpMigrate.Column{name: "id", sqlite_type: "TEXT"}], + fks: [], + pks: ["id"] + }} } = migration1 assert sql1 =~ ~r/^CREATE TABLE "something_else"/ @@ -475,12 +477,14 @@ defmodule Electric.Satellite.SerializationTest do stmts: [ %SatOpMigrate.Stmt{type: :CREATE_TABLE, sql: sql2} ], - table: %SatOpMigrate.Table{ - name: "other_thing", - columns: [%SatOpMigrate.Column{name: "id", sqlite_type: "TEXT"}], - fks: [], - pks: ["id"] - } + affected_entity: + {:table, + %SatOpMigrate.Table{ + name: "other_thing", + columns: [%SatOpMigrate.Column{name: "id", sqlite_type: "TEXT"}], + fks: [], + pks: ["id"] + }} } = migration2 assert sql2 =~ ~r/^CREATE TABLE "other_thing"/ @@ -489,12 +493,14 @@ defmodule Electric.Satellite.SerializationTest do stmts: [ %SatOpMigrate.Stmt{type: :CREATE_TABLE, sql: sql3} ], - table: %SatOpMigrate.Table{ - name: "yet_another_thing", - columns: [%SatOpMigrate.Column{name: "id", sqlite_type: "TEXT"}], - fks: [], - pks: ["id"] - } + affected_entity: + {:table, + %SatOpMigrate.Table{ + name: "yet_another_thing", + columns: [%SatOpMigrate.Column{name: "id", sqlite_type: "TEXT"}], + fks: [], + pks: ["id"] + }} } = migration3 assert sql3 =~ ~r/^CREATE TABLE "yet_another_thing"/ diff --git a/components/electric/test/electric/satellite/ws_validations_test.exs b/components/electric/test/electric/satellite/ws_validations_test.exs index 300bbb7984..a17330271a 100644 --- a/components/electric/test/electric/satellite/ws_validations_test.exs +++ b/components/electric/test/electric/satellite/ws_validations_test.exs @@ -42,7 +42,7 @@ defmodule Electric.Satellite.WsValidationsTest do electrify: "public.foo" ) - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> tx_op_log = serialize_trans(%{"id" => "1", "num" => "433", "t2" => "hello"}) MockClient.send_data(conn, tx_op_log) @@ -76,7 +76,7 @@ defmodule Electric.Satellite.WsValidationsTest do ] Enum.each(records, fn record -> - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) assert_receive {^conn, %SatErrorResp{error_type: :INVALID_REQUEST}}, @receive_timeout @@ -101,7 +101,7 @@ defmodule Electric.Satellite.WsValidationsTest do %{"id" => "3", "b" => nil} ] - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> Enum.each(valid_records, fn record -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) @@ -122,7 +122,7 @@ defmodule Electric.Satellite.WsValidationsTest do ] Enum.each(invalid_records, fn record -> - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, 
tx_op_log) assert_receive {^conn, %SatErrorResp{error_type: :INVALID_REQUEST}}, @receive_timeout @@ -150,7 +150,7 @@ defmodule Electric.Satellite.WsValidationsTest do %{"id" => "6", "i8_1" => "-9223372036854775808", "i8_2" => "+9223372036854775807"} ] - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> Enum.each(valid_records, fn record -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) @@ -180,7 +180,7 @@ defmodule Electric.Satellite.WsValidationsTest do ] Enum.each(invalid_records, fn record -> - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) assert_receive {^conn, %SatErrorResp{error_type: :INVALID_REQUEST}}, @receive_timeout @@ -218,7 +218,7 @@ defmodule Electric.Satellite.WsValidationsTest do %{"id" => "16", "f4" => "nan", "f8" => "nAn"} ] - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> Enum.each(valid_records, fn record -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) @@ -261,7 +261,7 @@ defmodule Electric.Satellite.WsValidationsTest do ] Enum.each(invalid_records, fn record -> - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) assert_receive {^conn, %SatErrorResp{error_type: :INVALID_REQUEST}}, @receive_timeout @@ -283,7 +283,7 @@ defmodule Electric.Satellite.WsValidationsTest do %{"id" => Electric.Utils.uuid4()} ] - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> Enum.each(valid_records, fn record -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) @@ -301,7 +301,7 @@ defmodule Electric.Satellite.WsValidationsTest do ] Enum.each(invalid_records, fn record -> - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) assert_receive {^conn, %SatErrorResp{error_type: :INVALID_REQUEST}}, @receive_timeout @@ -324,7 +324,7 @@ defmodule Electric.Satellite.WsValidationsTest do %{"id" => "4", "d" => "0001-01-01"} ] - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> Enum.each(valid_records, fn record -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) @@ -346,7 +346,7 @@ defmodule Electric.Satellite.WsValidationsTest do ] Enum.each(invalid_records, fn record -> - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) assert_receive {^conn, %SatErrorResp{error_type: :INVALID_REQUEST}}, @receive_timeout @@ -369,7 +369,7 @@ defmodule Electric.Satellite.WsValidationsTest do %{"id" => "4", "t" => "11:11:11.11"} ] - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> Enum.each(valid_records, fn record -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) @@ -393,7 +393,7 @@ defmodule Electric.Satellite.WsValidationsTest do ] Enum.each(invalid_records, fn record -> - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) assert_receive {^conn, %SatErrorResp{error_type: :INVALID_REQUEST}}, 
@receive_timeout @@ -417,7 +417,7 @@ defmodule Electric.Satellite.WsValidationsTest do %{"id" => "2", "t2" => "2023-08-07 00:00:00Z"} ] - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> Enum.each(valid_records, fn record -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) @@ -439,7 +439,7 @@ defmodule Electric.Satellite.WsValidationsTest do ] Enum.each(invalid_records, fn record -> - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) assert_receive {^conn, %SatErrorResp{error_type: :INVALID_REQUEST}}, @receive_timeout @@ -468,7 +468,7 @@ defmodule Electric.Satellite.WsValidationsTest do %{"id" => "7", "jb" => ~s'[1, 2.0, 3e5, true, false, null, "", ["It\'s \u26a1"]]'} ] - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> Enum.each(valid_records, fn record -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) @@ -490,7 +490,7 @@ defmodule Electric.Satellite.WsValidationsTest do ] Enum.each(invalid_records, fn record -> - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) assert_receive {^conn, %SatErrorResp{error_type: :INVALID_REQUEST}}, @receive_timeout @@ -517,7 +517,7 @@ defmodule Electric.Satellite.WsValidationsTest do %{"id" => "5", "blob" => "\\x0001ff"} ] - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> Enum.each(valid_records, fn record -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) @@ -545,7 +545,7 @@ defmodule Electric.Satellite.WsValidationsTest do %{"id" => "4", "cup_of" => nil} ] - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> Enum.each(valid_records, fn record -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) @@ -563,7 +563,7 @@ defmodule Electric.Satellite.WsValidationsTest do ] Enum.each(invalid_records, fn record -> - within_replication_context(ctx, vsn, fn conn -> + within_replication_context(ctx, fn conn -> tx_op_log = serialize_trans(record) MockClient.send_data(conn, tx_op_log) assert_receive {^conn, %SatErrorResp{error_type: :INVALID_REQUEST}}, @receive_timeout @@ -571,7 +571,7 @@ defmodule Electric.Satellite.WsValidationsTest do end) end - defp within_replication_context(ctx, vsn, expectation_fn) do + defp within_replication_context(ctx, expectation_fn) do with_connect(ctx.conn_opts, fn conn -> # Replication start ceremony start_replication_and_assert_response(conn, 0) @@ -579,15 +579,9 @@ defmodule Electric.Satellite.WsValidationsTest do # Confirm the server has sent the migration to the client assert_receive {^conn, %SatRelation{table_name: @table_name} = relation}, @receive_timeout - assert_receive {^conn, - %SatOpLog{ - ops: [ - %SatTransOp{op: {:begin, %SatOpBegin{is_migration: true}}}, - %SatTransOp{op: {:migrate, %{version: ^vsn}}}, - %SatTransOp{op: {:commit, _}} - ] - }}, - @receive_timeout + assert_receive {^conn, %SatOpLog{ops: ops}}, @receive_timeout + assert %SatTransOp{op: {:begin, %SatOpBegin{is_migration: true}}} = List.first(ops) + assert %SatTransOp{op: {:commit, _}} = List.last(ops) # The client has to repeat the relation message to the server MockClient.send_data(conn, relation) diff --git 
a/components/electric/test/support/satellite_helpers.ex b/components/electric/test/support/satellite_helpers.ex index 63e3297bb3..b9c59a3873 100644 --- a/components/electric/test/support/satellite_helpers.ex +++ b/components/electric/test/support/satellite_helpers.ex @@ -118,7 +118,11 @@ defmodule ElectricTest.SatelliteHelpers do %SatOpLog{ ops: [ %{op: {:begin, %SatOpBegin{is_migration: true, lsn: lsn_str}}}, - %{op: {:migrate, %{version: ^version, table: %{name: ^table_name}}}}, + %{ + op: + {:migrate, + %{version: ^version, affected_entity: {:table, %{name: ^table_name}}}} + }, %{op: {:commit, _}} ] }} From a083c29e7c95d3bb0ab409c12b3ea2ebab8578da Mon Sep 17 00:00:00 2001 From: Oleksii Sholik Date: Wed, 17 Apr 2024 16:53:11 +0300 Subject: [PATCH 097/156] Update generated protocol messages --- .../src/_generated/protocol/satellite.ts | 96 +++- .../electric/satellite/protobuf_messages.ex | 455 +++++++++++++++++- 2 files changed, 525 insertions(+), 26 deletions(-) diff --git a/clients/typescript/src/_generated/protocol/satellite.ts b/clients/typescript/src/_generated/protocol/satellite.ts index 5bd4e7f18e..ad1db99218 100644 --- a/clients/typescript/src/_generated/protocol/satellite.ts +++ b/clients/typescript/src/_generated/protocol/satellite.ts @@ -429,22 +429,32 @@ export interface SatOpMigrate { */ version: string; /** - * a list of sql ddl statements to apply, converted from the pg originals + * A list of SQL DDL statements to apply, translated from Postgres to SQLite dialect. + * * The migration machinery converts an `ALTER TABLE action1, action2, action3;` * query into a set of 3: `ALTER TABLE action1; ALTER TABLE action2,` etc * so we need to support 1+ statements for every migration event. + * + * There is an exception for enum types. Since SQLite does not have a matching concept, + * the original Postgres DDL statement `CREATE TYPE ... AS ENUM (...)` is included as is, + * without translation. */ stmts: SatOpMigrate_Stmt[]; /** * The resulting table definition after applying these migrations * (a DDL statement can only affect one table at a time). */ - table?: SatOpMigrate_Table | undefined; + table?: + | SatOpMigrate_Table + | undefined; + /** This field is set if stmts includes a single item which is an enum type definition. 
*/ + enumType?: SatOpMigrate_EnumType | undefined; } export enum SatOpMigrate_Type { CREATE_TABLE = 0, CREATE_INDEX = 1, + CREATE_ENUM_TYPE = 2, ALTER_ADD_COLUMN = 6, UNRECOGNIZED = -1, } @@ -499,6 +509,12 @@ export interface SatOpMigrate_Table { pks: string[]; } +export interface SatOpMigrate_EnumType { + $type: "Electric.Satellite.SatOpMigrate.EnumType"; + name: string; + values: string[]; +} + /** (Consumer) Request for new subscriptions */ export interface SatSubsReq { $type: "Electric.Satellite.SatSubsReq"; @@ -2738,7 +2754,7 @@ export const SatOpRow = { messageTypeRegistry.set(SatOpRow.$type, SatOpRow); function createBaseSatOpMigrate(): SatOpMigrate { - return { $type: "Electric.Satellite.SatOpMigrate", version: "", stmts: [], table: undefined }; + return { $type: "Electric.Satellite.SatOpMigrate", version: "", stmts: [], table: undefined, enumType: undefined }; } export const SatOpMigrate = { @@ -2754,6 +2770,9 @@ export const SatOpMigrate = { if (message.table !== undefined) { SatOpMigrate_Table.encode(message.table, writer.uint32(26).fork()).ldelim(); } + if (message.enumType !== undefined) { + SatOpMigrate_EnumType.encode(message.enumType, writer.uint32(34).fork()).ldelim(); + } return writer; }, @@ -2785,6 +2804,13 @@ export const SatOpMigrate = { message.table = SatOpMigrate_Table.decode(reader, reader.uint32()); continue; + case 4: + if (tag !== 34) { + break; + } + + message.enumType = SatOpMigrate_EnumType.decode(reader, reader.uint32()); + continue; } if ((tag & 7) === 4 || tag === 0) { break; @@ -2805,6 +2831,9 @@ export const SatOpMigrate = { message.table = (object.table !== undefined && object.table !== null) ? SatOpMigrate_Table.fromPartial(object.table) : undefined; + message.enumType = (object.enumType !== undefined && object.enumType !== null) + ? SatOpMigrate_EnumType.fromPartial(object.enumType) + : undefined; return message; }, }; @@ -3197,6 +3226,67 @@ export const SatOpMigrate_Table = { messageTypeRegistry.set(SatOpMigrate_Table.$type, SatOpMigrate_Table); +function createBaseSatOpMigrate_EnumType(): SatOpMigrate_EnumType { + return { $type: "Electric.Satellite.SatOpMigrate.EnumType", name: "", values: [] }; +} + +export const SatOpMigrate_EnumType = { + $type: "Electric.Satellite.SatOpMigrate.EnumType" as const, + + encode(message: SatOpMigrate_EnumType, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + for (const v of message.values) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SatOpMigrate_EnumType { + const reader = input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseSatOpMigrate_EnumType(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.values.push(reader.string()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + create, I>>(base?: I): SatOpMigrate_EnumType { + return SatOpMigrate_EnumType.fromPartial(base ?? {}); + }, + + fromPartial, I>>(object: I): SatOpMigrate_EnumType { + const message = createBaseSatOpMigrate_EnumType(); + message.name = object.name ?? 
""; + message.values = object.values?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(SatOpMigrate_EnumType.$type, SatOpMigrate_EnumType); + function createBaseSatSubsReq(): SatSubsReq { return { $type: "Electric.Satellite.SatSubsReq", subscriptionId: "", shapeRequests: [] }; } diff --git a/components/electric/lib/electric/satellite/protobuf_messages.ex b/components/electric/lib/electric/satellite/protobuf_messages.ex index 093c4f4547..75fb1e1949 100644 --- a/components/electric/lib/electric/satellite/protobuf_messages.ex +++ b/components/electric/lib/electric/satellite/protobuf_messages.ex @@ -522,6 +522,15 @@ 1 end ), + ( + def encode(:CREATE_ENUM_TYPE) do + 2 + end + + def encode("CREATE_ENUM_TYPE") do + 2 + end + ), ( def encode(:ALTER_ADD_COLUMN) do 6 @@ -545,6 +554,9 @@ def decode(1) do :CREATE_INDEX end, + def decode(2) do + :CREATE_ENUM_TYPE + end, def decode(6) do :ALTER_ADD_COLUMN end @@ -556,7 +568,7 @@ @spec constants() :: [{integer(), atom()}] def constants() do - [{0, :CREATE_TABLE}, {1, :CREATE_INDEX}, {6, :ALTER_ADD_COLUMN}] + [{0, :CREATE_TABLE}, {1, :CREATE_INDEX}, {2, :CREATE_ENUM_TYPE}, {6, :ALTER_ADD_COLUMN}] end @spec has_constant?(any()) :: boolean() @@ -568,6 +580,9 @@ def has_constant?(:CREATE_INDEX) do true end, + def has_constant?(:CREATE_ENUM_TYPE) do + true + end, def has_constant?(:ALTER_ADD_COLUMN) do true end @@ -6810,7 +6825,7 @@ end, defmodule Electric.Satellite.SatOpMigrate do @moduledoc false - defstruct version: "", stmts: [], table: nil + defstruct version: "", stmts: [], affected_entity: nil ( ( @@ -6825,11 +6840,19 @@ @spec encode!(struct) :: iodata | no_return def encode!(msg) do - [] |> encode_table(msg) |> encode_version(msg) |> encode_stmts(msg) + [] |> encode_affected_entity(msg) |> encode_version(msg) |> encode_stmts(msg) end ) - [] + [ + defp encode_affected_entity(acc, msg) do + case msg.affected_entity do + nil -> acc + {:table, _field_value} -> encode_table(acc, msg) + {:enum_type, _field_value} -> encode_enum_type(acc, msg) + end + end + ] [ defp encode_version(acc, msg) do @@ -6865,14 +6888,21 @@ end, defp encode_table(acc, msg) do try do - case msg.table do - nil -> [acc] - child_field_value -> [acc, "\x1A", Protox.Encode.encode_message(child_field_value)] - end + {_, child_field_value} = msg.affected_entity + [acc, "\x1A", Protox.Encode.encode_message(child_field_value)] rescue ArgumentError -> reraise Protox.EncodingError.new(:table, "invalid field value"), __STACKTRACE__ end + end, + defp encode_enum_type(acc, msg) do + try do + {_, child_field_value} = msg.affected_entity + [acc, "\"", Protox.Encode.encode_message(child_field_value)] + rescue + ArgumentError -> + reraise Protox.EncodingError.new(:enum_type, "invalid field value"), __STACKTRACE__ + end end ] @@ -6928,16 +6958,38 @@ {delimited, rest} = Protox.Decode.parse_delimited(bytes, len) {[ - case msg.table do + case msg.affected_entity do {:table, previous_value} -> - {:table, - Protox.MergeMessage.merge( - previous_value, - Electric.Satellite.SatOpMigrate.Table.decode!(delimited) - )} + {:affected_entity, + {:table, + Protox.MergeMessage.merge( + previous_value, + Electric.Satellite.SatOpMigrate.Table.decode!(delimited) + )}} _ -> - {:table, Electric.Satellite.SatOpMigrate.Table.decode!(delimited)} + {:affected_entity, + {:table, Electric.Satellite.SatOpMigrate.Table.decode!(delimited)}} + end + ], rest} + + {4, _, bytes} -> + {len, bytes} = Protox.Varint.decode(bytes) + {delimited, rest} = Protox.Decode.parse_delimited(bytes, len) + + {[ + case 
msg.affected_entity do + {:enum_type, previous_value} -> + {:affected_entity, + {:enum_type, + Protox.MergeMessage.merge( + previous_value, + Electric.Satellite.SatOpMigrate.EnumType.decode!(delimited) + )}} + + _ -> + {:affected_entity, + {:enum_type, Electric.Satellite.SatOpMigrate.EnumType.decode!(delimited)}} end ], rest} @@ -7000,7 +7052,12 @@ %{ 1 => {:version, {:scalar, ""}, :string}, 2 => {:stmts, :unpacked, {:message, Electric.Satellite.SatOpMigrate.Stmt}}, - 3 => {:table, {:oneof, :_table}, {:message, Electric.Satellite.SatOpMigrate.Table}} + 3 => + {:table, {:oneof, :affected_entity}, + {:message, Electric.Satellite.SatOpMigrate.Table}}, + 4 => + {:enum_type, {:oneof, :affected_entity}, + {:message, Electric.Satellite.SatOpMigrate.EnumType}} } end @@ -7010,8 +7067,11 @@ } def defs_by_name() do %{ + enum_type: + {4, {:oneof, :affected_entity}, {:message, Electric.Satellite.SatOpMigrate.EnumType}}, stmts: {2, :unpacked, {:message, Electric.Satellite.SatOpMigrate.Stmt}}, - table: {3, {:oneof, :_table}, {:message, Electric.Satellite.SatOpMigrate.Table}}, + table: + {3, {:oneof, :affected_entity}, {:message, Electric.Satellite.SatOpMigrate.Table}}, version: {1, {:scalar, ""}, :string} } end @@ -7042,11 +7102,20 @@ %{ __struct__: Protox.Field, json_name: "table", - kind: {:oneof, :_table}, - label: :proto3_optional, + kind: {:oneof, :affected_entity}, + label: :optional, name: :table, tag: 3, type: {:message, Electric.Satellite.SatOpMigrate.Table} + }, + %{ + __struct__: Protox.Field, + json_name: "enumType", + kind: {:oneof, :affected_entity}, + label: :optional, + name: :enum_type, + tag: 4, + type: {:message, Electric.Satellite.SatOpMigrate.EnumType} } ] end @@ -7117,8 +7186,8 @@ %{ __struct__: Protox.Field, json_name: "table", - kind: {:oneof, :_table}, - label: :proto3_optional, + kind: {:oneof, :affected_entity}, + label: :optional, name: :table, tag: 3, type: {:message, Electric.Satellite.SatOpMigrate.Table} @@ -7130,8 +7199,8 @@ %{ __struct__: Protox.Field, json_name: "table", - kind: {:oneof, :_table}, - label: :proto3_optional, + kind: {:oneof, :affected_entity}, + label: :optional, name: :table, tag: 3, type: {:message, Electric.Satellite.SatOpMigrate.Table} @@ -7140,6 +7209,46 @@ [] ), + ( + def field_def(:enum_type) do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "enumType", + kind: {:oneof, :affected_entity}, + label: :optional, + name: :enum_type, + tag: 4, + type: {:message, Electric.Satellite.SatOpMigrate.EnumType} + }} + end + + def field_def("enumType") do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "enumType", + kind: {:oneof, :affected_entity}, + label: :optional, + name: :enum_type, + tag: 4, + type: {:message, Electric.Satellite.SatOpMigrate.EnumType} + }} + end + + def field_def("enum_type") do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "enumType", + kind: {:oneof, :affected_entity}, + label: :optional, + name: :enum_type, + tag: 4, + type: {:message, Electric.Satellite.SatOpMigrate.EnumType} + }} + end + ), def field_def(_) do {:error, :no_such_field} end @@ -7173,6 +7282,9 @@ def default(:table) do {:error, :no_default_value} end, + def default(:enum_type) do + {:error, :no_default_value} + end, def default(_) do {:error, :no_such_field} end @@ -9263,6 +9375,303 @@ end ) end, + defmodule Electric.Satellite.SatOpMigrate.EnumType do + @moduledoc false + defstruct name: "", values: [] + + ( + ( + @spec encode(struct) :: {:ok, iodata} | {:error, any} + def encode(msg) do + try do + {:ok, encode!(msg)} + rescue + e in 
[Protox.EncodingError, Protox.RequiredFieldsError] -> {:error, e} + end + end + + @spec encode!(struct) :: iodata | no_return + def encode!(msg) do + [] |> encode_name(msg) |> encode_values(msg) + end + ) + + [] + + [ + defp encode_name(acc, msg) do + try do + if msg.name == "" do + acc + else + [acc, "\n", Protox.Encode.encode_string(msg.name)] + end + rescue + ArgumentError -> + reraise Protox.EncodingError.new(:name, "invalid field value"), __STACKTRACE__ + end + end, + defp encode_values(acc, msg) do + try do + case msg.values do + [] -> + acc + + values -> + [ + acc, + Enum.reduce(values, [], fn value, acc -> + [acc, "\x12", Protox.Encode.encode_string(value)] + end) + ] + end + rescue + ArgumentError -> + reraise Protox.EncodingError.new(:values, "invalid field value"), __STACKTRACE__ + end + end + ] + + [] + ) + + ( + ( + @spec decode(binary) :: {:ok, struct} | {:error, any} + def decode(bytes) do + try do + {:ok, decode!(bytes)} + rescue + e in [Protox.DecodingError, Protox.IllegalTagError, Protox.RequiredFieldsError] -> + {:error, e} + end + end + + ( + @spec decode!(binary) :: struct | no_return + def decode!(bytes) do + parse_key_value(bytes, struct(Electric.Satellite.SatOpMigrate.EnumType)) + end + ) + ) + + ( + @spec parse_key_value(binary, struct) :: struct + defp parse_key_value(<<>>, msg) do + msg + end + + defp parse_key_value(bytes, msg) do + {field, rest} = + case Protox.Decode.parse_key(bytes) do + {0, _, _} -> + raise %Protox.IllegalTagError{} + + {1, _, bytes} -> + {len, bytes} = Protox.Varint.decode(bytes) + {delimited, rest} = Protox.Decode.parse_delimited(bytes, len) + {[name: Protox.Decode.validate_string(delimited)], rest} + + {2, _, bytes} -> + {len, bytes} = Protox.Varint.decode(bytes) + {delimited, rest} = Protox.Decode.parse_delimited(bytes, len) + {[values: msg.values ++ [Protox.Decode.validate_string(delimited)]], rest} + + {tag, wire_type, rest} -> + {_, rest} = Protox.Decode.parse_unknown(tag, wire_type, rest) + {[], rest} + end + + msg_updated = struct(msg, field) + parse_key_value(rest, msg_updated) + end + ) + + [] + ) + + ( + @spec json_decode(iodata(), keyword()) :: {:ok, struct()} | {:error, any()} + def json_decode(input, opts \\ []) do + try do + {:ok, json_decode!(input, opts)} + rescue + e in Protox.JsonDecodingError -> {:error, e} + end + end + + @spec json_decode!(iodata(), keyword()) :: struct() | no_return() + def json_decode!(input, opts \\ []) do + {json_library_wrapper, json_library} = Protox.JsonLibrary.get_library(opts, :decode) + + Protox.JsonDecode.decode!( + input, + Electric.Satellite.SatOpMigrate.EnumType, + &json_library_wrapper.decode!(json_library, &1) + ) + end + + @spec json_encode(struct(), keyword()) :: {:ok, iodata()} | {:error, any()} + def json_encode(msg, opts \\ []) do + try do + {:ok, json_encode!(msg, opts)} + rescue + e in Protox.JsonEncodingError -> {:error, e} + end + end + + @spec json_encode!(struct(), keyword()) :: iodata() | no_return() + def json_encode!(msg, opts \\ []) do + {json_library_wrapper, json_library} = Protox.JsonLibrary.get_library(opts, :encode) + Protox.JsonEncode.encode!(msg, &json_library_wrapper.encode!(json_library, &1)) + end + ) + + ( + @deprecated "Use fields_defs()/0 instead" + @spec defs() :: %{ + required(non_neg_integer) => {atom, Protox.Types.kind(), Protox.Types.type()} + } + def defs() do + %{1 => {:name, {:scalar, ""}, :string}, 2 => {:values, :unpacked, :string}} + end + + @deprecated "Use fields_defs()/0 instead" + @spec defs_by_name() :: %{ + required(atom) => {non_neg_integer, 
Protox.Types.kind(), Protox.Types.type()} + } + def defs_by_name() do + %{name: {1, {:scalar, ""}, :string}, values: {2, :unpacked, :string}} + end + ) + + ( + @spec fields_defs() :: list(Protox.Field.t()) + def fields_defs() do + [ + %{ + __struct__: Protox.Field, + json_name: "name", + kind: {:scalar, ""}, + label: :optional, + name: :name, + tag: 1, + type: :string + }, + %{ + __struct__: Protox.Field, + json_name: "values", + kind: :unpacked, + label: :repeated, + name: :values, + tag: 2, + type: :string + } + ] + end + + [ + @spec(field_def(atom) :: {:ok, Protox.Field.t()} | {:error, :no_such_field}), + ( + def field_def(:name) do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "name", + kind: {:scalar, ""}, + label: :optional, + name: :name, + tag: 1, + type: :string + }} + end + + def field_def("name") do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "name", + kind: {:scalar, ""}, + label: :optional, + name: :name, + tag: 1, + type: :string + }} + end + + [] + ), + ( + def field_def(:values) do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "values", + kind: :unpacked, + label: :repeated, + name: :values, + tag: 2, + type: :string + }} + end + + def field_def("values") do + {:ok, + %{ + __struct__: Protox.Field, + json_name: "values", + kind: :unpacked, + label: :repeated, + name: :values, + tag: 2, + type: :string + }} + end + + [] + ), + def field_def(_) do + {:error, :no_such_field} + end + ] + ) + + [] + + ( + @spec required_fields() :: [] + def required_fields() do + [] + end + ) + + ( + @spec syntax() :: atom() + def syntax() do + :proto3 + end + ) + + [ + @spec(default(atom) :: {:ok, boolean | integer | String.t() | float} | {:error, atom}), + def default(:name) do + {:ok, ""} + end, + def default(:values) do + {:error, :no_default_value} + end, + def default(_) do + {:error, :no_such_field} + end + ] + + ( + @spec file_options() :: nil + def file_options() do + nil + end + ) + end, defmodule Electric.Satellite.SatShapeDataBegin do @moduledoc false defstruct request_id: "", uuid: "" From ca6fcbee25e0d4a3e65435242772d1cc02476944 Mon Sep 17 00:00:00 2001 From: Oleksii Sholik Date: Thu, 18 Apr 2024 10:46:17 +0300 Subject: [PATCH 098/156] Format TS code --- clients/typescript/src/satellite/client.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clients/typescript/src/satellite/client.ts b/clients/typescript/src/satellite/client.ts index 426b62bd48..837a413d50 100644 --- a/clients/typescript/src/satellite/client.ts +++ b/clients/typescript/src/satellite/client.ts @@ -876,7 +876,8 @@ export class SatelliteClient implements Client { 'error', new SatelliteError( SatelliteErrorCode.UNEXPECTED_STATE, - `unexpected state ${ReplicationStatus[this.inbound.isReplicating] + `unexpected state ${ + ReplicationStatus[this.inbound.isReplicating] } handling 'relation' message` ) ) From 1f2812047503ce32ac77fe4ed6350b224e2d67a4 Mon Sep 17 00:00:00 2001 From: Oleksii Sholik Date: Mon, 22 Apr 2024 17:23:48 +0300 Subject: [PATCH 099/156] Do not include the CREATE TYPE statement for the SQLite dialecct --- .../lib/electric/postgres/dialect/sqlite.ex | 19 ------------------- .../lib/electric/postgres/replication.ex | 5 +++-- 2 files changed, 3 insertions(+), 21 deletions(-) diff --git a/components/electric/lib/electric/postgres/dialect/sqlite.ex b/components/electric/lib/electric/postgres/dialect/sqlite.ex index cbda84e6ae..7541a7d9a0 100644 --- a/components/electric/lib/electric/postgres/dialect/sqlite.ex +++ 
b/components/electric/lib/electric/postgres/dialect/sqlite.ex @@ -119,25 +119,6 @@ defmodule Electric.Postgres.Dialect.SQLite do ]) <> ";\n" end - # SQLite does not have an equivalent for enum types in Postgres. - # We pass the original statement through unchanged. - def to_sql(%Pg.CreateEnumStmt{} = stmt, _opts) do - name = Schema.AST.map(stmt.type_name) - values = Schema.AST.map(stmt.vals) - - serialized_values = - values - |> Electric.Postgres.Types.Array.serialize(?') - |> String.slice(1..-2//1) - - stmt([ - "-- CREATE TYPE", - quote_name(name), - "AS ENUM", - paren(serialized_values) - ]) <> ";\n" - end - defp alter_table_cmd(%Pg.Node{node: {_, cmd}}, table, opts) do alter_table_cmd(cmd, table, opts) end diff --git a/components/electric/lib/electric/postgres/replication.ex b/components/electric/lib/electric/postgres/replication.ex index 2c92bb1b4d..bf71c1091b 100644 --- a/components/electric/lib/electric/postgres/replication.ex +++ b/components/electric/lib/electric/postgres/replication.ex @@ -136,8 +136,9 @@ defmodule Electric.Postgres.Replication do end stmts = - Enum.map( - ast, + ast + |> Enum.reject(&match?(%Pg.CreateEnumStmt{}, &1)) + |> Enum.map( &%SatOpMigrate.Stmt{ type: stmt_type(&1), sql: to_sql(&1, stmt, dialect) From 48f17834afb20b8bd18290d3fce3d8abe2d4b178 Mon Sep 17 00:00:00 2001 From: Oleksii Sholik Date: Mon, 22 Apr 2024 18:49:05 +0300 Subject: [PATCH 100/156] fixup! Do not include the CREATE TYPE statement for the SQLite dialecct --- components/electric/lib/electric/postgres/replication.ex | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/components/electric/lib/electric/postgres/replication.ex b/components/electric/lib/electric/postgres/replication.ex index bf71c1091b..fea01626b8 100644 --- a/components/electric/lib/electric/postgres/replication.ex +++ b/components/electric/lib/electric/postgres/replication.ex @@ -137,7 +137,7 @@ defmodule Electric.Postgres.Replication do stmts = ast - |> Enum.reject(&match?(%Pg.CreateEnumStmt{}, &1)) + |> Enum.reject(&dialect == Dialect.SQLite and match?(%Pg.CreateEnumStmt{}, &1)) |> Enum.map( &%SatOpMigrate.Stmt{ type: stmt_type(&1), From 9266255b212918a7e7a0660a6dd2d63f73bbe577 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 23 Apr 2024 10:23:39 +0200 Subject: [PATCH 101/156] Small fixes --- clients/typescript/test/satellite/merge.test.ts | 4 ++-- clients/typescript/test/satellite/process.test.ts | 6 ++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/clients/typescript/test/satellite/merge.test.ts b/clients/typescript/test/satellite/merge.test.ts index 64f9dba51d..b342e01f22 100644 --- a/clients/typescript/test/satellite/merge.test.ts +++ b/clients/typescript/test/satellite/merge.test.ts @@ -202,7 +202,7 @@ const setupPG: SetupFn = async (t: ExecutionContext) => { const defaults = satelliteDefaults(namespace) return [new PgDatabaseAdapter(db), pgBuilder, namespace, defaults] } - + const setupPglite: SetupFn = async (t: ExecutionContext) => { const db = new PGlite() t.teardown(async () => await db.close()) @@ -215,7 +215,7 @@ const setupPglite: SetupFn = async (t: ExecutionContext) => { [ ['SQLite', setupSqlite], ['Postgres', setupPG], - ['PGlite', setupPglite] + ['PGlite', setupPglite], ] as const ).forEach(([dialect, setup]) => { test(`(${dialect}) merge works on oplog entries`, async (t) => { diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index 75bf62800e..60adb0b57b 100644 --- 
a/clients/typescript/test/satellite/process.test.ts +++ b/clients/typescript/test/satellite/process.test.ts @@ -79,11 +79,9 @@ const startSatellite = async ( satellite.setToken(token) const connectionPromise = satellite.connectWithBackoff().catch((e) => { if ( - e.message === 'terminating connection due to administrator command' || - e.message === - 'Client has encountered a connection error and is not queryable' + e.message === 'terminating connection due to administrator command' ) { - // This is to be expected as we stop Satellite at the end of the test + // This is to be expected as we stop Postgres at the end of the test return } throw e From c3637181e1780945203e297aced19918c42dc031 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 23 Apr 2024 10:53:06 +0200 Subject: [PATCH 102/156] Do not delay snapshot on start to next tick --- .../typescript/test/satellite/process.test.ts | 28 +++++++++++++++++-- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.test.ts index 60adb0b57b..329120d879 100644 --- a/clients/typescript/test/satellite/process.test.ts +++ b/clients/typescript/test/satellite/process.test.ts @@ -78,9 +78,7 @@ const startSatellite = async ( await satellite.start(authState) satellite.setToken(token) const connectionPromise = satellite.connectWithBackoff().catch((e) => { - if ( - e.message === 'terminating connection due to administrator command' - ) { + if (e.message === 'terminating connection due to administrator command') { // This is to be expected as we stop Postgres at the end of the test return } @@ -2514,4 +2512,28 @@ export const processTests = (test: TestFn) => { t.pass() }) + + test("don't snapshot after closing satellite process", async (t) => { + // open and then immediately close + // check that no snapshot is called after close + const { satellite, authState, token } = t.context + const { connectionPromise } = await startSatellite( + satellite, + authState, + token + ) + + await connectionPromise + await satellite.stop() + + satellite._performSnapshot = () => { + t.fail('Snapshot was called') + return Promise.resolve(new Date()) + } + + // wait some time to see that mutexSnapshot is not called + await sleepAsync(50) + + t.pass() + }) } From e4934f6595cd2a93a2a9426540fe57eeb8482c85 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 23 Apr 2024 11:27:25 +0200 Subject: [PATCH 103/156] Fix code style issues --- clients/typescript/src/client/execution/transactionalDB.ts | 6 +----- clients/typescript/src/drivers/pglite/database.ts | 2 +- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/clients/typescript/src/client/execution/transactionalDB.ts b/clients/typescript/src/client/execution/transactionalDB.ts index 818608325b..1dc97a757f 100644 --- a/clients/typescript/src/client/execution/transactionalDB.ts +++ b/clients/typescript/src/client/execution/transactionalDB.ts @@ -58,11 +58,7 @@ export class TransactionalDB implements DB { this._converter, Transformation.Decode ) - try { - return schema.parse(transformedRow) - } catch (e) { - throw e - } + return schema.parse(transformedRow) }) successCallback( new TransactionalDB(tx, this._fields, this._converter), diff --git a/clients/typescript/src/drivers/pglite/database.ts b/clients/typescript/src/drivers/pglite/database.ts index 0ecc780adf..5fa76226a6 100644 --- a/clients/typescript/src/drivers/pglite/database.ts +++ b/clients/typescript/src/drivers/pglite/database.ts @@ -2,4 +2,4 @@ import 
type { PGlite } from '@electric-sql/pglite' // The relevant subset of the SQLitePlugin database client API // that we need to ensure the client we're electrifying provides. -export interface Database extends Pick {} +export type Database = Pick From 7dfdae38b177fe241127c4ab3d12a95e6e171bf5 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 23 Apr 2024 13:27:50 +0200 Subject: [PATCH 104/156] Fix e2e test for JSON values --- e2e/satellite_client/src/client.ts | 9 ++++++++- e2e/tests/03.19_node_satellite_can_sync_json.lux | 4 ++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/e2e/satellite_client/src/client.ts b/e2e/satellite_client/src/client.ts index 6956e135e7..3ae8c4b6e4 100644 --- a/e2e/satellite_client/src/client.ts +++ b/e2e/satellite_client/src/client.ts @@ -310,7 +310,14 @@ export const get_jsonb_raw = async (electric: Electric, id: string) => { sql: `SELECT jsb FROM jsons WHERE id = ${builder.makePositionalParam(1)};`, args: [id] }) as unknown as Array<{ jsb: string }> - return res[0]?.jsb + + const js = res[0]?.jsb + + if (builder.dialect === 'Postgres') { + return js + } + + return JSON.parse(js) // SQLite stores JSON as string so parse it } export const get_json = async (electric: Electric, id: string) => { diff --git a/e2e/tests/03.19_node_satellite_can_sync_json.lux b/e2e/tests/03.19_node_satellite_can_sync_json.lux index 330f5bc950..7082ac887a 100644 --- a/e2e/tests/03.19_node_satellite_can_sync_json.lux +++ b/e2e/tests/03.19_node_satellite_can_sync_json.lux @@ -37,7 +37,7 @@ # when parsed as JSON, whitespace is trimmed but key order is kept #[invoke node_get_json "row1" "{ a: 1, c: true, b: \"foo\", d: null, e: [1,2,3] }"] # jsonb trims white space and sorts keys - [invoke node_get_jsonb_raw "row1" "[{\"a\": 1}, {\"b\": 5, \"d\": false}]"] + [invoke node_get_jsonb_raw "row1" "[ { a: 1 }, { b: 5, d: false } ]"] [invoke node_get_jsonb "row1" "[ { a: 1 }, { b: 5, d: false } ]"] # write JSON null value and DB NULL value @@ -121,7 +121,7 @@ # when parsed as JSON, whitespace is trimmed but key order is kept #[invoke node_get_json "row1" "{ a: 1, c: true, b: \"foo\", d: null, e: [1,2,3] }"] # jsonb trims white space and sorts keys - [invoke node_get_jsonb_raw "row1" "[{\"a\": 1}, {\"b\": 5, \"d\": false}]"] + [invoke node_get_jsonb_raw "row1" "[ { a: 1 }, { b: 5, d: false } ]"] [invoke node_get_jsonb "row1" "[ { a: 1 }, { b: 5, d: false } ]"] #[invoke node_get_json "row2" "{ __is_electric_json_null__: true }"] From a78f30b1ad6cfc7d071bdfb075f1bdc55a9cb57b Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 23 Apr 2024 13:39:56 +0200 Subject: [PATCH 105/156] Remove debug statements --- .../src/client/conversions/datatypes/json.ts | 2 - .../src/drivers/node-postgres/database.ts | 75 +++++++------------ clients/typescript/src/satellite/process.ts | 10 +-- .../src/util/encoders/pgEncoders.ts | 8 +- .../src/util/encoders/sqliteEncoders.ts | 6 +- 5 files changed, 32 insertions(+), 69 deletions(-) diff --git a/clients/typescript/src/client/conversions/datatypes/json.ts b/clients/typescript/src/client/conversions/datatypes/json.ts index 032ae3249f..5b25e097c3 100644 --- a/clients/typescript/src/client/conversions/datatypes/json.ts +++ b/clients/typescript/src/client/conversions/datatypes/json.ts @@ -15,8 +15,6 @@ export function serialiseJSON(v: JSON): string { } export function deserialiseJSON(v: string): JSON { - console.log('DESERIALISING:\n' + v) - console.log('PARSED:\n' + JSON.parse(v)) if (v === JSON.stringify(null)) return { __is_electric_json_null__: 
true } return JSON.parse(v) } diff --git a/clients/typescript/src/drivers/node-postgres/database.ts b/clients/typescript/src/drivers/node-postgres/database.ts index 069f6197d3..93d400e13b 100644 --- a/clients/typescript/src/drivers/node-postgres/database.ts +++ b/clients/typescript/src/drivers/node-postgres/database.ts @@ -17,20 +17,15 @@ export interface Database { } export class ElectricDatabase implements Database { - constructor( - public name: string, - //private postgres: EmbeddedPostgres, - private db: Client - ) {} + constructor(public name: string, private db: Client) {} async exec(statement: Statement): Promise { - try { - const { rows, rowCount } = await this.db.query({ - text: statement.sql, - values: statement.args, - types: { - getTypeParser: ((oid: number) => { - /* + const { rows, rowCount } = await this.db.query({ + text: statement.sql, + values: statement.args, + types: { + getTypeParser: ((oid: number) => { + /* // Modify the parser to not parse JSON values // Instead, return them as strings // our conversions will correctly parse them @@ -42,40 +37,28 @@ export class ElectricDatabase implements Database { } */ - if ( - oid == pg.types.builtins.TIMESTAMP || - oid == pg.types.builtins.TIMESTAMPTZ || - oid == pg.types.builtins.DATE - ) { - // Parse timestamps and date values ourselves - // because the pg parser parses them differently from what we expect - const pgTypes = new Map([ - [pg.types.builtins.TIME, PgDateType.PG_TIME], - [pg.types.builtins.TIMETZ, PgDateType.PG_TIMETZ], - [pg.types.builtins.TIMESTAMP, PgDateType.PG_TIMESTAMP], - [pg.types.builtins.TIMESTAMPTZ, PgDateType.PG_TIMESTAMPTZ], - [pg.types.builtins.DATE, PgDateType.PG_DATE], - ]) - return (val: string) => - deserialiseDate(val, pgTypes.get(oid) as PgDateType) - } - return originalGetTypeParser(oid) - }) as typeof pg.types.getTypeParser, - }, - }) - return { - rows, - rowsModified: rowCount ?? 0, - } - } catch (e) { - console.log( - 'EXEC failed: ' + - JSON.stringify(e) + - '\n' + - 'Statement was: ' + - JSON.stringify(statement) - ) - throw e + if ( + oid == pg.types.builtins.TIMESTAMP || + oid == pg.types.builtins.TIMESTAMPTZ || + oid == pg.types.builtins.DATE + ) { + // Parse timestamps and date values ourselves + // because the pg parser parses them differently from what we expect + const pgTypes = new Map([ + [pg.types.builtins.TIMESTAMP, PgDateType.PG_TIMESTAMP], + [pg.types.builtins.TIMESTAMPTZ, PgDateType.PG_TIMESTAMPTZ], + [pg.types.builtins.DATE, PgDateType.PG_DATE], + ]) + return (val: string) => + deserialiseDate(val, pgTypes.get(oid) as PgDateType) + } + return originalGetTypeParser(oid) + }) as typeof pg.types.getTypeParser, + }, + }) + return { + rows, + rowsModified: rowCount ?? 0, } } } diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index 8c7ca93b1b..8bed68f34e 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -589,7 +589,6 @@ export class SatelliteProcess implements Satellite { ) try { - console.log('APPLYING SUBS DATA:\n' + JSON.stringify(stmts, null, 2)) await this.adapter.runInTransaction(...stmts) // We're explicitly not specifying rowids in these changes for now, @@ -1278,7 +1277,6 @@ export class SatelliteProcess implements Satellite { } async _applyTransaction(transaction: Transaction) { - console.log('APPLY TX: ' + JSON.stringify(transaction)) const namespace = this.builder.defaultNamespace const origin = transaction.origin! 
const commitTimestamp = new Date(transaction.commit_timestamp.toNumber()) @@ -1338,10 +1336,7 @@ export class SatelliteProcess implements Satellite { const { statements, tablenames } = await this._apply(entries, origin) entries.forEach((e) => opLogEntries.push(e)) - statements.forEach((s) => { - console.log('DML stmt: ' + JSON.stringify(s)) - stmts.push(s) - }) + statements.forEach((s) => stmts.push(s)) tablenames.forEach((n) => tablenamesSet.add(n)) } const processDDL = async (changes: SchemaChange[]) => { @@ -1349,7 +1344,6 @@ export class SatelliteProcess implements Satellite { const affectedTables: Map = new Map() changes.forEach((change) => { const changeStmt = { sql: change.sql } - console.log('DDL stmt: ' + JSON.stringify(changeStmt)) stmts.push(changeStmt) if ( @@ -1417,13 +1411,11 @@ export class SatelliteProcess implements Satellite { if (transaction.migrationVersion) { // If a migration version is specified // then the transaction is a migration - console.log('APPLYING MIGRATION') await this.migrator.applyIfNotAlready({ statements: allStatements, version: transaction.migrationVersion, }) } else { - console.log('APPLYING TRANSACTION') await this.adapter.runInTransaction(...allStatements) } diff --git a/clients/typescript/src/util/encoders/pgEncoders.ts b/clients/typescript/src/util/encoders/pgEncoders.ts index 72348f2c4e..a13a340d19 100644 --- a/clients/typescript/src/util/encoders/pgEncoders.ts +++ b/clients/typescript/src/util/encoders/pgEncoders.ts @@ -6,13 +6,7 @@ export const pgTypeEncoder = { ...sqliteTypeEncoder, bool: boolToBytes, json: (x: JSON) => { - const str = JSON.stringify(x) - console.log('GONNA ENCODE:\n' + x) - console.log('SERIALISED:\n' + str) - const res = textEncoder.encode(str) - console.log('TEXT ENCODED:\n' + res) - //return textEncoder.encode(serialiseJSON(x)) - return res + return textEncoder.encode(JSON.stringify(x)) }, } diff --git a/clients/typescript/src/util/encoders/sqliteEncoders.ts b/clients/typescript/src/util/encoders/sqliteEncoders.ts index 1a0911620d..19074f0af5 100644 --- a/clients/typescript/src/util/encoders/sqliteEncoders.ts +++ b/clients/typescript/src/util/encoders/sqliteEncoders.ts @@ -10,11 +10,7 @@ import { export const sqliteTypeEncoder = { bool: boolToBytes, text: (string: string) => textEncoder.encode(string), - json: (string: string) => { - const res = textEncoder.encode(string) - console.log('TEXTT ENCODED:\n' + res) - return res - }, + json: (string: string) => textEncoder.encode(string), timetz: (string: string) => sqliteTypeEncoder.text(stringToTimetzString(string)), } From 2e4dc873bcbc1e15d0ec5fb1c113b4b0c287eb16 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 23 Apr 2024 13:44:24 +0200 Subject: [PATCH 106/156] Fixed duplicate key --- components/electric/lib/electric/satellite/protocol/state.ex | 1 - 1 file changed, 1 deletion(-) diff --git a/components/electric/lib/electric/satellite/protocol/state.ex b/components/electric/lib/electric/satellite/protocol/state.ex index f5af6a67a8..7f46e48974 100644 --- a/components/electric/lib/electric/satellite/protocol/state.ex +++ b/components/electric/lib/electric/satellite/protocol/state.ex @@ -16,7 +16,6 @@ defmodule Electric.Satellite.Protocol.State do origin: "", subscriptions: %{}, subscription_data_fun: nil, - sql_dialect: Electric.Postgres.Dialect.SQLite, move_in_data_fun: nil, sql_dialect: Electric.Postgres.Dialect.SQLite, telemetry: nil From 63adf955113009b1f676cc79c3cbf590127d1e69 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 23 Apr 2024 13:45:55 +0200 
Subject: [PATCH 107/156] Format Electric code --- components/electric/lib/electric/postgres/replication.ex | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/components/electric/lib/electric/postgres/replication.ex b/components/electric/lib/electric/postgres/replication.ex index fea01626b8..4d8491683d 100644 --- a/components/electric/lib/electric/postgres/replication.ex +++ b/components/electric/lib/electric/postgres/replication.ex @@ -137,7 +137,7 @@ defmodule Electric.Postgres.Replication do stmts = ast - |> Enum.reject(&dialect == Dialect.SQLite and match?(%Pg.CreateEnumStmt{}, &1)) + |> Enum.reject(&(dialect == Dialect.SQLite and match?(%Pg.CreateEnumStmt{}, &1))) |> Enum.map( &%SatOpMigrate.Stmt{ type: stmt_type(&1), From e0c3336fab0815c65c5a068430e80bfa68d0c44b Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 23 Apr 2024 13:55:35 +0200 Subject: [PATCH 108/156] Fix enum assertion in Electric unit test --- .../electric/test/electric/postgres/replication_test.exs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/components/electric/test/electric/postgres/replication_test.exs b/components/electric/test/electric/postgres/replication_test.exs index c8b2a7935d..4550f373c0 100644 --- a/components/electric/test/electric/postgres/replication_test.exs +++ b/components/electric/test/electric/postgres/replication_test.exs @@ -323,12 +323,7 @@ defmodule Electric.Postgres.ReplicationTest do assert %SatOpMigrate{version: ^version, stmts: stmts, affected_entity: {:enum_type, enum}} = msg - assert stmts == [ - %SatOpMigrate.Stmt{ - type: :CREATE_ENUM_TYPE, - sql: "-- CREATE TYPE \"public\".\"colour\" AS ENUM ('red','green','blue');\n" - } - ] + assert stmts == [] assert enum == %SatOpMigrate.EnumType{name: "colour", values: ["red", "green", "blue"]} From 35e00299b4bcc5ac6c88b94125a606e2bfe40d5d Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 23 Apr 2024 14:32:37 +0200 Subject: [PATCH 109/156] Fix path for uploading lux results in CI --- .github/workflows/e2e.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 305dc87598..3c986d2cb6 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -92,7 +92,7 @@ jobs: if: ${{ failure() && steps.tests.outcome == 'failure' }} with: name: lux_logs - path: e2e/lux_logs/run_* + path: e2e/lux_logs/**/run_* - name: Upload test results to Buildkite analytics if: ${{ !cancelled() && steps.tests.outcome != 'skipped' && env.BUILDKITE_ANALYTICS_TOKEN != '' }} working-directory: e2e/lux_logs/latest_run From 9aeccf0efdb32382830c13cf3209f657c0a6389e Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 23 Apr 2024 15:38:29 +0200 Subject: [PATCH 110/156] Fixed path to lux logs in CI --- .github/workflows/e2e.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 3c986d2cb6..987d0b4d84 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -92,7 +92,7 @@ jobs: if: ${{ failure() && steps.tests.outcome == 'failure' }} with: name: lux_logs - path: e2e/lux_logs/**/run_* + path: e2e/**/lux_logs/run_* - name: Upload test results to Buildkite analytics if: ${{ !cancelled() && steps.tests.outcome != 'skipped' && env.BUILDKITE_ANALYTICS_TOKEN != '' }} working-directory: e2e/lux_logs/latest_run From fdb037da470885c5654e13cc351e47380f54ddcc Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 23 Apr 2024 15:54:04 +0200 Subject: [PATCH 111/156] 
Let migrationsFilePath take dialect as argument --- .../typescript/src/cli/migrations/migrate.ts | 23 ++++++++++--------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/clients/typescript/src/cli/migrations/migrate.ts b/clients/typescript/src/cli/migrations/migrate.ts index 53296f0138..89252c4338 100644 --- a/clients/typescript/src/cli/migrations/migrate.ts +++ b/clients/typescript/src/cli/migrations/migrate.ts @@ -17,6 +17,7 @@ import { start } from '../docker-commands/command-start' import { stop } from '../docker-commands/command-stop' import { withConfig } from '../configure/command-with-config' import { pgBuilder, sqliteBuilder } from '../../migrators/query-builder' +import { Dialect } from '../../migrators/query-builder/builder' // Rather than run `npx prisma` we resolve the path to the prisma binary so that // we can be sure we are using the same version of Prisma that is a dependency of @@ -177,7 +178,7 @@ async function watchMigrations(opts: GeneratorOptions) { async function getLatestMigration( opts: Omit ): Promise { - const migrationsFile = migrationsFilePath(opts, sqliteMigrationsFileName) + const migrationsFile = migrationsFilePath(opts, 'SQLite') // Read the migrations file contents and parse it // need to strip the `export default` before parsing. @@ -219,27 +220,25 @@ async function getLatestMigration( } async function bundleMigrationsFor( - dialect: 'sqlite' | 'postgresql', + dialect: Dialect, opts: Omit, tmpFolder: string ) { const config = opts.config - const folder = dialect === 'sqlite' ? 'migrations' : 'pg-migrations' + const folder = dialect === 'SQLite' ? 'migrations' : 'pg-migrations' const migrationsPath = path.join(tmpFolder, folder) await fs.mkdir(migrationsPath) const migrationEndpoint = config.SERVICE + `/api/migrations?dialect=${dialect}` const migrationsFolder = path.resolve(migrationsPath) - const migrationsFileName = - dialect === 'sqlite' ? sqliteMigrationsFileName : pgMigrationsFileName - const migrationsFile = migrationsFilePath(opts, migrationsFileName) + const migrationsFile = migrationsFilePath(opts, dialect) // Fetch the migrations from Electric endpoint and write them into `tmpFolder` await fetchMigrations(migrationEndpoint, migrationsFolder, tmpFolder) // Build the migrations - const builder = dialect === 'sqlite' ? sqliteBuilder : pgBuilder + const builder = dialect === 'SQLite' ? sqliteBuilder : pgBuilder return async () => { await buildMigrations(migrationsFolder, migrationsFile, builder) } @@ -268,12 +267,12 @@ async function _generate(opts: Omit) { try { const buildSqliteMigrations = await bundleMigrationsFor( - 'sqlite', + 'SQLite', opts, tmpFolder ) const buildPgMigrations = await bundleMigrationsFor( - 'postgresql', + 'Postgres', opts, tmpFolder ) @@ -715,10 +714,12 @@ async function fetchMigrations( function migrationsFilePath( opts: Omit, - filename: string + sqlDialect: Dialect ) { const outFolder = path.resolve(opts.config.CLIENT_PATH) - return path.join(outFolder, filename) + const migrationsFileName = + sqlDialect === 'SQLite' ? sqliteMigrationsFileName : pgMigrationsFileName + return path.join(outFolder, migrationsFileName) } function capitaliseFirstLetter(word: string): string { From b390851a7bd969b7b1e81b0c138015870bfae0ad Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 23 Apr 2024 16:31:47 +0200 Subject: [PATCH 112/156] Fixed isObject and isDataObject and their usages. 
--- .../src/client/conversions/converter.ts | 4 +-- .../src/client/conversions/input.ts | 2 +- .../typescript/src/client/model/builder.ts | 6 ++--- clients/typescript/src/util/common.ts | 8 +----- clients/typescript/test/util/commmon.test.ts | 26 +++++++++++++++++-- 5 files changed, 31 insertions(+), 15 deletions(-) diff --git a/clients/typescript/src/client/conversions/converter.ts b/clients/typescript/src/client/conversions/converter.ts index 72da5a0493..ea763750ff 100644 --- a/clients/typescript/src/client/conversions/converter.ts +++ b/clients/typescript/src/client/conversions/converter.ts @@ -16,7 +16,7 @@ export interface Converter { } /** - * Checks whether the provided value is a user-provided data object, e.g. a timestamp. + * Checks whether the provided value is a data object (e.g. a timestamp) and not a filter. * This is important because `input.ts` needs to distinguish between data objects and filter objects. * Data objects need to be converted to a SQLite storeable value, whereas filter objects need to be treated specially * as we have to transform the values of the filter's fields (cf. `transformFieldsAllowingFilters` in `input.ts`). @@ -24,5 +24,5 @@ export interface Converter { * @returns True if it is a data object, false otherwise. */ export function isDataObject(v: unknown): boolean { - return v instanceof Date || typeof v === 'bigint' + return v instanceof Date || typeof v === 'bigint' || ArrayBuffer.isView(v) } diff --git a/clients/typescript/src/client/conversions/input.ts b/clients/typescript/src/client/conversions/input.ts index 8adc61d5d2..80d238813e 100644 --- a/clients/typescript/src/client/conversions/input.ts +++ b/clients/typescript/src/client/conversions/input.ts @@ -377,7 +377,7 @@ export function transformField( return [field, transformedValue] } -function isFilterObject(value: any): boolean { +export function isFilterObject(value: any): boolean { // if it is an object it can only be a data object or a filter object return isObject(value) && !isDataObject(value) } diff --git a/clients/typescript/src/client/model/builder.ts b/clients/typescript/src/client/model/builder.ts index 9e3fb3db75..b76791212c 100644 --- a/clients/typescript/src/client/model/builder.ts +++ b/clients/typescript/src/client/model/builder.ts @@ -16,8 +16,8 @@ import Log from 'loglevel' import { ExtendedTableSchema } from './schema' import { PgBasicType } from '../conversions/types' import { HKT } from '../util/hkt' -import { isObject } from '../../util' import { Dialect } from '../../migrators/query-builder/builder' +import { isFilterObject } from '../conversions/input' const squelPostgres = squel.useFlavour('postgres') squelPostgres.registerValueHandler('bigint', function (bigint) { @@ -141,7 +141,7 @@ export class Builder { ): QueryBuilder { const unsupportedEntry = Object.entries(i.data).find((entry) => { const [_key, value] = entry - return isObject(value) + return isFilterObject(value) }) if (unsupportedEntry) throw new InvalidArgumentError( @@ -365,7 +365,7 @@ export function makeFilter( prefixFieldsWith ), ] - } else if (isObject(fieldValue) && !(fieldValue instanceof Date)) { + } else if (isFilterObject(fieldValue)) { // an object containing filters is provided // e.g. 
users.findMany({ where: { id: { in: [1, 2, 3] } } }) const fs = { diff --git a/clients/typescript/src/util/common.ts b/clients/typescript/src/util/common.ts index 2f81aae8a2..4ad3642da7 100644 --- a/clients/typescript/src/util/common.ts +++ b/clients/typescript/src/util/common.ts @@ -62,11 +62,5 @@ export function getWaiter(): Waiter { * @returns {boolean} whether the `value` is an actual object */ export function isObject(value: any): value is object { - return ( - typeof value === 'object' && - value !== null && - !Array.isArray(value) && - !ArrayBuffer.isView(value) && - !(value instanceof Date) - ) + return typeof value === 'object' && value !== null && !Array.isArray(value) } diff --git a/clients/typescript/test/util/commmon.test.ts b/clients/typescript/test/util/commmon.test.ts index 5790f5bbe0..f6a1bfb521 100644 --- a/clients/typescript/test/util/commmon.test.ts +++ b/clients/typescript/test/util/commmon.test.ts @@ -9,6 +9,7 @@ import { hexStringToBlob, } from '../../src/util/encoders' import { SatelliteError, SatelliteErrorCode } from '../../src/util/types' +import { isDataObject } from '../../src/client/conversions/converter' const OriginalEncoder = globalThis['TextEncoder'] const OriginalDecoder = globalThis['TextDecoder'] @@ -127,6 +128,27 @@ test('test isObject detects only objects and not arrays', (t) => { t.true(isObject({})) t.true(isObject({ field: 'value' })) t.false(isObject([])) - t.false(isObject(new Uint8Array())) - t.false(isObject(new Int8Array())) + t.true(isObject(new Uint8Array())) + t.true(isObject(new Int8Array())) + t.true(isObject(new Date())) + t.false(isObject(5n)) + // a filter object + t.true( + isObject({ + lt: 5, + }) + ) +}) + +test('test isDataObject detects data objects and not filter objects', (t) => { + t.true(isDataObject(new Date())) + t.true(isDataObject(new Uint8Array())) + t.true(isDataObject(new Int8Array())) + t.true(isDataObject(5n)) + // a filter object + t.false( + isDataObject({ + lt: 5, + }) + ) }) From 03ba5aa705a249e8925500ae6dd0c3ed163ec3d7 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 23 Apr 2024 16:56:41 +0200 Subject: [PATCH 113/156] Minor fix in date parsing --- clients/typescript/src/client/conversions/datatypes/date.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/clients/typescript/src/client/conversions/datatypes/date.ts b/clients/typescript/src/client/conversions/datatypes/date.ts index fa6e3a24e7..2225cbb16f 100644 --- a/clients/typescript/src/client/conversions/datatypes/date.ts +++ b/clients/typescript/src/client/conversions/datatypes/date.ts @@ -37,9 +37,8 @@ export function deserialiseDate(v: string, pgType: PgDateType): Date { switch (pgType) { case PgDateType.PG_TIMESTAMP: case PgDateType.PG_TIMESTAMPTZ: - return parse(v) case PgDateType.PG_DATE: - return parse(`${v}`) + return parse(v) case PgDateType.PG_TIME: // interpret as local time From d5b5d575e1daca253c4fede7bd378d3f2d273e29 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 23 Apr 2024 16:59:55 +0200 Subject: [PATCH 114/156] Remove obsolete comment --- clients/typescript/src/client/execution/nonTransactionalDB.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients/typescript/src/client/execution/nonTransactionalDB.ts b/clients/typescript/src/client/execution/nonTransactionalDB.ts index 00b0d4188a..93c7897c35 100644 --- a/clients/typescript/src/client/execution/nonTransactionalDB.ts +++ b/clients/typescript/src/client/execution/nonTransactionalDB.ts @@ -23,7 +23,7 @@ export class NonTransactionalDB 
implements DB { successCallback?: (db: DB, res: RunResult) => void, errorCallback?: (error: any) => void ) { - const { text, values } = statement.toParam() //{ numberedParameters: false }) + const { text, values } = statement.toParam() this._adapter .run({ sql: text, args: values }) .then((res) => { From 7149f00c01454ffc82aec8af655764030ad85bb2 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 24 Apr 2024 09:11:56 +0200 Subject: [PATCH 115/156] Rewrite it statements as switch in Postgres conversions --- .../src/client/conversions/postgres.ts | 121 +++++++++--------- 1 file changed, 61 insertions(+), 60 deletions(-) diff --git a/clients/typescript/src/client/conversions/postgres.ts b/clients/typescript/src/client/conversions/postgres.ts index 0fa3d267f0..1048c8a216 100644 --- a/clients/typescript/src/client/conversions/postgres.ts +++ b/clients/typescript/src/client/conversions/postgres.ts @@ -2,7 +2,7 @@ import { InvalidArgumentError } from '../validation/errors/invalidArgumentError' import { Converter } from './converter' import { deserialiseDate, serialiseDate } from './datatypes/date' import { isJsonNull } from './datatypes/json' -import { PgBasicType, PgDateType, PgType, isPgDateType } from './types' +import { PgBasicType, PgDateType, PgType } from './types' /** * This module takes care of converting TypeScript values to a Postgres storeable value and back. @@ -17,37 +17,39 @@ function toPostgres(v: any, pgType: PgType): any { return v } - if (isPgDateType(pgType)) { - if (!(v instanceof Date)) - throw new InvalidArgumentError( - `Unexpected value ${v}. Expected a Date object.` - ) + switch (pgType) { + case PgDateType.PG_TIMESTAMP: + case PgDateType.PG_TIMESTAMPTZ: + case PgDateType.PG_DATE: + case PgDateType.PG_TIME: + case PgDateType.PG_TIMETZ: + if (!(v instanceof Date)) + throw new InvalidArgumentError( + `Unexpected value ${v}. 
Expected a Date object.` + ) + return serialiseDate(v, pgType) - return serialiseDate(v, pgType as PgDateType) - } + case PgBasicType.PG_JSON: + case PgBasicType.PG_JSONB: + // FIXME: the specialised conversion for null below is needed + // because of the pg package we use to connect to the PG database + // if we support other PG drivers then this may not be needed + // Ideally, we would do this conversion in the driver itself + if (isJsonNull(v)) { + // Also turn into a DB null + // because we currently don't support top-level JSON null value + // when using Postgres + return null // 'null' + } + return JSON.stringify(v) - if (pgType === PgBasicType.PG_JSON || pgType === PgBasicType.PG_JSONB) { - // FIXME: the specialised conversions below are needed because of the pg package - // we use to connect to the PG database - // if we support other PG drivers then this may not be needed - // Ideally, we would do this conversion in the driver itself - if (v === null) { - return null - } - if (isJsonNull(v)) { - // Also turn into a DB null - // because we currently don't support top-level JSON null value - // when using Postgres - return null // 'null' - } - return JSON.stringify(v) - } + case PgBasicType.PG_FLOAT4: + case PgBasicType.PG_REAL: + return Math.fround(v) - if (pgType === PgBasicType.PG_FLOAT4 || pgType === PgBasicType.PG_REAL) { - return Math.fround(v) + default: + return v } - - return v } function fromPostgres(v: any, pgType: PgType): any { @@ -56,42 +58,41 @@ function fromPostgres(v: any, pgType: PgType): any { return v } - /* - // FIXME: the specialised conversions below are needed when adding support for top-level JSON null values - if (pgType === PgBasicType.PG_JSON || pgType === PgBasicType.PG_JSONB) { - if (v === null) { - // DB null - return null - } - if (v === 'null') { - // JSON null value - return { __is_electric_json_null__: true } - } - if (typeof v === 'object') { - return v - } - return JSON.parse(v) - } - */ + switch (pgType) { + case PgBasicType.PG_INT8: + return BigInt(v) // needed because the node-pg driver returns bigints as strings - if (pgType === PgBasicType.PG_INT8) { - return BigInt(v) // needed because the node-pg driver returns bigints as strings - } + case PgBasicType.PG_FLOAT4: + case PgBasicType.PG_REAL: + // fround the number to represent it as a 32-bit float + return Math.fround(v) - if (pgType === PgBasicType.PG_FLOAT4 || pgType === PgBasicType.PG_REAL) { - // fround the number to represent it as a 32-bit float - return Math.fround(v) - } + case PgDateType.PG_TIME: + case PgDateType.PG_TIMETZ: + // dates and timestamps are parsed into JS Date objects + // by the underlying PG driver we use + // But time and timetz values are returned as strings + // so we parse them into a JS Date object ourselves + return deserialiseDate(v, pgType) - if (pgType === PgDateType.PG_TIME || pgType === PgDateType.PG_TIMETZ) { - // dates and timestamps are parsed into JS Date objects - // by the underlying PG driver we use - // But time and timetz values are returned as strings - // so we parse them into a JS Date object ourselves - return deserialiseDate(v, pgType as PgDateType) - } + // Note: i left the specialised conversions below in comment + // as they will be needed when we add support for top-level JSON null values + /* + case PgBasicType.PG_JSON: + case PgBasicType.PG_JSONB: + if (v === 'null') { + // JSON null value + return { __is_electric_json_null__: true } + } + if (typeof v === 'object') { + return v + } + return JSON.parse(v) + */ - return v + default: + 
return v + } } export const postgresConverter: Converter = { From 9b292163c293ea48a3fb5f6696834ad314c94fbf Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 24 Apr 2024 09:12:35 +0200 Subject: [PATCH 116/156] Renamed variable in SQL builder --- .../typescript/src/client/model/builder.ts | 25 ++++++++----------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/clients/typescript/src/client/model/builder.ts b/clients/typescript/src/client/model/builder.ts index b76791212c..66e4d3194f 100644 --- a/clients/typescript/src/client/model/builder.ts +++ b/clients/typescript/src/client/model/builder.ts @@ -30,7 +30,7 @@ squelPostgres.registerValueHandler(Uint8Array, function (uint8) { type AnyFindInput = FindInput export class Builder { - private _fullyQualifiedTableName: string + private _quotedTableName: string constructor( private _tableName: string, @@ -50,20 +50,17 @@ export class Builder { >, public dialect: Dialect ) { - this._fullyQualifiedTableName = `"${this._tableName}"` + this._quotedTableName = `"${this._tableName}"` squelPostgres.cls.DefaultQueryBuilderOptions.nameQuoteCharacter = '"' squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteFieldNames = true if (dialect === 'Postgres') { - //squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteTableNames = true - - // need to register it, otherwise squel complains that the Date type is not registered - // as Squel does not support it out-of-the-box but our Postgres drivers do support it. + // need to register the Date type + // as Squel does not support it out-of-the-box + // but our Postgres drivers do support it. squelPostgres.registerValueHandler(Date, (date) => date) - //this._fullyQualifiedTableName = `public."${this._tableName}"` } else { // Don't use numbered parameters if dialect is SQLite squelPostgres.cls.DefaultQueryBuilderOptions.numberedParameters = false - //this._fullyQualifiedTableName = `main."${this._tableName}"` } } @@ -71,7 +68,7 @@ export class Builder { // Make a SQL query out of the data const query = squelPostgres .insert() - .into(this._fullyQualifiedTableName) + .into(this._quotedTableName) .setFields(i.data) // Adds a `RETURNING` statement that returns all known fields @@ -82,7 +79,7 @@ export class Builder { createMany(i: CreateManyInput): QueryBuilder { const insert = squelPostgres .insert() - .into(this._fullyQualifiedTableName) + .into(this._quotedTableName) .setFieldsRows(i.data) return i.skipDuplicates ? 
insert.onConflict() // adds "ON CONFLICT DO NOTHING" to the query @@ -127,9 +124,7 @@ export class Builder { i: DeleteManyInput, idRequired = false ): QueryBuilder { - const deleteQuery = squelPostgres - .delete() - .from(this._fullyQualifiedTableName) + const deleteQuery = squelPostgres.delete().from(this._quotedTableName) const whereObject = i.where // safe because the schema for `where` adds an empty object as default which is provided if the `where` field is absent const fields = this.getFields(whereObject, idRequired) return addFilters(fields, whereObject, deleteQuery) @@ -152,7 +147,7 @@ export class Builder { const query = squelPostgres .update() - .table(this._fullyQualifiedTableName) + .table(this._quotedTableName) .setFields(i.data) // Adds a `RETURNING` statement that returns all known fields @@ -189,7 +184,7 @@ export class Builder { // and squel would add quotes around the entire cast const query = squelPostgres .select({ autoQuoteFieldNames: false }) - .from(this._fullyQualifiedTableName) // specify from which table to select + .from(this._quotedTableName) // specify from which table to select // only select the fields provided in `i.select` and the ones in `i.where` const addFieldSelectionP = this.addFieldSelection.bind( this, From 034d956e74c9078cc449547bf36574986f3e1ce3 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 24 Apr 2024 09:26:44 +0200 Subject: [PATCH 117/156] Turned if statements into switch in triggers --- clients/typescript/src/migrators/triggers.ts | 34 ++++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/clients/typescript/src/migrators/triggers.ts b/clients/typescript/src/migrators/triggers.ts index cce7e3a83a..70dfb03d4e 100644 --- a/clients/typescript/src/migrators/triggers.ts +++ b/clients/typescript/src/migrators/triggers.ts @@ -261,25 +261,25 @@ function joinColsForJSON( const transformIfNeeded = (col: string, targetedCol: string) => { const colType = colTypes[col] - // cast REALs, INT8s, BIGINTs to TEXT to work around SQLite's `json_object` bug - if ( - colType === 'FLOAT4' || - colType === 'REAL' || - colType === 'DOUBLE PRECISION' || - colType === 'FLOAT8' || - colType === 'INT8' || - colType === 'BIGINT' - ) { - return `cast(${targetedCol} as TEXT)` - } + switch (colType) { + case 'FLOAT4': + case 'REAL': + case 'DOUBLE PRECISION': + case 'FLOAT8': + case 'INT8': + case 'BIGINT': + // cast REALs, INT8s, BIGINTs to TEXT to work around SQLite's `json_object` bug + return `cast(${targetedCol} as TEXT)` + + case 'BYTEA': + // transform blobs/bytestrings into hexadecimal strings for JSON encoding + return `CASE WHEN ${targetedCol} IS NOT NULL THEN ${builder.toHex( + targetedCol + )} ELSE NULL END` - // transform blobs/bytestrings into hexadecimal strings for JSON encoding - if (colType === 'BYTEA') { - return `CASE WHEN ${targetedCol} IS NOT NULL THEN ${builder.toHex( - targetedCol - )} ELSE NULL END` + default: + return targetedCol } - return targetedCol } if (typeof target === 'undefined') { From 1d77131baf7bc0d4ae0354b3cad8a5c59f806d93 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 24 Apr 2024 09:42:03 +0200 Subject: [PATCH 118/156] Introduced no-op encoders/decoders for bytea --- clients/typescript/src/satellite/client.ts | 4 ++-- clients/typescript/src/util/encoders/sqliteEncoders.ts | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/clients/typescript/src/satellite/client.ts b/clients/typescript/src/satellite/client.ts index 837a413d50..323f33b338 100644 --- 
a/clients/typescript/src/satellite/client.ts +++ b/clients/typescript/src/satellite/client.ts @@ -1443,7 +1443,7 @@ function deserializeColumnData( case PgDateType.PG_TIMETZ: return decoder.timetz(column) case PgBasicType.PG_BYTEA: - return column + return decoder.bytea(column) /* case PgBasicType.PG_JSON: case PgBasicType.PG_JSONB: @@ -1467,7 +1467,7 @@ function serializeColumnData( case PgDateType.PG_TIMETZ: return encoder.timetz(columnValue as string) case PgBasicType.PG_BYTEA: - return columnValue as Uint8Array + return encoder.bytea(columnValue as Uint8Array) case PgBasicType.PG_JSON: case PgBasicType.PG_JSONB: return (encoder.json as any)(columnValue) diff --git a/clients/typescript/src/util/encoders/sqliteEncoders.ts b/clients/typescript/src/util/encoders/sqliteEncoders.ts index 19074f0af5..d0624d0cc4 100644 --- a/clients/typescript/src/util/encoders/sqliteEncoders.ts +++ b/clients/typescript/src/util/encoders/sqliteEncoders.ts @@ -13,6 +13,7 @@ export const sqliteTypeEncoder = { json: (string: string) => textEncoder.encode(string), timetz: (string: string) => sqliteTypeEncoder.text(stringToTimetzString(string)), + bytea: (bytes: Uint8Array) => bytes, // no-op } export const sqliteTypeDecoder = { @@ -21,6 +22,7 @@ export const sqliteTypeDecoder = { json: bytesToString, timetz: bytesToTimetzString, float: bytesToFloat, + bytea: (bytes: Uint8Array) => bytes, // no-op } export function boolToBytes(b: number) { From 8409e4c703ed0ebb9a59bf15d45330b6669b23db Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 24 Apr 2024 10:01:25 +0200 Subject: [PATCH 119/156] Decoder for JSONB --- clients/typescript/src/satellite/client.ts | 4 +--- clients/typescript/src/util/encoders/pgEncoders.ts | 3 +-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/clients/typescript/src/satellite/client.ts b/clients/typescript/src/satellite/client.ts index 323f33b338..8f4458e04c 100644 --- a/clients/typescript/src/satellite/client.ts +++ b/clients/typescript/src/satellite/client.ts @@ -1444,11 +1444,9 @@ function deserializeColumnData( return decoder.timetz(column) case PgBasicType.PG_BYTEA: return decoder.bytea(column) - /* case PgBasicType.PG_JSON: case PgBasicType.PG_JSONB: - return (decoder.json as any)(column) - */ + return decoder.json(column) default: // also covers user-defined enumeration types return decoder.text(column) diff --git a/clients/typescript/src/util/encoders/pgEncoders.ts b/clients/typescript/src/util/encoders/pgEncoders.ts index a13a340d19..8698269bc3 100644 --- a/clients/typescript/src/util/encoders/pgEncoders.ts +++ b/clients/typescript/src/util/encoders/pgEncoders.ts @@ -1,5 +1,5 @@ import { sqliteTypeEncoder, sqliteTypeDecoder } from './sqliteEncoders' -import { textEncoder, textDecoder } from './common' +import { textEncoder } from './common' import { trueByte, falseByte } from './common' export const pgTypeEncoder = { @@ -13,7 +13,6 @@ export const pgTypeEncoder = { export const pgTypeDecoder = { ...sqliteTypeDecoder, bool: bytesToBool, - json: (bs: Uint8Array) => JSON.parse(textDecoder.decode(bs)), } function boolToBytes(b: boolean) { From 1c3c0c1778b123226ff11c20f1ef39a9b7c7c25b Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 25 Apr 2024 10:18:47 +0200 Subject: [PATCH 120/156] Log when client reconnects and syncs from lsn on Electric and match on it in e2e test --- components/electric/lib/electric/satellite/protocol.ex | 2 ++ ...satellite_can_resume_replication_after_server_restart.lux | 5 +++++ 2 files changed, 7 insertions(+) diff --git 
a/components/electric/lib/electric/satellite/protocol.ex b/components/electric/lib/electric/satellite/protocol.ex index 40a47e447a..d30f6eeef3 100644 --- a/components/electric/lib/electric/satellite/protocol.ex +++ b/components/electric/lib/electric/satellite/protocol.ex @@ -528,6 +528,8 @@ defmodule Electric.Satellite.Protocol do if CachedWal.Api.lsn_in_cached_window?(state.origin, lsn) do case restore_client_state(msg.subscription_ids, msg.observed_transaction_data, lsn, state) do {:ok, state} -> + Logger.debug("Continuing sync for client #{state.client_id} from lsn #{lsn}") + state = state |> Telemetry.start_replication_span(subscriptions: length(msg.subscription_ids)) diff --git a/e2e/tests/03.25_node_satellite_can_resume_replication_after_server_restart.lux b/e2e/tests/03.25_node_satellite_can_resume_replication_after_server_restart.lux index dc5d301f70..671b6b084c 100644 --- a/e2e/tests/03.25_node_satellite_can_resume_replication_after_server_restart.lux +++ b/e2e/tests/03.25_node_satellite_can_resume_replication_after_server_restart.lux @@ -200,6 +200,11 @@ [shell satellite_2] ??Connectivity state changed: connected +[shell electric] + # ?+ matches the two lines in any order + ?+Continuing sync for client $client_1_id from + ?Continuing sync for client $client_2_id from + [shell pg_1] !SELECT * FROM electric.client_actions; ??(0 rows) From bb7cd1ad54adfd701d5325683b94547282f33b13 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 25 Apr 2024 10:36:45 +0200 Subject: [PATCH 121/156] Address stefanos' comments --- clients/typescript/src/satellite/client.ts | 2 +- clients/typescript/src/satellite/process.ts | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/clients/typescript/src/satellite/client.ts b/clients/typescript/src/satellite/client.ts index 8f4458e04c..c3ba05b07e 100644 --- a/clients/typescript/src/satellite/client.ts +++ b/clients/typescript/src/satellite/client.ts @@ -1475,7 +1475,7 @@ function serializeColumnData( } function serializeNullData(): Uint8Array { - return sqliteTypeEncoder.text('') + return new Uint8Array() } export function toMessage(data: Uint8Array): SatPbMsg { diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index 8bed68f34e..97da57e210 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -144,9 +144,9 @@ export class SatelliteProcess implements Satellite { /** * To optimize inserting a lot of data when the subscription data comes, we need to do - * less `INSERT` queries, but SQLite supports only a limited amount of `?` positional - * arguments. Precisely, its either 999 for versions prior to 3.32.0 and 32766 for - * versions after. + * less `INSERT` queries, but SQLite/Postgres support only a limited amount of `?`/`$i` positional + * arguments. Precisely, its either 999 for SQLite versions prior to 3.32.0 and 32766 for + * versions after, and 65535 for Postgres. */ private maxSqlParameters: 999 | 32766 | 65535 = 999 private snapshotMutex: Mutex = new Mutex() From 08d3da3b461868d28fd573a11169a0053474b2e8 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 25 Apr 2024 12:27:12 +0200 Subject: [PATCH 122/156] Rename old test files which now contain the common test logic but do not run the actual tests. 
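
The renamed files export shared test suites that the per-dialect *.test.ts files import and run; a minimal illustrative sketch of that pattern follows, reusing the processTests and ContextType names visible in the diffs below (the suite body, the ContextType shape and the generic parameter are placeholders, not the actual test code):

    // process.ts -- defines the shared suite but does not run it
    import { TestFn } from 'ava'
    export type ContextType = { /* satellite, adapter, ... provided by per-dialect setup */ }
    export const processTests = (test: TestFn<ContextType>) => {
      test('applies an incoming transaction', async (t) => {
        // dialect-agnostic assertions against t.context go here
      })
    }

    // sqlite/process.test.ts -- binds the shared suite to one dialect
    import anyTest, { TestFn } from 'ava'
    import { processTests, ContextType } from '../process'
    const test = anyTest as TestFn<ContextType>
    processTests(test)
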
--- .../test/migrators/{builder.test.ts => builder.ts} | 0 .../typescript/test/migrators/{bundle.test.ts => bundle.ts} | 0 clients/typescript/test/migrators/pglite/builder.test.ts | 6 +----- clients/typescript/test/migrators/pglite/bundle.test.ts | 2 +- clients/typescript/test/migrators/postgres/builder.test.ts | 6 +----- clients/typescript/test/migrators/postgres/bundle.test.ts | 2 +- clients/typescript/test/migrators/sqlite/builder.test.ts | 6 +----- clients/typescript/test/migrators/sqlite/bundle.test.ts | 2 +- .../test/satellite/pglite/process.migration.test.ts | 2 +- .../typescript/test/satellite/pglite/process.tags.test.ts | 2 +- clients/typescript/test/satellite/pglite/process.test.ts | 2 +- .../typescript/test/satellite/pglite/process.timing.test.ts | 2 +- .../test/satellite/postgres/process.migration.test.ts | 2 +- .../typescript/test/satellite/postgres/process.tags.test.ts | 2 +- clients/typescript/test/satellite/postgres/process.test.ts | 2 +- .../test/satellite/postgres/process.timing.test.ts | 2 +- .../{process.migration.test.ts => process.migration.ts} | 0 .../satellite/{process.tags.test.ts => process.tags.ts} | 0 .../satellite/{process.timing.test.ts => process.timing.ts} | 0 .../test/satellite/{process.test.ts => process.ts} | 0 .../test/satellite/sqlite/process.migration.test.ts | 2 +- .../typescript/test/satellite/sqlite/process.tags.test.ts | 2 +- clients/typescript/test/satellite/sqlite/process.test.ts | 2 +- .../typescript/test/satellite/sqlite/process.timing.test.ts | 2 +- 24 files changed, 18 insertions(+), 30 deletions(-) rename clients/typescript/test/migrators/{builder.test.ts => builder.ts} (100%) rename clients/typescript/test/migrators/{bundle.test.ts => bundle.ts} (100%) rename clients/typescript/test/satellite/{process.migration.test.ts => process.migration.ts} (100%) rename clients/typescript/test/satellite/{process.tags.test.ts => process.tags.ts} (100%) rename clients/typescript/test/satellite/{process.timing.test.ts => process.timing.ts} (100%) rename clients/typescript/test/satellite/{process.test.ts => process.ts} (100%) diff --git a/clients/typescript/test/migrators/builder.test.ts b/clients/typescript/test/migrators/builder.ts similarity index 100% rename from clients/typescript/test/migrators/builder.test.ts rename to clients/typescript/test/migrators/builder.ts diff --git a/clients/typescript/test/migrators/bundle.test.ts b/clients/typescript/test/migrators/bundle.ts similarity index 100% rename from clients/typescript/test/migrators/bundle.test.ts rename to clients/typescript/test/migrators/bundle.ts diff --git a/clients/typescript/test/migrators/pglite/builder.test.ts b/clients/typescript/test/migrators/pglite/builder.test.ts index 306dba1ee5..4dac188d41 100644 --- a/clients/typescript/test/migrators/pglite/builder.test.ts +++ b/clients/typescript/test/migrators/pglite/builder.test.ts @@ -1,10 +1,6 @@ import anyTest, { TestFn } from 'ava' import { makeMigration, parseMetadata } from '../../../src/migrators/builder' -import { - ContextType, - bundleTests, - makeMigrationMetaData, -} from '../builder.test' +import { ContextType, bundleTests, makeMigrationMetaData } from '../builder' import { PGlite } from '@electric-sql/pglite' import { DatabaseAdapter } from '../../../src/drivers/pglite' import { PgBundleMigrator } from '../../../src/migrators' diff --git a/clients/typescript/test/migrators/pglite/bundle.test.ts b/clients/typescript/test/migrators/pglite/bundle.test.ts index 3f7a65e441..60cd3b05ad 100644 --- 
a/clients/typescript/test/migrators/pglite/bundle.test.ts +++ b/clients/typescript/test/migrators/pglite/bundle.test.ts @@ -6,7 +6,7 @@ import { PgBundleMigrator as BundleMigrator } from '../../../src/migrators/bundl import { randomValue } from '../../../src/util/random' import { PGlite } from '@electric-sql/pglite' -import { ContextType, bundleTests } from '../bundle.test' +import { ContextType, bundleTests } from '../bundle' import migrations from '../../support/migrations/pg-migrations.js' diff --git a/clients/typescript/test/migrators/postgres/builder.test.ts b/clients/typescript/test/migrators/postgres/builder.test.ts index 2b09d73c72..fd1bb28d85 100644 --- a/clients/typescript/test/migrators/postgres/builder.test.ts +++ b/clients/typescript/test/migrators/postgres/builder.test.ts @@ -1,10 +1,6 @@ import anyTest, { TestFn } from 'ava' import { makeMigration, parseMetadata } from '../../../src/migrators/builder' -import { - ContextType, - bundleTests, - makeMigrationMetaData, -} from '../builder.test' +import { ContextType, bundleTests, makeMigrationMetaData } from '../builder' import { makePgDatabase } from '../../support/node-postgres' import { DatabaseAdapter } from '../../../src/drivers/node-postgres' import { PgBundleMigrator } from '../../../src/migrators' diff --git a/clients/typescript/test/migrators/postgres/bundle.test.ts b/clients/typescript/test/migrators/postgres/bundle.test.ts index c2ae7c07cd..6dfef4c3ee 100644 --- a/clients/typescript/test/migrators/postgres/bundle.test.ts +++ b/clients/typescript/test/migrators/postgres/bundle.test.ts @@ -6,7 +6,7 @@ import { PgBundleMigrator as BundleMigrator } from '../../../src/migrators/bundl import { randomValue } from '../../../src/util/random' import { makePgDatabase } from '../../support/node-postgres' -import { ContextType, bundleTests } from '../bundle.test' +import { ContextType, bundleTests } from '../bundle' import migrations from '../../support/migrations/pg-migrations.js' diff --git a/clients/typescript/test/migrators/sqlite/builder.test.ts b/clients/typescript/test/migrators/sqlite/builder.test.ts index 031f5448e3..6d2d3699a8 100644 --- a/clients/typescript/test/migrators/sqlite/builder.test.ts +++ b/clients/typescript/test/migrators/sqlite/builder.test.ts @@ -3,11 +3,7 @@ import { makeMigration, parseMetadata } from '../../../src/migrators/builder' import Database from 'better-sqlite3' import { DatabaseAdapter } from '../../../src/drivers/better-sqlite3' import { sqliteBuilder } from '../../../src/migrators/query-builder' -import { - ContextType, - bundleTests, - makeMigrationMetaData, -} from '../builder.test' +import { ContextType, bundleTests, makeMigrationMetaData } from '../builder' import { SqliteBundleMigrator } from '../../../src/migrators' const test = anyTest as TestFn diff --git a/clients/typescript/test/migrators/sqlite/bundle.test.ts b/clients/typescript/test/migrators/sqlite/bundle.test.ts index 1b13d035a0..fdccdc4d9f 100644 --- a/clients/typescript/test/migrators/sqlite/bundle.test.ts +++ b/clients/typescript/test/migrators/sqlite/bundle.test.ts @@ -7,7 +7,7 @@ import { DatabaseAdapter } from '../../../src/drivers/better-sqlite3/adapter' import { SqliteBundleMigrator as BundleMigrator } from '../../../src/migrators/bundle' import { randomValue } from '../../../src/util/random' -import { ContextType, bundleTests } from '../bundle.test' +import { ContextType, bundleTests } from '../bundle' import migrations from '../../support/migrations/migrations.js' diff --git 
a/clients/typescript/test/satellite/pglite/process.migration.test.ts b/clients/typescript/test/satellite/pglite/process.migration.test.ts index 6dbc9216eb..c03164e3b9 100644 --- a/clients/typescript/test/satellite/pglite/process.migration.test.ts +++ b/clients/typescript/test/satellite/pglite/process.migration.test.ts @@ -6,7 +6,7 @@ import { commonSetup, ContextType, processMigrationTests, -} from '../process.migration.test' +} from '../process.migration' const test = testAny as TestFn diff --git a/clients/typescript/test/satellite/pglite/process.tags.test.ts b/clients/typescript/test/satellite/pglite/process.tags.test.ts index 9c075150b5..c1775019e9 100644 --- a/clients/typescript/test/satellite/pglite/process.tags.test.ts +++ b/clients/typescript/test/satellite/pglite/process.tags.test.ts @@ -3,7 +3,7 @@ import anyTest, { TestFn } from 'ava' import { makePgliteContext, cleanAndStopSatellite } from '../common' import { getPgMatchingShadowEntries } from '../../support/satellite-helpers' -import { processTagsTests, ContextType } from '../process.tags.test' +import { processTagsTests, ContextType } from '../process.tags' const test = anyTest as TestFn test.beforeEach(async (t) => { diff --git a/clients/typescript/test/satellite/pglite/process.test.ts b/clients/typescript/test/satellite/pglite/process.test.ts index fdbc7d48d0..a5ebee3e89 100644 --- a/clients/typescript/test/satellite/pglite/process.test.ts +++ b/clients/typescript/test/satellite/pglite/process.test.ts @@ -5,7 +5,7 @@ import { getPgMatchingShadowEntries } from '../../support/satellite-helpers' import { makePgliteContext, cleanAndStopSatellite } from '../common' import { pgBuilder } from '../../../src/migrators/query-builder' -import { processTests, ContextType } from '../process.test' +import { processTests, ContextType } from '../process' import { QualifiedTablename } from '../../../src/util' // Run all tests in this file serially diff --git a/clients/typescript/test/satellite/pglite/process.timing.test.ts b/clients/typescript/test/satellite/pglite/process.timing.test.ts index c11e6eb052..5856e278d6 100644 --- a/clients/typescript/test/satellite/pglite/process.timing.test.ts +++ b/clients/typescript/test/satellite/pglite/process.timing.test.ts @@ -1,5 +1,5 @@ import anyTest, { TestFn } from 'ava' -import { processTimingTests } from '../process.timing.test' +import { processTimingTests } from '../process.timing' import { makePgliteContext, cleanAndStopSatellite, diff --git a/clients/typescript/test/satellite/postgres/process.migration.test.ts b/clients/typescript/test/satellite/postgres/process.migration.test.ts index ced08b7955..3157d14cab 100644 --- a/clients/typescript/test/satellite/postgres/process.migration.test.ts +++ b/clients/typescript/test/satellite/postgres/process.migration.test.ts @@ -6,7 +6,7 @@ import { commonSetup, ContextType, processMigrationTests, -} from '../process.migration.test' +} from '../process.migration' const test = testAny as TestFn diff --git a/clients/typescript/test/satellite/postgres/process.tags.test.ts b/clients/typescript/test/satellite/postgres/process.tags.test.ts index f564da4b89..8823b78ebe 100644 --- a/clients/typescript/test/satellite/postgres/process.tags.test.ts +++ b/clients/typescript/test/satellite/postgres/process.tags.test.ts @@ -3,7 +3,7 @@ import anyTest, { TestFn } from 'ava' import { makePgContext, cleanAndStopSatellite } from '../common' import { getPgMatchingShadowEntries } from '../../support/satellite-helpers' -import { processTagsTests, ContextType } from 
'../process.tags.test' +import { processTagsTests, ContextType } from '../process.tags' let port = 5100 diff --git a/clients/typescript/test/satellite/postgres/process.test.ts b/clients/typescript/test/satellite/postgres/process.test.ts index aeeca6b411..7342cc31fd 100644 --- a/clients/typescript/test/satellite/postgres/process.test.ts +++ b/clients/typescript/test/satellite/postgres/process.test.ts @@ -5,7 +5,7 @@ import { getPgMatchingShadowEntries } from '../../support/satellite-helpers' import { makePgContext, cleanAndStopSatellite } from '../common' import { pgBuilder } from '../../../src/migrators/query-builder' -import { processTests, ContextType } from '../process.test' +import { processTests, ContextType } from '../process' import { QualifiedTablename } from '../../../src/util' let port = 5200 diff --git a/clients/typescript/test/satellite/postgres/process.timing.test.ts b/clients/typescript/test/satellite/postgres/process.timing.test.ts index 385792add0..450aeaf9f5 100644 --- a/clients/typescript/test/satellite/postgres/process.timing.test.ts +++ b/clients/typescript/test/satellite/postgres/process.timing.test.ts @@ -1,5 +1,5 @@ import anyTest, { TestFn } from 'ava' -import { processTimingTests } from '../process.timing.test' +import { processTimingTests } from '../process.timing' import { makePgContext, cleanAndStopSatellite, ContextType } from '../common' let port = 4900 diff --git a/clients/typescript/test/satellite/process.migration.test.ts b/clients/typescript/test/satellite/process.migration.ts similarity index 100% rename from clients/typescript/test/satellite/process.migration.test.ts rename to clients/typescript/test/satellite/process.migration.ts diff --git a/clients/typescript/test/satellite/process.tags.test.ts b/clients/typescript/test/satellite/process.tags.ts similarity index 100% rename from clients/typescript/test/satellite/process.tags.test.ts rename to clients/typescript/test/satellite/process.tags.ts diff --git a/clients/typescript/test/satellite/process.timing.test.ts b/clients/typescript/test/satellite/process.timing.ts similarity index 100% rename from clients/typescript/test/satellite/process.timing.test.ts rename to clients/typescript/test/satellite/process.timing.ts diff --git a/clients/typescript/test/satellite/process.test.ts b/clients/typescript/test/satellite/process.ts similarity index 100% rename from clients/typescript/test/satellite/process.test.ts rename to clients/typescript/test/satellite/process.ts diff --git a/clients/typescript/test/satellite/sqlite/process.migration.test.ts b/clients/typescript/test/satellite/sqlite/process.migration.test.ts index 060d3eef0e..fb95c40352 100644 --- a/clients/typescript/test/satellite/sqlite/process.migration.test.ts +++ b/clients/typescript/test/satellite/sqlite/process.migration.test.ts @@ -6,7 +6,7 @@ import { ContextType, commonSetup, processMigrationTests, -} from '../process.migration.test' +} from '../process.migration' const test = testAny as TestFn diff --git a/clients/typescript/test/satellite/sqlite/process.tags.test.ts b/clients/typescript/test/satellite/sqlite/process.tags.test.ts index dd9d5753ab..5b0bb41d3d 100644 --- a/clients/typescript/test/satellite/sqlite/process.tags.test.ts +++ b/clients/typescript/test/satellite/sqlite/process.tags.test.ts @@ -2,7 +2,7 @@ import anyTest, { TestFn } from 'ava' import { makeContext, cleanAndStopSatellite } from '../common' -import { processTagsTests, ContextType } from '../process.tags.test' +import { processTagsTests, ContextType } from 
'../process.tags' import { getMatchingShadowEntries } from '../../support/satellite-helpers' const test = anyTest as TestFn diff --git a/clients/typescript/test/satellite/sqlite/process.test.ts b/clients/typescript/test/satellite/sqlite/process.test.ts index 5c43bbf515..69b6991c5c 100644 --- a/clients/typescript/test/satellite/sqlite/process.test.ts +++ b/clients/typescript/test/satellite/sqlite/process.test.ts @@ -5,7 +5,7 @@ import { getMatchingShadowEntries as getSQLiteMatchingShadowEntries } from '../. import { makeContext, cleanAndStopSatellite } from '../common' import { sqliteBuilder } from '../../../src/migrators/query-builder' -import { processTests, ContextType } from '../process.test' +import { processTests, ContextType } from '../process' import { QualifiedTablename } from '../../../src/util' const test = anyTest as TestFn diff --git a/clients/typescript/test/satellite/sqlite/process.timing.test.ts b/clients/typescript/test/satellite/sqlite/process.timing.test.ts index 135346b8ed..649fdebd9b 100644 --- a/clients/typescript/test/satellite/sqlite/process.timing.test.ts +++ b/clients/typescript/test/satellite/sqlite/process.timing.test.ts @@ -1,5 +1,5 @@ import anyTest, { TestFn } from 'ava' -import { processTimingTests } from '../process.timing.test' +import { processTimingTests } from '../process.timing' import { makeContext, clean, ContextType } from '../common' const test = anyTest as TestFn From 6d2c77f0c697b9c476692af896b8c4b770885926 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 25 Apr 2024 12:37:17 +0200 Subject: [PATCH 123/156] Modify GH workflow to disable embedded-postgres tests in CI --- .../workflows/clients_typescript_tests.yml | 2 +- clients/typescript/Makefile | 3 ++ clients/typescript/ava.config.js | 42 +++++++++++++++---- clients/typescript/package.json | 1 + 4 files changed, 38 insertions(+), 10 deletions(-) diff --git a/.github/workflows/clients_typescript_tests.yml b/.github/workflows/clients_typescript_tests.yml index f3108dafcd..d95759c1fc 100644 --- a/.github/workflows/clients_typescript_tests.yml +++ b/.github/workflows/clients_typescript_tests.yml @@ -70,7 +70,7 @@ jobs: - name: Build run: make build - name: Run tests - run: make tests + run: make tests-CI maybe_publish: runs-on: ubuntu-latest needs: [test, check_types, verify_formatting] diff --git a/clients/typescript/Makefile b/clients/typescript/Makefile index 05b7ba3ff7..dd984878eb 100644 --- a/clients/typescript/Makefile +++ b/clients/typescript/Makefile @@ -17,6 +17,9 @@ build-dev: node_modules tests: pnpm run test +tests-CI: + pnpm run test-CI + style: prettier --check --loglevel warn . && eslint src --quiet diff --git a/clients/typescript/ava.config.js b/clients/typescript/ava.config.js index e7208b1d4e..d414e906d0 100644 --- a/clients/typescript/ava.config.js +++ b/clients/typescript/ava.config.js @@ -1,5 +1,17 @@ const [major, minor, _patch] = process.versions.node.split('.').map(Number) -const testDialect = process.env.DIALECT + +// Developers can provide a `TEST_ONLY_DIALECT` value of `postgres`, `pglite`, or `sqlite` +// to run the unit tests only for that dialect. +// Developers can also provide a `DISABLE_DIALECT` value of `postgres`, `pglite`, or `sqlite` +// to disable the unit tests for that dialect but run all others. +const testOnlyDialect = process.env.TEST_ONLY_DIALECT +const disableDialect = process.env.DISABLE_DIALECT + +if (testOnlyDialect && disableDialect) { + throw new Error( + 'Cannot set both TEST_ONLY_DIALECT and DISABLE_DIALECT environment variables.' 
+ ) +} let loaderArg if ( @@ -17,16 +29,28 @@ const ignorePostgres = ['!test/**/postgres/**'] const ignorePglite = ['!test/**/pglite/**'] const ignoreSqlite = ['!test/**/sqlite/**'] -if (testDialect === 'postgres') { - files.push(...ignorePglite, ...ignoreSqlite) -} - -if (testDialect === 'pglite') { - files.push(...ignorePostgres, ...ignoreSqlite) +switch (testOnlyDialect) { + case 'postgres': + files.push(...ignorePglite, ...ignoreSqlite) + break + case 'pglite': + files.push(...ignorePostgres, ...ignoreSqlite) + break + case 'sqlite': + files.push(...ignorePostgres, ...ignorePglite) + break } -if (testDialect === 'sqlite') { - files.push(...ignorePostgres, ...ignorePglite) +switch (disableDialect) { + case 'postgres': + files.push(...ignorePostgres) + break + case 'pglite': + files.push(...ignorePglite) + break + case 'sqlite': + files.push(...ignoreSqlite) + break } export default { diff --git a/clients/typescript/package.json b/clients/typescript/package.json index dd8a5c95e1..4bbb59321f 100644 --- a/clients/typescript/package.json +++ b/clients/typescript/package.json @@ -164,6 +164,7 @@ "build": "shx rm -rf dist && npm run build:copy-docker && concurrently \"tsup\" \"tsc -p tsconfig.build.json\" && node scripts/fix-imports.js", "build:copy-docker": "shx mkdir -p ./dist/cli/docker-commands/docker && shx cp -r ./src/cli/docker-commands/docker ./dist/cli/docker-commands", "test": "ava", + "test-CI": "DISABLE_DIALECT=postgres npm run test", "generate-test-client": "npx tsx ./test/client/generateTestClient.ts", "typecheck": "tsc -p tsconfig.json", "posttest": "npm run typecheck", From 687a00a2c4f10421b749126ea5deaa767060b4d4 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 29 Apr 2024 15:17:53 +0200 Subject: [PATCH 124/156] Fix namespace in SQLite trigger --- clients/typescript/src/migrators/query-builder/sqliteBuilder.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts index 030c3a6ea6..e5a57c2e1e 100644 --- a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -274,7 +274,7 @@ class SqliteBuilder extends QueryBuilder { dedent` CREATE TRIGGER compensation_${opTypeLower}_${namespace}_${tableName}_${childKey}_into_oplog AFTER ${opType} ON "${namespace}"."${tableName}" - WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${fkTableName}') AND + WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = '${fkTableNamespace}' AND tablename = '${fkTableName}') AND 1 = (SELECT value from _electric_meta WHERE key = 'compensations') BEGIN INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) From a18cbfb0ae2f81382d563459f38fac538264d09f Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 29 Apr 2024 15:18:16 +0200 Subject: [PATCH 125/156] Enable PG triggers and fix compensation trigger --- clients/typescript/src/migrators/query-builder/pgBuilder.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index 017c88ab28..cc2bdc016d 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -256,6 +256,7 @@ class PgBuilder extends 
QueryBuilder { FOR EACH ROW EXECUTE FUNCTION update_ensure_${namespace}_${tablename}_primarykey_function(); `, + `ALTER TABLE "${namespace}"."${tablename}" ENABLE ALWAYS TRIGGER update_ensure_${namespace}_${tablename}_primarykey;`, ] } @@ -353,6 +354,7 @@ class PgBuilder extends QueryBuilder { FOR EACH ROW EXECUTE FUNCTION ${opTypeLower}_${namespace}_${tableName}_into_oplog_function(); `, + `ALTER TABLE "${namespace}"."${tableName}" ENABLE ALWAYS TRIGGER ${opTypeLower}_${namespace}_${tableName}_into_oplog;`, ] } @@ -386,7 +388,7 @@ class PgBuilder extends QueryBuilder { SELECT '${fkTableNamespace}', '${fkTableName}', - 'UPDATE', + 'COMPENSATION', ${this.removeSpaceAndNullValuesFromJson( this.createPKJsonObject(joinedFkPKs) )}, @@ -408,6 +410,7 @@ class PgBuilder extends QueryBuilder { FOR EACH ROW EXECUTE FUNCTION compensation_${opTypeLower}_${namespace}_${tableName}_${childKey}_into_oplog_function(); `, + `ALTER TABLE "${namespace}"."${tableName}" ENABLE ALWAYS TRIGGER compensation_${opTypeLower}_${namespace}_${tableName}_${childKey}_into_oplog;`, ] } From 8449079663fdb709ef2f3a93a735e94461df60b7 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Mon, 29 Apr 2024 15:40:18 +0200 Subject: [PATCH 126/156] Remove double entry in package.json after rebase --- clients/typescript/package.json | 3 --- 1 file changed, 3 deletions(-) diff --git a/clients/typescript/package.json b/clients/typescript/package.json index 4bbb59321f..58d940d0b5 100644 --- a/clients/typescript/package.json +++ b/clients/typescript/package.json @@ -302,9 +302,6 @@ "@tauri-apps/plugin-sql": { "optional": true }, - "@tauri-apps/plugin-sql": { - "optional": true - }, "embedded-postgres": { "optional": true }, From a33fa29fce4359f48e5cda8e6a5d65491927ce64 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 30 Apr 2024 08:54:26 +0200 Subject: [PATCH 127/156] Split e2e workflow in 2 in order to run the 03 tests with a matrix for SQLite and PG. 
--- .github/workflows/e2e.yml | 92 ++++++++++++++++++++++++++++++++++++++- e2e/Makefile | 6 +++ 2 files changed, 97 insertions(+), 1 deletion(-) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 987d0b4d84..d0f02a97ad 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -8,6 +8,7 @@ on: # Root files - "*" - "!pnpm-lock.yaml" + - "!clients/typescript/**" # Satellite tests run in a separate workflow # CI files not related to GH actions - ".buildkite/**" - "**/README.md" @@ -80,7 +81,7 @@ jobs: - run: make lux - run: make deps pull - - run: make test_sqlite_and_pg + - run: make test-no-satellite id: tests env: ELECTRIC_IMAGE_NAME: electric-sql-ci/electric @@ -110,3 +111,92 @@ jobs: -F "run_env[commit_sha]=$GITHUB_SHA" \ -F "run_env[url]=https://github.com/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" \ https://analytics-api.buildkite.com/v1/uploads + + e2e_satellite_tests: + name: E2E Satellite tests + runs-on: electric-e2e-8-32 + strategy: + matrix: + dialect: [SQLite, Postgres] + defaults: + run: + working-directory: e2e + env: + BUILDKITE_ANALYTICS_TOKEN: ${{ secrets.BUILDKITE_TEST_ANALYTICS_E2E }} + DIALECT: ${{ matrix.dialect }} + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + - name: Inject variables for `docker buildx` github actions caching + uses: crazy-max/ghaction-github-runtime@v2 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Log in to the Container registry + uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - uses: erlef/setup-beam@v1 + with: + otp-version: ${{ env.OTP_VERSION }} + elixir-version: ${{ env.ELIXIR_VERSION }} + + - run: | + echo "ELECTRIC_VERSION=$(make --silent print_version_from_git)" >> $GITHUB_ENV + working-directory: components/electric + - run: make docker-build-ci + env: + ELECTRIC_IMAGE_NAME: electric-sql-ci/electric + working-directory: components/electric + - run: make docker-build-ws-client + env: + ELECTRIC_CLIENT_IMAGE_NAME: electric-sql-ci/electric-ws-client + working-directory: components/electric + + - name: Cache built lux + uses: actions/cache@v3 + with: + path: | + e2e/lux/bin + e2e/lux/ebin + e2e/lux/priv + key: ${{ runner.os }}-luxbuilt-${{ env.OTP_VERSION }}-${{ env.ELIXIR_VERSION }} + + - run: make lux + - run: make deps pull + - run: make test-satellite-only + id: tests + env: + ELECTRIC_IMAGE_NAME: electric-sql-ci/electric + ELECTRIC_CLIENT_IMAGE_NAME: electric-sql-ci/electric-ws-client + ELECTRIC_IMAGE_TAG: ${{ env.ELECTRIC_VERSION }} + + - name: Upload lux logs + uses: actions/upload-artifact@v3 + if: ${{ failure() && steps.tests.outcome == 'failure' }} + with: + name: lux_logs + path: e2e/**/lux_logs/run_* + - name: Upload test results to Buildkite analytics + if: ${{ !cancelled() && steps.tests.outcome != 'skipped' && env.BUILDKITE_ANALYTICS_TOKEN != '' }} + working-directory: e2e/lux_logs/latest_run + run: | + curl \ + -X POST \ + --fail-with-body \ + -H "Authorization: Token token=\"$BUILDKITE_ANALYTICS_TOKEN\"" \ + -F "data=@lux_junit.xml" \ + -F "format=junit" \ + -F "run_env[CI]=github_actions" \ + -F "run_env[key]=$GITHUB_ACTION-$GITHUB_RUN_NUMBER-$GITHUB_RUN_ATTEMPT" \ + -F "run_env[number]=$GITHUB_RUN_NUMBER" \ + -F "run_env[branch]=$GITHUB_REF" \ + -F "run_env[commit_sha]=$GITHUB_SHA" \ + 
-F "run_env[url]=https://github.com/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" \ + https://analytics-api.buildkite.com/v1/uploads \ No newline at end of file diff --git a/e2e/Makefile b/e2e/Makefile index f10461a1f8..5295f8a570 100644 --- a/e2e/Makefile +++ b/e2e/Makefile @@ -13,6 +13,12 @@ test_only: test: deps pull test_only +test-no-satellite: + find tests -type f -maxdepth 1 -name "*.lux" -and -not -name "03.*.lux" | sort -h | xargs ${LUX} --junit + +test-satellite-only: + ${LUX} --junit tests/03.*.lux + pull: docker compose -f services_templates.yaml pull \ postgresql From a0c616195ecda3ec2e33bcba5322336432ea7998 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 30 Apr 2024 15:04:48 +0200 Subject: [PATCH 128/156] Do not enable PG triggers to always run, only run on local operations --- clients/typescript/src/migrators/query-builder/pgBuilder.ts | 3 --- 1 file changed, 3 deletions(-) diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index cc2bdc016d..f0c20dbba7 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -256,7 +256,6 @@ class PgBuilder extends QueryBuilder { FOR EACH ROW EXECUTE FUNCTION update_ensure_${namespace}_${tablename}_primarykey_function(); `, - `ALTER TABLE "${namespace}"."${tablename}" ENABLE ALWAYS TRIGGER update_ensure_${namespace}_${tablename}_primarykey;`, ] } @@ -354,7 +353,6 @@ class PgBuilder extends QueryBuilder { FOR EACH ROW EXECUTE FUNCTION ${opTypeLower}_${namespace}_${tableName}_into_oplog_function(); `, - `ALTER TABLE "${namespace}"."${tableName}" ENABLE ALWAYS TRIGGER ${opTypeLower}_${namespace}_${tableName}_into_oplog;`, ] } @@ -410,7 +408,6 @@ class PgBuilder extends QueryBuilder { FOR EACH ROW EXECUTE FUNCTION compensation_${opTypeLower}_${namespace}_${tableName}_${childKey}_into_oplog_function(); `, - `ALTER TABLE "${namespace}"."${tableName}" ENABLE ALWAYS TRIGGER compensation_${opTypeLower}_${namespace}_${tableName}_${childKey}_into_oplog;`, ] } From 7f03af31ac3d496e8fbb85585c9488bd14469baf Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 30 Apr 2024 15:07:08 +0200 Subject: [PATCH 129/156] Fix compensation trigger to read trigger flag from its own table. 
--- clients/typescript/src/migrators/query-builder/pgBuilder.ts | 2 +- .../typescript/src/migrators/query-builder/sqliteBuilder.ts | 2 +- clients/typescript/test/support/migrations/migrations.js | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index f0c20dbba7..0c282dbd68 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -377,7 +377,7 @@ class PgBuilder extends QueryBuilder { flag_value INTEGER; meta_value INTEGER; BEGIN - SELECT flag INTO flag_value FROM "${namespace}"._electric_trigger_settings WHERE namespace = '${fkTableNamespace}' AND tablename = '${fkTableName}'; + SELECT flag INTO flag_value FROM "${namespace}"._electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${tableName}'; SELECT value INTO meta_value FROM "${namespace}"._electric_meta WHERE key = 'compensations'; diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts index e5a57c2e1e..3e692e9c08 100644 --- a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -274,7 +274,7 @@ class SqliteBuilder extends QueryBuilder { dedent` CREATE TRIGGER compensation_${opTypeLower}_${namespace}_${tableName}_${childKey}_into_oplog AFTER ${opType} ON "${namespace}"."${tableName}" - WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = '${fkTableNamespace}' AND tablename = '${fkTableName}') AND + WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${tableName}') AND 1 = (SELECT value from _electric_meta WHERE key = 'compensations') BEGIN INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) diff --git a/clients/typescript/test/support/migrations/migrations.js b/clients/typescript/test/support/migrations/migrations.js index ee493d6fba..85736ce4b4 100644 --- a/clients/typescript/test/support/migrations/migrations.js +++ b/clients/typescript/test/support/migrations/migrations.js @@ -37,9 +37,9 @@ export default [ 'DROP TRIGGER IF EXISTS delete_main_child_into_oplog;', "CREATE TRIGGER delete_main_child_into_oplog\n AFTER DELETE ON main.child\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'child')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n VALUES ('main', 'child', 'DELETE', json_object('id', old.id), NULL, json_object('id', old.id, 'parent', old.parent), NULL);\nEND;", 'DROP TRIGGER IF EXISTS compensation_insert_main_child_parent_into_oplog;', - "CREATE TRIGGER compensation_insert_main_child_parent_into_oplog\n AFTER INSERT ON main.child\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent') AND\n 1 == (SELECT value from _electric_meta WHERE key == 'compensations')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n SELECT 'main', 'parent', 'UPDATE', json_object('id', id), json_object('id', id, 'value', value, 'other', other), NULL, NULL\n FROM main.parent WHERE id = new.parent;\nEND;", + "CREATE TRIGGER compensation_insert_main_child_parent_into_oplog\n AFTER INSERT ON main.child\n WHEN 1 
== (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'child') AND\n 1 == (SELECT value from _electric_meta WHERE key == 'compensations')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n SELECT 'main', 'parent', 'COMPENSATION', json_object('id', id), json_object('id', id, 'value', value, 'other', other), NULL, NULL\n FROM main.parent WHERE id = new.parent;\nEND;", 'DROP TRIGGER IF EXISTS compensation_update_main_child_parent_into_oplog;', - "CREATE TRIGGER compensation_update_main_child_parent_into_oplog\n AFTER UPDATE ON main.child\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'parent') AND\n 1 == (SELECT value from _electric_meta WHERE key == 'compensations')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n SELECT 'main', 'parent', 'UPDATE', json_object('id', id), json_object('id', id, 'value', value, 'other', other), NULL, NULL\n FROM main.parent WHERE id = new.parent;\nEND;", + "CREATE TRIGGER compensation_update_main_child_parent_into_oplog\n AFTER UPDATE ON main.child\n WHEN 1 == (SELECT flag from _electric_trigger_settings WHERE namespace = 'main' AND tablename = 'child') AND\n 1 == (SELECT value from _electric_meta WHERE key == 'compensations')\nBEGIN\n INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp)\n SELECT 'main', 'parent', 'COMPENSATION', json_object('id', id), json_object('id', id, 'value', value, 'other', other), NULL, NULL\n FROM main.parent WHERE id = new.parent;\nEND;", 'DROP TRIGGER IF EXISTS update_ensure_main_items_primarykey;', "CREATE TRIGGER update_ensure_main_items_primarykey\n BEFORE UPDATE ON main.items\nBEGIN\n SELECT\n CASE\n WHEN old.value != new.value THEN\n RAISE (ABORT,'cannot change the value of column value as it belongs to the primary key')\n END;\nEND;", 'DROP TRIGGER IF EXISTS insert_main_items_into_oplog;', From b4c0952981a7802ef3fc8529cd92bc28b972988c Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 30 Apr 2024 15:10:58 +0200 Subject: [PATCH 130/156] Fixed lockfile after rebase --- pnpm-lock.yaml | 285 ++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 273 insertions(+), 12 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index dface65568..c6bb303bab 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -23,6 +23,9 @@ importers: '@prisma/client': specifier: 4.8.1 version: 4.8.1(prisma@4.8.1) + '@tauri-apps/api': + specifier: ^1.5.3 + version: 1.5.3 async-mutex: specifier: ^0.4.0 version: 0.4.0 @@ -132,6 +135,9 @@ importers: specifier: 3.21.1 version: 3.21.1 devDependencies: + '@electric-sql/pglite': + specifier: ^0.1.5 + version: 0.1.5 '@op-engineering/op-sqlite': specifier: '>= 2.0.16' version: 2.0.20(react-native@0.68.0)(react@18.2.0) @@ -180,6 +186,9 @@ importers: '@types/node': specifier: ^18.8.4 version: 18.16.16 + '@types/pg': + specifier: ^8.11.0 + version: 8.11.0 '@types/prompts': specifier: ^2.4.9 version: 2.4.9 @@ -210,11 +219,14 @@ importers: concurrently: specifier: ^8.2.2 version: 8.2.2 + embedded-postgres: + specifier: 16.1.1-beta.9 + version: 16.1.1-beta.9 eslint: specifier: ^8.22.0 version: 8.41.0 expo-sqlite: - specifier: ^13.0.0 + specifier: ^13.1.0 version: 13.2.2(expo@50.0.17) glob: specifier: ^10.3.10 @@ -266,7 +278,7 @@ importers: version: 4.6.2 typeorm: specifier: ^0.3.9 - version: 0.3.16(better-sqlite3@8.4.0) + version: 
0.3.16(better-sqlite3@8.4.0)(pg@8.11.3) typescript: specifier: ^5.3.3 version: 5.4.3 @@ -351,6 +363,9 @@ importers: jsonwebtoken: specifier: ^9.0.0 version: 9.0.0 + pg: + specifier: ^8.11.3 + version: 8.11.3 uuid: specifier: ^9.0.0 version: 9.0.0 @@ -3491,6 +3506,82 @@ packages: engines: {node: '>=0.1.90'} dev: false + /@electric-sql/pglite@0.1.5: + resolution: {integrity: sha512-eymv4ONNvoPZQTvOQIi5dbpR+J5HzEv0qQH9o/y3gvNheJV/P/NFcrbsfJZYTsDKoq7DKrTiFNexsRkJKy8x9Q==} + dev: true + + /@embedded-postgres/darwin-arm64@16.1.1-beta.11: + resolution: {integrity: sha512-/O12CSVZygoeA+LamdN4vlmPxVzXkwQ6GRVkAXN3oog2Oj5HEeAEvMzPojlMlAtSQW2av9mvjmssIRlBhFSgqQ==} + engines: {node: '>=16'} + cpu: [arm64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /@embedded-postgres/darwin-x64@16.1.1-beta.11: + resolution: {integrity: sha512-PUB51kAxlDDvzx4nmiY9Uu2YAICrLadI1CsCGePs2Kv+wNF5JlpfWpWsyPCyo/pVQQ+c+Jbq9uy98Z3cAgBfzA==} + engines: {node: '>=16'} + cpu: [x64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /@embedded-postgres/linux-arm64@16.1.1-beta.11: + resolution: {integrity: sha512-AaGOTznBc7f2fcU2QLGJ09qAcuj9/V8mUIdpi4kgur7PsGafVa+nOHZMVTUEJW59M0z4NnsrAAqxlr5ZpDPsXw==} + engines: {node: '>=16'} + cpu: [arm64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@embedded-postgres/linux-arm@16.1.1-beta.11: + resolution: {integrity: sha512-jEst5b9I0sHyAQhCn8Z2MCjj8EK18X3PqVj7miEt7gjYvSPuprrdNkzIEaAR111e0gYtMONQbOqpECcfGLHm6w==} + engines: {node: '>=16'} + cpu: [arm] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@embedded-postgres/linux-ia32@16.1.1-beta.11: + resolution: {integrity: sha512-e37tdnp/MOuYYPIkqUD91KJYJXDM1n30drm01FAekrwdCGhGNQ2B12zdrGQLHvkpBYprEplTTSNHi9BLBcg7tA==} + engines: {node: '>=16'} + cpu: [ia32] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@embedded-postgres/linux-ppc64@16.1.1-beta.11: + resolution: {integrity: sha512-qlYegCvFfT3/vBDAHvwNY+XIVkTvzoknM+zKpI9SzbHpZHiIjQoRMx536gcKdZj81n83CWpHlTth63r26bARwQ==} + engines: {node: '>=16'} + cpu: [ppc64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@embedded-postgres/linux-x64@16.1.1-beta.11: + resolution: {integrity: sha512-co6zRgE2ctWf+xmfLqurumwzEfzQFeIoLyt8iB3jk8Z5O+bacJ7n/o5q8NyO1xJQixuZuyyBVY2boK14fCtlQQ==} + engines: {node: '>=16'} + cpu: [x64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@embedded-postgres/windows-x64@16.1.1-beta.11: + resolution: {integrity: sha512-Yc7YcWG3hthWCtg1wvXmnPJQ29sHFwk/Bnk5P8sArlx3nEiksSmO43Tb7DbaRqn9ELP8cUSUXrkWXWuLq3cPpA==} + engines: {node: '>=16'} + cpu: [x64] + os: [win32] + requiresBuild: true + dev: true + optional: true + /@esbuild/aix-ppc64@0.20.2: resolution: {integrity: sha512-D+EBOJHXdNZcLJRBkhENNG8Wji2kgc9AZ9KiPr1JuZjsNtyHzrsfLRrY0tk2H2aoFu6RANO1y1iPPUCDYWkb5g==} engines: {node: '>=12'} @@ -5043,7 +5134,7 @@ packages: dependencies: '@types/istanbul-lib-coverage': 2.0.4 '@types/istanbul-reports': 3.0.1 - '@types/node': 18.16.16 + '@types/node': 20.12.7 '@types/yargs': 15.0.15 chalk: 4.1.2 @@ -5053,7 +5144,7 @@ packages: dependencies: '@types/istanbul-lib-coverage': 2.0.4 '@types/istanbul-reports': 3.0.1 - '@types/node': 18.16.16 + '@types/node': 20.12.7 '@types/yargs': 16.0.5 chalk: 4.1.2 @@ -6212,6 +6303,11 @@ packages: defer-to-connect: 2.0.1 dev: true + /@tauri-apps/api@1.5.3: + resolution: {integrity: sha512-zxnDjHHKjOsrIzZm6nO5Xapb/BxqUq1tc7cGkFXsFkGTsSWgCPH1D8mm0XS9weJY2OaR73I3k3S+b7eSzJDfqA==} + engines: {node: '>= 14.6.0', 
npm: '>= 6.6.0', yarn: '>= 1.19.1'} + dev: false + /@tauri-apps/api@2.0.0-alpha.13: resolution: {integrity: sha512-sGgCkFahF3OZAHoGN5Ozt9WK7wJlbVZSgWpPQKNag4nSOX1+Py6VDRTEWriiJHDiV+gg31CWHnNXRy6TFoZmdA==} engines: {node: '>= 18', npm: '>= 6.6.0', yarn: '>= 1.19.1'} @@ -6324,7 +6420,7 @@ packages: /@types/graceful-fs@4.1.6: resolution: {integrity: sha512-Sig0SNORX9fdW+bQuTEovKj3uHcUL6LQKbCrrqb1X7J6/ReAbhCXRAhc+SMejhLELFj2QcyuxmUooZ4bt5ReSw==} dependencies: - '@types/node': 18.16.16 + '@types/node': 20.12.7 /@types/http-cache-semantics@4.0.1: resolution: {integrity: sha512-SZs7ekbP8CN0txVG2xVRH6EgKmEm31BOxA07vkFaETzZz1xh+cbt8BcI0slpymvwhx5dlFnQG2rTlPVQn+iRPQ==} @@ -6467,6 +6563,14 @@ packages: resolution: {integrity: sha512-xFdpkAkikBgqBdG9vIlsqffDV8GpvnPEzs0IUtr1v3BEB97ijsFQ4RXVbUZwjFThhB4MDSTUfvmxUD5PGx0wXA==} dev: true + /@types/pg@8.11.0: + resolution: {integrity: sha512-sDAlRiBNthGjNFfvt0k6mtotoVYVQ63pA8R4EMWka7crawSR60waVYR0HAgmPRs/e2YaeJTD/43OoZ3PFw80pw==} + dependencies: + '@types/node': 20.12.7 + pg-protocol: 1.6.1 + pg-types: 4.0.2 + dev: true + /@types/prompts@2.4.9: resolution: {integrity: sha512-qTxFi6Buiu8+50/+3DGIWLHM6QuWsEKugJnnP6iv2Mc4ncxE4A/OJkjuVOA+5X0X1S/nq5VJRa8Lu+nwcvbrKA==} dependencies: @@ -7577,6 +7681,11 @@ packages: engines: {node: '>=8'} dev: true + /async-exit-hook@2.0.1: + resolution: {integrity: sha512-NW2cX8m1Q7KPA7a5M2ULQeZ2wR5qI5PAbw5L0UOMxdioVk9PMZ0h1TmyZEkPYrCvYjDlFICusOu1dlEKAAeXBw==} + engines: {node: '>=0.12.0'} + dev: true + /async-limiter@1.0.1: resolution: {integrity: sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ==} @@ -8184,6 +8293,10 @@ packages: /buffer-from@1.1.2: resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} + /buffer-writer@2.0.0: + resolution: {integrity: sha512-a7ZpuTZU1TRtnwyCNW3I5dc0wWNC3VR9S++Ewyk2HHZdrO3CQJqSpd+95Us590V6AL7JqUAH2IwZ/398PmNFgw==} + engines: {node: '>=4'} + /buffer@5.7.1: resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==} dependencies: @@ -9560,6 +9673,24 @@ packages: /electron-to-chromium@1.4.677: resolution: {integrity: sha512-erDa3CaDzwJOpyvfKhOiJjBVNnMM0qxHq47RheVVwsSQrgBA9ZSGV9kdaOfZDPXcHzhG7lBxhj6A7KvfLJBd6Q==} + /embedded-postgres@16.1.1-beta.9: + resolution: {integrity: sha512-GhPY7VvJXsPPo84xED+hRKPO4rNt2SLHT2Ne0HgjtqSj3YAvto2gAgru+9tZ5UGyjOHSWCozZlsRVc14aOvObA==} + dependencies: + async-exit-hook: 2.0.1 + pg: 8.11.3 + optionalDependencies: + '@embedded-postgres/darwin-arm64': 16.1.1-beta.11 + '@embedded-postgres/darwin-x64': 16.1.1-beta.11 + '@embedded-postgres/linux-arm': 16.1.1-beta.11 + '@embedded-postgres/linux-arm64': 16.1.1-beta.11 + '@embedded-postgres/linux-ia32': 16.1.1-beta.11 + '@embedded-postgres/linux-ppc64': 16.1.1-beta.11 + '@embedded-postgres/linux-x64': 16.1.1-beta.11 + '@embedded-postgres/windows-x64': 16.1.1-beta.11 + transitivePeerDependencies: + - pg-native + dev: true + /emittery@0.11.0: resolution: {integrity: sha512-S/7tzL6v5i+4iJd627Nhv9cLFIo5weAIlGccqJFpnBoDB8U1TF2k5tez4J/QNuxyyhWuFqHg1L84Kd3m7iXg6g==} engines: {node: '>=12'} @@ -12637,7 +12768,7 @@ packages: dependencies: '@jest/types': 27.5.1 '@types/graceful-fs': 4.1.6 - '@types/node': 18.16.16 + '@types/node': 20.12.7 anymatch: 3.1.3 fb-watchman: 2.0.2 graceful-fs: 4.2.11 @@ -12658,7 +12789,7 @@ packages: resolution: {integrity: sha512-jZCyo6iIxO1aqUxpuBlwTDMkzOAJS4a3eYz3YzgxxVQFwLeSA7Jfq5cbqCY+JLvTDrWirgusI/0KwxKMgrdf7w==} engines: {node: 
^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} dependencies: - '@types/node': 18.16.16 + '@types/node': 20.12.7 graceful-fs: 4.2.11 /jest-util@27.5.1: @@ -12666,7 +12797,7 @@ packages: engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} dependencies: '@jest/types': 27.5.1 - '@types/node': 18.16.16 + '@types/node': 20.12.7 chalk: 4.1.2 ci-info: 3.8.0 graceful-fs: 4.2.11 @@ -12687,7 +12818,7 @@ packages: resolution: {integrity: sha512-KWYVV1c4i+jbMpaBC+U++4Va0cp8OisU185o73T1vo99hqi7w8tSJfUXYswwqqrjzwxa6KpRK54WhPvwf5w6PQ==} engines: {node: '>= 10.13.0'} dependencies: - '@types/node': 18.16.16 + '@types/node': 20.12.7 merge-stream: 2.0.0 supports-color: 7.2.0 @@ -12695,7 +12826,7 @@ packages: resolution: {integrity: sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==} engines: {node: '>= 10.13.0'} dependencies: - '@types/node': 18.16.16 + '@types/node': 20.12.7 merge-stream: 2.0.0 supports-color: 8.1.1 @@ -14597,6 +14728,10 @@ packages: es-abstract: 1.21.2 dev: true + /obuf@1.1.2: + resolution: {integrity: sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg==} + dev: true + /ohash@1.1.2: resolution: {integrity: sha512-9CIOSq5945rI045GFtcO3uudyOkYVY1nyfFxVQp+9BRgslr8jPNiSSrsFGg/BNTUFOLqx0P5tng6G32brIPw0w==} dev: false @@ -14960,6 +15095,9 @@ packages: semver: 7.6.0 dev: true + /packet-reader@1.0.0: + resolution: {integrity: sha512-HAKu/fG3HpHFO0AA8WE8q2g+gBJaZ9MG7fcKk+IJPLTGAD6Psw4443l+9DGRbOIh3/aXr7Phy0TjilYivJo5XQ==} + /pako@1.0.11: resolution: {integrity: sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==} dev: false @@ -15117,6 +15255,80 @@ packages: resolution: {integrity: sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==} dev: false + /pg-cloudflare@1.1.1: + resolution: {integrity: sha512-xWPagP/4B6BgFO+EKz3JONXv3YDgvkbVrGw2mTo3D6tVDQRh1e7cqVGvyR3BE+eQgAvx1XhW/iEASj4/jCWl3Q==} + requiresBuild: true + optional: true + + /pg-connection-string@2.6.4: + resolution: {integrity: sha512-v+Z7W/0EO707aNMaAEfiGnGL9sxxumwLl2fJvCQtMn9Fxsg+lPpPkdcyBSv/KFgpGdYkMfn+EI1Or2EHjpgLCA==} + + /pg-int8@1.0.1: + resolution: {integrity: sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==} + engines: {node: '>=4.0.0'} + + /pg-numeric@1.0.2: + resolution: {integrity: sha512-BM/Thnrw5jm2kKLE5uJkXqqExRUY/toLHda65XgFTBTFYZyopbKjBe29Ii3RbkvlsMoFwD+tHeGaCjjv0gHlyw==} + engines: {node: '>=4'} + dev: true + + /pg-pool@3.6.2(pg@8.11.3): + resolution: {integrity: sha512-Htjbg8BlwXqSBQ9V8Vjtc+vzf/6fVUuak/3/XXKA9oxZprwW3IMDQTGHP+KDmVL7rtd+R1QjbnCFPuTHm3G4hg==} + peerDependencies: + pg: '>=8.0' + dependencies: + pg: 8.11.3 + + /pg-protocol@1.6.1: + resolution: {integrity: sha512-jPIlvgoD63hrEuihvIg+tJhoGjUsLPn6poJY9N5CnlPd91c2T18T/9zBtLxZSb1EhYxBRoZJtzScCaWlYLtktg==} + + /pg-types@2.2.0: + resolution: {integrity: sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==} + engines: {node: '>=4'} + dependencies: + pg-int8: 1.0.1 + postgres-array: 2.0.0 + postgres-bytea: 1.0.0 + postgres-date: 1.0.7 + postgres-interval: 1.2.0 + + /pg-types@4.0.2: + resolution: {integrity: sha512-cRL3JpS3lKMGsKaWndugWQoLOCoP+Cic8oseVcbr0qhPzYD5DWXK+RZ9LY9wxRf7RQia4SCwQlXk0q6FCPrVng==} + engines: {node: '>=10'} + dependencies: + pg-int8: 1.0.1 + pg-numeric: 1.0.2 + postgres-array: 3.0.2 + postgres-bytea: 3.0.0 + postgres-date: 2.1.0 + postgres-interval: 3.0.0 + postgres-range: 1.1.4 
+ dev: true + + /pg@8.11.3: + resolution: {integrity: sha512-+9iuvG8QfaaUrrph+kpF24cXkH1YOOUeArRNYIxq1viYHZagBxrTno7cecY1Fa44tJeZvaoG+Djpkc3JwehN5g==} + engines: {node: '>= 8.0.0'} + peerDependencies: + pg-native: '>=3.0.1' + peerDependenciesMeta: + pg-native: + optional: true + dependencies: + buffer-writer: 2.0.0 + packet-reader: 1.0.0 + pg-connection-string: 2.6.4 + pg-pool: 3.6.2(pg@8.11.3) + pg-protocol: 1.6.1 + pg-types: 2.2.0 + pgpass: 1.0.5 + optionalDependencies: + pg-cloudflare: 1.1.1 + + /pgpass@1.0.5: + resolution: {integrity: sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==} + dependencies: + split2: 4.2.0 + /picocolors@1.0.0: resolution: {integrity: sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==} @@ -15321,6 +15533,50 @@ packages: source-map-js: 1.2.0 dev: true + /postgres-array@2.0.0: + resolution: {integrity: sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==} + engines: {node: '>=4'} + + /postgres-array@3.0.2: + resolution: {integrity: sha512-6faShkdFugNQCLwucjPcY5ARoW1SlbnrZjmGl0IrrqewpvxvhSLHimCVzqeuULCbG0fQv7Dtk1yDbG3xv7Veog==} + engines: {node: '>=12'} + dev: true + + /postgres-bytea@1.0.0: + resolution: {integrity: sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==} + engines: {node: '>=0.10.0'} + + /postgres-bytea@3.0.0: + resolution: {integrity: sha512-CNd4jim9RFPkObHSjVHlVrxoVQXz7quwNFpz7RY1okNNme49+sVyiTvTRobiLV548Hx/hb1BG+iE7h9493WzFw==} + engines: {node: '>= 6'} + dependencies: + obuf: 1.1.2 + dev: true + + /postgres-date@1.0.7: + resolution: {integrity: sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==} + engines: {node: '>=0.10.0'} + + /postgres-date@2.1.0: + resolution: {integrity: sha512-K7Juri8gtgXVcDfZttFKVmhglp7epKb1K4pgrkLxehjqkrgPhfG6OO8LHLkfaqkbpjNRnra018XwAr1yQFWGcA==} + engines: {node: '>=12'} + dev: true + + /postgres-interval@1.2.0: + resolution: {integrity: sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==} + engines: {node: '>=0.10.0'} + dependencies: + xtend: 4.0.2 + + /postgres-interval@3.0.0: + resolution: {integrity: sha512-BSNDnbyZCXSxgA+1f5UU2GmwhoI0aU5yMxRGO8CdFEcY2BQF9xm/7MqKnYoM1nJDk8nONNWDk9WeSmePFhQdlw==} + engines: {node: '>=12'} + dev: true + + /postgres-range@1.1.4: + resolution: {integrity: sha512-i/hbxIE9803Alj/6ytL7UHQxRvZkI9O4Sy+J3HGc4F4oo/2eQAjTSNJ0bfxyse3bH0nuVesCk+3IRLaMtG3H6w==} + dev: true + /pouchdb-collections@1.0.1: resolution: {integrity: sha512-31db6JRg4+4D5Yzc2nqsRqsA2oOkZS8DpFav3jf/qVNBxusKa2ClkEIZ2bJNpaDbMfWtnuSq59p6Bn+CipPMdg==} dev: true @@ -15546,7 +15802,7 @@ packages: '@protobufjs/pool': 1.1.0 '@protobufjs/utf8': 1.1.0 '@types/long': 4.0.2 - '@types/node': 18.16.16 + '@types/node': 20.12.7 long: 4.0.0 dev: true @@ -16851,6 +17107,10 @@ packages: dependencies: extend-shallow: 3.0.2 + /split2@4.2.0: + resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==} + engines: {node: '>= 10.x'} + /split@1.0.1: resolution: {integrity: sha512-mTyOoPbrivtXnwnIxZRFYRrPNtEFKlpB2fvjSnCQUiAA6qAZzqwna5envK4uk6OIeP17CsdF3rSBGYVBsU0Tkg==} dependencies: @@ -17877,7 +18137,7 @@ packages: typed-array-byte-offset: 1.0.2 dev: true - /typeorm@0.3.16(better-sqlite3@8.4.0): + /typeorm@0.3.16(better-sqlite3@8.4.0)(pg@8.11.3): resolution: {integrity: 
sha512-wJ4Qy1oqRKNDdZiBTTaVMqwo/XxC52Q7uNPTjltPgLhvIW173bL6Iad0lhptMOsFlpixFPaUu3PNziaRBwX2Zw==} engines: {node: '>= 12.9.0'} hasBin: true @@ -17946,6 +18206,7 @@ packages: dotenv: 16.1.1 glob: 8.1.0 mkdirp: 2.1.6 + pg: 8.11.3 reflect-metadata: 0.1.13 sha.js: 2.4.11 tslib: 2.5.2 From ee3ed1b20a7e675949d2f6a33ed247c1815422f2 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 30 Apr 2024 16:07:50 +0200 Subject: [PATCH 131/156] Disable restartability e2e test --- ... 03.25_node_pk_position_does_not_matter_for_compensations.lux} | 0 ...lite_can_resume_replication_after_server_restart.lux.disabled} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename e2e/tests/{03.26_node_pk_position_does_not_matter_for_compensations.lux => 03.25_node_pk_position_does_not_matter_for_compensations.lux} (100%) rename e2e/tests/{03.25_node_satellite_can_resume_replication_after_server_restart.lux => 03.xx_node_satellite_can_resume_replication_after_server_restart.lux.disabled} (100%) diff --git a/e2e/tests/03.26_node_pk_position_does_not_matter_for_compensations.lux b/e2e/tests/03.25_node_pk_position_does_not_matter_for_compensations.lux similarity index 100% rename from e2e/tests/03.26_node_pk_position_does_not_matter_for_compensations.lux rename to e2e/tests/03.25_node_pk_position_does_not_matter_for_compensations.lux diff --git a/e2e/tests/03.25_node_satellite_can_resume_replication_after_server_restart.lux b/e2e/tests/03.xx_node_satellite_can_resume_replication_after_server_restart.lux.disabled similarity index 100% rename from e2e/tests/03.25_node_satellite_can_resume_replication_after_server_restart.lux rename to e2e/tests/03.xx_node_satellite_can_resume_replication_after_server_restart.lux.disabled From b20f531310a6e05cc8b8a67b0a5af012032aec0a Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 30 Apr 2024 16:11:11 +0200 Subject: [PATCH 132/156] Remove duplicate key --- components/electric/lib/electric/satellite/protocol/state.ex | 1 - 1 file changed, 1 deletion(-) diff --git a/components/electric/lib/electric/satellite/protocol/state.ex b/components/electric/lib/electric/satellite/protocol/state.ex index 7f46e48974..8eb9679e06 100644 --- a/components/electric/lib/electric/satellite/protocol/state.ex +++ b/components/electric/lib/electric/satellite/protocol/state.ex @@ -33,7 +33,6 @@ defmodule Electric.Satellite.Protocol.State do origin: Connectors.origin(), subscriptions: map(), subscription_data_fun: fun(), - sql_dialect: Electric.Postgres.Dialect.SQLite | Electric.Postgres.Dialect.Postgresql, move_in_data_fun: fun(), sql_dialect: Electric.Postgres.Dialect.SQLite | Electric.Postgres.Dialect.Postgresql, telemetry: Telemetry.t() | nil From eaf50dd60983069c94e1b541b2de897d2da571e5 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Tue, 30 Apr 2024 16:16:17 +0200 Subject: [PATCH 133/156] Address style comments --- clients/typescript/src/satellite/process.ts | 30 ++++++++++----------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index 97da57e210..f042c756ab 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -1463,22 +1463,22 @@ export class SatelliteProcess implements Satellite { tables: QualifiedTablename[], flag: 0 | 1 ): Statement[] { + if (tables.length === 0) return [] const triggers = `"${this.opts.triggersTable.namespace}"."${this.opts.triggersTable.tablename}"` - const namespacesAndTableNames = tables - .map((tbl) => 
[tbl.namespace, tbl.tablename]) - .flat() - if (tables.length > 0) { - const pos = (i: number) => this.builder.makePositionalParam(i) - let i = 1 - return [ - { - sql: `UPDATE ${triggers} SET flag = ${pos(i++)} WHERE ${tables - .map((_) => `(namespace = ${pos(i++)} AND tablename = ${pos(i++)})`) - .join(' OR ')}`, - args: [flag, ...namespacesAndTableNames], - }, - ] - } else return [] + const namespacesAndTableNames = tables.flatMap((tbl) => [ + tbl.namespace, + tbl.tablename, + ]) + const pos = (i: number) => this.builder.makePositionalParam(i) + let i = 1 + return [ + { + sql: `UPDATE ${triggers} SET flag = ${pos(i++)} WHERE ${tables + .map((_) => `(namespace = ${pos(i++)} AND tablename = ${pos(i++)})`) + .join(' OR ')}`, + args: [flag, ...namespacesAndTableNames], + }, + ] } _addSeenAdditionalDataStmt(ref: string): Statement { From da2ecc8c8ac92b703ed2de547bcbc8f0a1d7de69 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 1 May 2024 08:51:07 +0200 Subject: [PATCH 134/156] Remove Kysely dependency --- clients/typescript/package.json | 1 - clients/typescript/src/migrators/bundle.ts | 87 +++++-------------- clients/typescript/src/migrators/index.ts | 2 +- clients/typescript/src/migrators/mock.ts | 2 +- clients/typescript/src/migrators/schema.ts | 1 - clients/typescript/src/satellite/config.ts | 68 --------------- clients/typescript/src/satellite/mock.ts | 2 +- clients/typescript/src/satellite/process.ts | 2 +- .../test/client/model/shapes.test.ts | 2 +- .../test/migrators/pglite/schema.test.ts | 4 +- .../test/migrators/postgres/schema.test.ts | 4 +- .../test/migrators/sqlite/schema.test.ts | 4 +- .../test/satellite/registry.test.ts | 2 +- pnpm-lock.yaml | 8 -- 14 files changed, 32 insertions(+), 157 deletions(-) diff --git a/clients/typescript/package.json b/clients/typescript/package.json index 58d940d0b5..b9214ff9d0 100644 --- a/clients/typescript/package.json +++ b/clients/typescript/package.json @@ -192,7 +192,6 @@ "frame-stream": "^3.0.1", "get-port": "^7.0.0", "jose": "^4.14.4", - "kysely": "^0.27.2", "lodash.flow": "^3.5.0", "lodash.groupby": "^4.6.0", "lodash.isequal": "^4.5.0", diff --git a/clients/typescript/src/migrators/bundle.ts b/clients/typescript/src/migrators/bundle.ts index 753385b09f..63fa76a556 100644 --- a/clients/typescript/src/migrators/bundle.ts +++ b/clients/typescript/src/migrators/bundle.ts @@ -8,24 +8,10 @@ import { import { DatabaseAdapter } from '../electric/adapter' import { buildInitialMigration as makeBaseMigration } from './schema' import Log from 'loglevel' -import { SatelliteError, SatelliteErrorCode, SqlValue } from '../util' -import { ElectricSchema } from './schema' -import { - Kysely, - KyselyConfig, - sql as raw, - DummyDriver, - PostgresAdapter, - PostgresIntrospector, - PostgresQueryCompiler, - SqliteAdapter, - SqliteIntrospector, - SqliteQueryCompiler, - expressionBuilder, - ExpressionBuilder, -} from 'kysely' +import { SatelliteError, SatelliteErrorCode } from '../util' import { _electric_migrations } from '../satellite/config' import { pgBuilder, QueryBuilder, sqliteBuilder } from './query-builder' +import { dedent } from 'ts-dedent' export const SCHEMA_VSN_ERROR_MSG = `Local schema doesn't match server's. Clear local state through developer tools and retry connection manually. If error persists, re-generate the client. 
Check documentation (https://electric-sql.com/docs/reference/roadmap) to learn more.` @@ -36,25 +22,18 @@ export abstract class BundleMigratorBase implements Migrator { migrations: StmtMigration[] readonly tableName = _electric_migrations - queryBuilder: Kysely - eb: ExpressionBuilder constructor( adapter: DatabaseAdapter, migrations: Migration[] = [], - queryBuilderConfig: KyselyConfig, - public electricQueryBuilder: QueryBuilder, - private namespace: string = electricQueryBuilder.defaultNamespace + public queryBuilder: QueryBuilder, + private namespace: string = queryBuilder.defaultNamespace ) { this.adapter = adapter - const baseMigration = makeBaseMigration(electricQueryBuilder) + const baseMigration = makeBaseMigration(queryBuilder) this.migrations = [...baseMigration.migrations, ...migrations].map( makeStmtMigration ) - this.queryBuilder = new Kysely( - queryBuilderConfig - ).withSchema(namespace) - this.eb = expressionBuilder() } async up(): Promise { @@ -74,11 +53,8 @@ export abstract class BundleMigratorBase implements Migrator { async migrationsTableExists(): Promise { // If this is the first time we're running migrations, then the // migrations table won't exist. - const namespace = this.electricQueryBuilder.defaultNamespace - const tableExists = this.electricQueryBuilder.tableExists( - this.tableName, - namespace - ) + const namespace = this.queryBuilder.defaultNamespace + const tableExists = this.queryBuilder.tableExists(this.tableName, namespace) const tables = await this.adapter.query(tableExists) return tables.length > 0 } @@ -157,15 +133,16 @@ export abstract class BundleMigratorBase implements Migrator { ) } - const { sql, parameters } = raw` - INSERT INTO ${this.eb.table( - this.tableName - )} (version, applied_at) VALUES (${version}, ${Date.now().toString()}) - `.compile(this.queryBuilder) - await this.adapter.runInTransaction(...statements, { - sql, - args: parameters as SqlValue[], + sql: dedent` + INSERT INTO "${this.namespace}"."${ + this.tableName + }" (version, applied_at) + VALUES (${this.queryBuilder.makePositionalParam( + 1 + )}, ${this.queryBuilder.makePositionalParam(2)}); + `, + args: [version, Date.now().toString()], }) } @@ -176,14 +153,12 @@ export abstract class BundleMigratorBase implements Migrator { * that indicates if the migration was applied. 
*/ async applyIfNotAlready(migration: StmtMigration): Promise { - const { sql, parameters } = raw` - SELECT 1 FROM ${this.eb.table(this.tableName)} - WHERE version = ${migration.version} - `.compile(this.queryBuilder) - const rows = await this.adapter.query({ - sql, - args: parameters as SqlValue[], + sql: dedent` + SELECT 1 FROM "${this.namespace}"."${this.tableName}" + WHERE version = ${this.queryBuilder.makePositionalParam(1)} + `, + args: [migration.version], }) const shouldApply = rows.length === 0 @@ -200,28 +175,12 @@ export abstract class BundleMigratorBase implements Migrator { export class SqliteBundleMigrator extends BundleMigratorBase { constructor(adapter: DatabaseAdapter, migrations: Migration[] = []) { - const config: KyselyConfig = { - dialect: { - createAdapter: () => new SqliteAdapter(), - createDriver: () => new DummyDriver(), - createIntrospector: (db) => new SqliteIntrospector(db), - createQueryCompiler: () => new SqliteQueryCompiler(), - }, - } - super(adapter, migrations, config, sqliteBuilder) + super(adapter, migrations, sqliteBuilder) } } export class PgBundleMigrator extends BundleMigratorBase { constructor(adapter: DatabaseAdapter, migrations: Migration[] = []) { - const config: KyselyConfig = { - dialect: { - createAdapter: () => new PostgresAdapter(), - createDriver: () => new DummyDriver(), - createIntrospector: (db) => new PostgresIntrospector(db), - createQueryCompiler: () => new PostgresQueryCompiler(), - }, - } - super(adapter, migrations, config, pgBuilder) + super(adapter, migrations, pgBuilder) } } diff --git a/clients/typescript/src/migrators/index.ts b/clients/typescript/src/migrators/index.ts index 65ce040aa1..8202b579ff 100644 --- a/clients/typescript/src/migrators/index.ts +++ b/clients/typescript/src/migrators/index.ts @@ -32,5 +32,5 @@ export interface Migrator { apply(migration: StmtMigration): Promise applyIfNotAlready(migration: StmtMigration): Promise querySchemaVersion(): Promise - electricQueryBuilder: QueryBuilder + queryBuilder: QueryBuilder } diff --git a/clients/typescript/src/migrators/mock.ts b/clients/typescript/src/migrators/mock.ts index 7a728e8c1d..7de3ece065 100644 --- a/clients/typescript/src/migrators/mock.ts +++ b/clients/typescript/src/migrators/mock.ts @@ -2,7 +2,7 @@ import { Migrator, StmtMigration } from './index' import { QueryBuilder } from './query-builder' export class MockMigrator implements Migrator { - electricQueryBuilder: QueryBuilder = null as any + queryBuilder: QueryBuilder = null as any async up(): Promise { return 1 diff --git a/clients/typescript/src/migrators/schema.ts b/clients/typescript/src/migrators/schema.ts index cf32fe0436..d2989dbc58 100644 --- a/clients/typescript/src/migrators/schema.ts +++ b/clients/typescript/src/migrators/schema.ts @@ -1,6 +1,5 @@ import { satelliteDefaults } from '../satellite/config' import { QueryBuilder } from './query-builder' -export type { ElectricSchema } from '../satellite/config' export const buildInitialMigration = (builder: QueryBuilder) => { const { metaTable, migrationsTable, oplogTable, triggersTable, shadowTable } = diff --git a/clients/typescript/src/satellite/config.ts b/clients/typescript/src/satellite/config.ts index 55db11f62f..455d99da14 100644 --- a/clients/typescript/src/satellite/config.ts +++ b/clients/typescript/src/satellite/config.ts @@ -1,6 +1,5 @@ import { IBackOffOptions } from 'exponential-backoff' import { QualifiedTablename } from '../util/tablename' -import { Insertable, Selectable, Updateable, Generated } from 'kysely' export type 
ConnectionBackoffOptions = Omit export interface SatelliteOpts { @@ -35,79 +34,12 @@ export interface SatelliteOverrides { minSnapshotWindow?: number } -// Describe the schema of the database for use with Kysely -// The names of the properties in this interface -// must be kept consistent with the names of the tables - export const _electric_oplog = '_electric_oplog' export const _electric_meta = '_electric_meta' export const _electric_migrations = '_electric_migrations' export const _electric_trigger_settings = '_electric_trigger_settings' export const _electric_shadow = '_electric_shadow' -export interface ElectricSchema { - [_electric_oplog]: OplogTable - [_electric_meta]: MetaTable - [_electric_migrations]: MigrationsTable - [_electric_trigger_settings]: TriggersTable - [_electric_shadow]: ShadowTable -} - -interface OplogTable { - rowid: number - namespace: string - tablename: string - optype: string - primaryKey: string - newRow: string | null - oldRow: string | null - timestamp: string - clearTags: string -} - -export type Oplog = Selectable -export type NewOplog = Insertable -export type OplogUpdate = Updateable - -interface MetaTable { - key: string - value: Buffer -} - -export type Meta = Selectable -export type NewMeta = Insertable -export type MetaUpdate = Updateable - -export interface MigrationsTable { - id: Generated - version: string - applied_at: string -} - -export type Migration = Selectable -export type NewMigration = Insertable -export type MigrationUpdate = Updateable - -interface TriggersTable { - tablename: string - flag: number -} - -export type Trigger = Selectable -export type NewTrigger = Insertable -export type TriggerUpdate = Updateable - -interface ShadowTable { - namespace: string - tablename: string - primaryKey: string - tags: string -} - -export type Shadow = Selectable -export type NewShadow = Insertable -export type ShadowUpdate = Updateable - export const satelliteDefaults: (namespace: string) => SatelliteOpts = ( namespace: string ) => { diff --git a/clients/typescript/src/satellite/mock.ts b/clients/typescript/src/satellite/mock.ts index a3b2eef208..2ad3f75370 100644 --- a/clients/typescript/src/satellite/mock.ts +++ b/clients/typescript/src/satellite/mock.ts @@ -160,7 +160,7 @@ export class MockRegistry extends BaseRegistry { throw new Error('Failed to start satellite process') } - const namespace = migrator.electricQueryBuilder.defaultNamespace + const namespace = migrator.queryBuilder.defaultNamespace const opts = { ...satelliteDefaults(namespace), ...overrides } const satellites = this.satellites diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index f042c756ab..c8c17a9a34 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -168,7 +168,7 @@ export class SatelliteProcess implements Satellite { this.migrator = migrator this.notifier = notifier this.client = client - this.builder = this.migrator.electricQueryBuilder + this.builder = this.migrator.queryBuilder this.opts = opts this.relations = {} diff --git a/clients/typescript/test/client/model/shapes.test.ts b/clients/typescript/test/client/model/shapes.test.ts index 278e12df74..6d5dbb4246 100644 --- a/clients/typescript/test/client/model/shapes.test.ts +++ b/clients/typescript/test/client/model/shapes.test.ts @@ -55,7 +55,7 @@ async function makeContext(t: ExecutionContext) { migrator, notifier, client, - satelliteDefaults(migrator.electricQueryBuilder.defaultNamespace) + 
satelliteDefaults(migrator.queryBuilder.defaultNamespace) ) const electric = ElectricClient.create( diff --git a/clients/typescript/test/migrators/pglite/schema.test.ts b/clients/typescript/test/migrators/pglite/schema.test.ts index e296631b3e..0027104564 100644 --- a/clients/typescript/test/migrators/pglite/schema.test.ts +++ b/clients/typescript/test/migrators/pglite/schema.test.ts @@ -39,9 +39,7 @@ test('check schema keys are unique', async (t) => { const migrator = new BundleMigrator(adapter, migrations) await migrator.up() - const defaults = satelliteDefaults( - migrator.electricQueryBuilder.defaultNamespace - ) + const defaults = satelliteDefaults(migrator.queryBuilder.defaultNamespace) const metaTable = `"${defaults.metaTable.namespace}"."${defaults.metaTable.tablename}"` await adapter.run({ diff --git a/clients/typescript/test/migrators/postgres/schema.test.ts b/clients/typescript/test/migrators/postgres/schema.test.ts index 98eacbd753..b3eb21c9cb 100644 --- a/clients/typescript/test/migrators/postgres/schema.test.ts +++ b/clients/typescript/test/migrators/postgres/schema.test.ts @@ -39,9 +39,7 @@ test('check schema keys are unique', async (t) => { const migrator = new BundleMigrator(adapter, migrations) await migrator.up() - const defaults = satelliteDefaults( - migrator.electricQueryBuilder.defaultNamespace - ) + const defaults = satelliteDefaults(migrator.queryBuilder.defaultNamespace) const metaTable = `"${defaults.metaTable.namespace}"."${defaults.metaTable.tablename}"` await adapter.run({ diff --git a/clients/typescript/test/migrators/sqlite/schema.test.ts b/clients/typescript/test/migrators/sqlite/schema.test.ts index 4e3cfdaed3..e17c6d1e73 100644 --- a/clients/typescript/test/migrators/sqlite/schema.test.ts +++ b/clients/typescript/test/migrators/sqlite/schema.test.ts @@ -39,9 +39,7 @@ test('check schema keys are unique', async (t) => { const migrator = new BundleMigrator(adapter, migrations) await migrator.up() - const defaults = satelliteDefaults( - migrator.electricQueryBuilder.defaultNamespace - ) + const defaults = satelliteDefaults(migrator.queryBuilder.defaultNamespace) const metaTable = `"${defaults.metaTable.namespace}"."${defaults.metaTable.tablename}"` await adapter.run({ diff --git a/clients/typescript/test/satellite/registry.test.ts b/clients/typescript/test/satellite/registry.test.ts index 101383fee0..7cf0e8019e 100644 --- a/clients/typescript/test/satellite/registry.test.ts +++ b/clients/typescript/test/satellite/registry.test.ts @@ -13,7 +13,7 @@ const dbName = 'test.db' const dbDescription = {} as DbSchema const adapter = {} as DatabaseAdapter -const migrator = { electricQueryBuilder: sqliteBuilder } as unknown as Migrator +const migrator = { queryBuilder: sqliteBuilder } as unknown as Migrator const notifier = {} as Notifier const socketFactory = {} as SocketFactory const config: InternalElectricConfig = { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index c6bb303bab..4626567d4b 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -62,9 +62,6 @@ importers: jose: specifier: ^4.14.4 version: 4.14.4 - kysely: - specifier: ^0.27.2 - version: 0.27.2 lodash.flow: specifier: ^3.5.0 version: 3.5.0 @@ -13151,11 +13148,6 @@ packages: resolution: {integrity: sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==} engines: {node: '>=6'} - /kysely@0.27.2: - resolution: {integrity: sha512-DmRvEfiR/NLpgsTbSxma2ldekhsdcd65+MNiKXyd/qj7w7X5e3cLkXxcj+MypsRDjPhHQ/CD5u3Eq1sBYzX0bw==} - engines: {node: '>=14.0.0'} - dev: false - 
/latest-version@7.0.0: resolution: {integrity: sha512-KvNT4XqAMzdcL6ka6Tl3i2lYeFDgXNCuIX+xNx6ZMVR1dFq+idXd9FLKNMOIx0t9mJ9/HudyX4oZWXZQ0UJHeg==} engines: {node: '>=14.16'} From 622b601c930e3e3c521ff2a809cff15d98a4663b Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 1 May 2024 09:15:52 +0200 Subject: [PATCH 135/156] Remove unused file --- clients/typescript/src/util/statements.ts | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 clients/typescript/src/util/statements.ts diff --git a/clients/typescript/src/util/statements.ts b/clients/typescript/src/util/statements.ts deleted file mode 100644 index bfac3d7263..0000000000 --- a/clients/typescript/src/util/statements.ts +++ /dev/null @@ -1,3 +0,0 @@ -export function isInsertUpdateOrDeleteStatement(stmt: string) { - return /^\s*(insert|update|delete)/i.test(stmt) -} From 9f4d49aa26bf82d9d4b146566a10827bc6b0cd88 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 1 May 2024 09:16:34 +0200 Subject: [PATCH 136/156] Renamed Record type to DbRecord --- .../typescript/src/client/model/transforms.ts | 2 +- .../src/client/validation/validation.ts | 2 +- clients/typescript/src/notifiers/index.ts | 4 ++-- clients/typescript/src/satellite/client.ts | 18 ++++++++++-------- clients/typescript/src/satellite/index.ts | 6 +++--- clients/typescript/src/satellite/mock.ts | 2 +- clients/typescript/src/satellite/oplog.ts | 2 +- clients/typescript/src/satellite/process.ts | 2 +- clients/typescript/src/util/tablename.ts | 6 +++--- clients/typescript/src/util/types.ts | 8 ++++---- .../test/client/model/transforms.test.ts | 8 ++++---- .../typescript/test/satellite/serialization.ts | 8 ++++---- 12 files changed, 35 insertions(+), 33 deletions(-) diff --git a/clients/typescript/src/client/model/transforms.ts b/clients/typescript/src/client/model/transforms.ts index 8dd33664e7..b57b713aad 100644 --- a/clients/typescript/src/client/model/transforms.ts +++ b/clients/typescript/src/client/model/transforms.ts @@ -2,7 +2,7 @@ import { Satellite } from '../../satellite' import { QualifiedTablename, ReplicatedRowTransformer, - Record as DataRecord, + DbRecord as DataRecord, } from '../../util' import { Converter } from '../conversions/converter' import { Transformation, transformFields } from '../conversions/input' diff --git a/clients/typescript/src/client/validation/validation.ts b/clients/typescript/src/client/validation/validation.ts index 357e05be0d..5cfbd0aaab 100644 --- a/clients/typescript/src/client/validation/validation.ts +++ b/clients/typescript/src/client/validation/validation.ts @@ -1,6 +1,6 @@ import * as z from 'zod' import { InvalidArgumentError } from './errors/invalidArgumentError' -import { Record as DataRecord, isObject } from '../../util' +import { DbRecord as DataRecord, isObject } from '../../util' import { InvalidRecordTransformationError } from './errors/invalidRecordTransformationError' function deepOmit(obj: Record) { diff --git a/clients/typescript/src/notifiers/index.ts b/clients/typescript/src/notifiers/index.ts index 953a9f1b2f..0f6a6bfefa 100644 --- a/clients/typescript/src/notifiers/index.ts +++ b/clients/typescript/src/notifiers/index.ts @@ -5,7 +5,7 @@ import { DbName, RowId, DataChangeType, - Record, + DbRecord, } from '../util/types' export { EventNotifier } from './event' @@ -16,7 +16,7 @@ export interface AuthStateNotification { } export type RecordChange = { - primaryKey: Record + primaryKey: DbRecord type: `${DataChangeType}` | 'INITIAL' } export interface Change { diff --git 
a/clients/typescript/src/satellite/client.ts b/clients/typescript/src/satellite/client.ts index c3ba05b07e..b946b3f324 100644 --- a/clients/typescript/src/satellite/client.ts +++ b/clients/typescript/src/satellite/client.ts @@ -54,7 +54,7 @@ import { SatelliteError, SatelliteErrorCode, DataTransaction, - Record, + DbRecord, Relation, SchemaChange, StartReplicationResponse, @@ -151,8 +151,10 @@ export class SatelliteClient implements Client { // can only handle a single subscription at a time private subscriptionsDataCache: SubscriptionsDataCache - private replicationTransforms: Map> = - new Map() + private replicationTransforms: Map< + string, + ReplicatedRowTransformer + > = new Map() private socketHandler?: (any: any) => void private throttledPushTransaction?: () => void @@ -1283,7 +1285,7 @@ export class SatelliteClient implements Client { public setReplicationTransform( tableName: QualifiedTablename, - transform: ReplicatedRowTransformer + transform: ReplicatedRowTransformer ): void { this.replicationTransforms.set(tableName.tablename, transform) } @@ -1350,7 +1352,7 @@ function getColumnType( } export function serializeRow( - rec: Record, + rec: DbRecord, relation: Relation, dbDescription: DbSchema, encoder: TypeEncoder @@ -1384,19 +1386,19 @@ export function deserializeRow( relation: Relation, dbDescription: DbSchema, decoder: TypeDecoder -): Record +): DbRecord export function deserializeRow( row: SatOpRow | undefined, relation: Relation, dbDescription: DbSchema, decoder: TypeDecoder -): Record | undefined +): DbRecord | undefined export function deserializeRow( row: SatOpRow | undefined, relation: Relation, dbDescription: DbSchema, decoder: TypeDecoder -): Record | undefined { +): DbRecord | undefined { if (row == undefined) { return undefined } diff --git a/clients/typescript/src/satellite/index.ts b/clients/typescript/src/satellite/index.ts index 61b02c0b66..c2bba05969 100644 --- a/clients/typescript/src/satellite/index.ts +++ b/clients/typescript/src/satellite/index.ts @@ -20,7 +20,7 @@ import { SatelliteError, ReplicationStatus, AdditionalDataCallback, - Record, + DbRecord, ReplicatedRowTransformer, } from '../util/types' import { @@ -85,7 +85,7 @@ export interface Satellite { setReplicationTransform( tableName: QualifiedTablename, - transform: ReplicatedRowTransformer + transform: ReplicatedRowTransformer ): void clearReplicationTransform(tableName: QualifiedTablename): void } @@ -131,7 +131,7 @@ export interface Client { setReplicationTransform( tableName: QualifiedTablename, - transformer: ReplicatedRowTransformer + transformer: ReplicatedRowTransformer ): void clearReplicationTransform(tableName: QualifiedTablename): void } diff --git a/clients/typescript/src/satellite/mock.ts b/clients/typescript/src/satellite/mock.ts index 2ad3f75370..9d5bf7f8be 100644 --- a/clients/typescript/src/satellite/mock.ts +++ b/clients/typescript/src/satellite/mock.ts @@ -12,7 +12,7 @@ import { Relation, SatelliteErrorCode, RelationsCache, - Record as DataRecord, + DbRecord as DataRecord, StartReplicationResponse, StopReplicationResponse, OutboundStartedCallback, diff --git a/clients/typescript/src/satellite/oplog.ts b/clients/typescript/src/satellite/oplog.ts index 2576146e96..f9abfd7f6b 100644 --- a/clients/typescript/src/satellite/oplog.ts +++ b/clients/typescript/src/satellite/oplog.ts @@ -7,7 +7,7 @@ import { SqlValue, DataTransaction, DataChange, - Record as Rec, + DbRecord as Rec, Relation, } from '../util/types' import { union } from '../util/sets' diff --git 
a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index c8c17a9a34..7a1df0a1b9 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -37,7 +37,7 @@ import { Transaction, isDataChange, Uuid, - Record as DataRecord, + DbRecord as DataRecord, ReplicatedRowTransformer, } from '../util/types' import { SatelliteOpts } from './config' diff --git a/clients/typescript/src/util/tablename.ts b/clients/typescript/src/util/tablename.ts index ba5641911b..4780fa6dc7 100644 --- a/clients/typescript/src/util/tablename.ts +++ b/clients/typescript/src/util/tablename.ts @@ -14,9 +14,9 @@ export class QualifiedTablename { } toString(): string { - // Don't collapse it to "." because that can lead to clashes - // since both `QualifiedTablename("foo", "bar.baz")` and `QualifiedTablename("foo.bar", "baz")` - // would be collapsed to "foo.bar.baz". + // Don't collapse it to '.' because that can lead to clashes + // since both `QualifiedTablename('foo', 'bar.baz')` and `QualifiedTablename('foo.bar', 'baz')` + // would be collapsed to 'foo.bar.baz'. return JSON.stringify({ namespace: this.namespace, tablename: this.tablename, diff --git a/clients/typescript/src/util/types.ts b/clients/typescript/src/util/types.ts index a09f978ad4..ad0c3b5da8 100644 --- a/clients/typescript/src/util/types.ts +++ b/clients/typescript/src/util/types.ts @@ -149,15 +149,15 @@ export type Change = DataChange | SchemaChange export type DataChange = { relation: Relation type: DataChangeType - record?: Record - oldRecord?: Record + record?: DbRecord + oldRecord?: DbRecord tags: Tag[] } export type DataInsert = { relation: Relation type: DataChangeType.INSERT - record: Record + record: DbRecord tags: Tag[] } @@ -180,7 +180,7 @@ export function isDataChange(change: Change): change is DataChange { return 'relation' in change } -export type Record = { +export type DbRecord = { [key: string]: boolean | string | number | Uint8Array | undefined | null } diff --git a/clients/typescript/test/client/model/transforms.test.ts b/clients/typescript/test/client/model/transforms.test.ts index 8ec91b9ffb..65a2f481b3 100644 --- a/clients/typescript/test/client/model/transforms.test.ts +++ b/clients/typescript/test/client/model/transforms.test.ts @@ -7,7 +7,7 @@ import { import { schema, Post } from '../generated' import { transformTableRecord } from '../../../src/client/model/transforms' import { InvalidRecordTransformationError } from '../../../src/client/validation/errors/invalidRecordTransformationError' -import { Record } from '../../../src/util' +import { DbRecord } from '../../../src/util' import { sqliteConverter } from '../../../src/client/conversions/sqlite' const tableName = 'Post' @@ -24,7 +24,7 @@ const post1 = { } test('transformTableRecord should validate the input', (t) => { - const liftedTransform = (r: Record) => + const liftedTransform = (r: DbRecord) => transformTableRecord( r, (row: Post) => row, @@ -50,7 +50,7 @@ test('transformTableRecord should validate the input', (t) => { }) test('transformTableRecord should validate the output', (t) => { - const liftedTransform = (r: Record) => + const liftedTransform = (r: DbRecord) => transformTableRecord( r, // @ts-expect-error: incorrectly typed output @@ -68,7 +68,7 @@ test('transformTableRecord should validate the output', (t) => { }) test('transformTableRecord should validate output does not modify immutable fields', (t) => { - const liftedTransform = (r: Record) => + const liftedTransform = (r: 
DbRecord) => transformTableRecord( r, (row: Post) => ({ diff --git a/clients/typescript/test/satellite/serialization.ts b/clients/typescript/test/satellite/serialization.ts index 8338c71b56..2d286b6125 100644 --- a/clients/typescript/test/satellite/serialization.ts +++ b/clients/typescript/test/satellite/serialization.ts @@ -1,7 +1,7 @@ import { SatRelation_RelationType } from '../../src/_generated/protocol/satellite' import { serializeRow, deserializeRow } from '../../src/satellite/client' import { TestFn, ExecutionContext } from 'ava' -import { Relation, Record } from '../../src/util/types' +import { Relation, DbRecord } from '../../src/util/types' import { DbSchema, TableSchema } from '../../src/client/model/schema' import { PgBasicType } from '../../src/client/conversions/types' import { HKT } from '../../src/client/util/hkt' @@ -96,7 +96,7 @@ export const serializationTests = (test: TestFn) => { [] ) - const record: Record = { + const record: DbRecord = { name1: 'Hello', name2: 'World!', name3: null, @@ -152,7 +152,7 @@ export const serializationTests = (test: TestFn) => { t.deepEqual(d_row, record) // Test edge cases for floats such as NaN, Infinity, -Infinity - const record2: Record = { + const record2: DbRecord = { name1: 'Edge cases for Floats', name2: null, name3: null, @@ -262,7 +262,7 @@ export const serializationTests = (test: TestFn) => { [] ) - const record: Record = { + const record: DbRecord = { bit0: null, bit1: null, bit2: 'Filled', From e248677357fdb637799842ecfbef29b6b7424ede Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 1 May 2024 10:39:38 +0200 Subject: [PATCH 137/156] Serialise qualified table names with quotes around namespace and table name. --- .../src/drivers/node-postgres/database.ts | 83 +++++++------ clients/typescript/src/migrators/bundle.ts | 20 +-- .../src/migrators/query-builder/builder.ts | 105 ++++++---------- .../src/migrators/query-builder/pgBuilder.ts | 117 +++++++----------- .../migrators/query-builder/sqliteBuilder.ts | 105 +++++++--------- clients/typescript/src/migrators/schema.ts | 12 +- clients/typescript/src/migrators/triggers.ts | 48 ++++--- clients/typescript/src/satellite/process.ts | 49 ++++---- clients/typescript/src/util/tablename.ts | 32 +++-- .../test/migrators/pglite/schema.test.ts | 2 +- .../test/migrators/pglite/triggers.test.ts | 10 +- .../test/migrators/postgres/schema.test.ts | 2 +- .../test/migrators/postgres/triggers.test.ts | 10 +- .../test/migrators/sqlite/schema.test.ts | 2 +- .../test/migrators/sqlite/triggers.test.ts | 14 +-- clients/typescript/test/migrators/triggers.ts | 8 +- clients/typescript/test/satellite/common.ts | 9 +- .../typescript/test/satellite/merge.test.ts | 15 +-- clients/typescript/test/satellite/process.ts | 9 +- 19 files changed, 294 insertions(+), 358 deletions(-) diff --git a/clients/typescript/src/drivers/node-postgres/database.ts b/clients/typescript/src/drivers/node-postgres/database.ts index 93d400e13b..59aa7b7c31 100644 --- a/clients/typescript/src/drivers/node-postgres/database.ts +++ b/clients/typescript/src/drivers/node-postgres/database.ts @@ -20,45 +20,56 @@ export class ElectricDatabase implements Database { constructor(public name: string, private db: Client) {} async exec(statement: Statement): Promise { - const { rows, rowCount } = await this.db.query({ - text: statement.sql, - values: statement.args, - types: { - getTypeParser: ((oid: number) => { - /* - // Modify the parser to not parse JSON values - // Instead, return them as strings - // our conversions will correctly parse 
them + try { + const { rows, rowCount } = await this.db.query({ + text: statement.sql, + values: statement.args, + types: { + getTypeParser: ((oid: number) => { + /* + // Modify the parser to not parse JSON values + // Instead, return them as strings + // our conversions will correctly parse them + if ( + oid === pg.types.builtins.JSON || + oid === pg.types.builtins.JSONB + ) { + return (val) => val + } + */ + if ( - oid === pg.types.builtins.JSON || - oid === pg.types.builtins.JSONB + oid == pg.types.builtins.TIMESTAMP || + oid == pg.types.builtins.TIMESTAMPTZ || + oid == pg.types.builtins.DATE ) { - return (val) => val + // Parse timestamps and date values ourselves + // because the pg parser parses them differently from what we expect + const pgTypes = new Map([ + [pg.types.builtins.TIMESTAMP, PgDateType.PG_TIMESTAMP], + [pg.types.builtins.TIMESTAMPTZ, PgDateType.PG_TIMESTAMPTZ], + [pg.types.builtins.DATE, PgDateType.PG_DATE], + ]) + return (val: string) => + deserialiseDate(val, pgTypes.get(oid) as PgDateType) } - */ - - if ( - oid == pg.types.builtins.TIMESTAMP || - oid == pg.types.builtins.TIMESTAMPTZ || - oid == pg.types.builtins.DATE - ) { - // Parse timestamps and date values ourselves - // because the pg parser parses them differently from what we expect - const pgTypes = new Map([ - [pg.types.builtins.TIMESTAMP, PgDateType.PG_TIMESTAMP], - [pg.types.builtins.TIMESTAMPTZ, PgDateType.PG_TIMESTAMPTZ], - [pg.types.builtins.DATE, PgDateType.PG_DATE], - ]) - return (val: string) => - deserialiseDate(val, pgTypes.get(oid) as PgDateType) - } - return originalGetTypeParser(oid) - }) as typeof pg.types.getTypeParser, - }, - }) - return { - rows, - rowsModified: rowCount ?? 0, + return originalGetTypeParser(oid) + }) as typeof pg.types.getTypeParser, + }, + }) + return { + rows, + rowsModified: rowCount ?? 0, + } + } catch (e: any) { + console.log('EXEC ERROR: ' + e.message) + console.log( + 'STATEMENT was: ' + + statement.sql + + ' - args: ' + + JSON.stringify(statement.args, null, 2) + ) + throw e } } } diff --git a/clients/typescript/src/migrators/bundle.ts b/clients/typescript/src/migrators/bundle.ts index 63fa76a556..bcfcaa8ccf 100644 --- a/clients/typescript/src/migrators/bundle.ts +++ b/clients/typescript/src/migrators/bundle.ts @@ -8,7 +8,7 @@ import { import { DatabaseAdapter } from '../electric/adapter' import { buildInitialMigration as makeBaseMigration } from './schema' import Log from 'loglevel' -import { SatelliteError, SatelliteErrorCode } from '../util' +import { QualifiedTablename, SatelliteError, SatelliteErrorCode } from '../util' import { _electric_migrations } from '../satellite/config' import { pgBuilder, QueryBuilder, sqliteBuilder } from './query-builder' import { dedent } from 'ts-dedent' @@ -22,6 +22,7 @@ export abstract class BundleMigratorBase implements Migrator { migrations: StmtMigration[] readonly tableName = _electric_migrations + readonly migrationsTable: QualifiedTablename constructor( adapter: DatabaseAdapter, @@ -34,6 +35,10 @@ export abstract class BundleMigratorBase implements Migrator { this.migrations = [...baseMigration.migrations, ...migrations].map( makeStmtMigration ) + this.migrationsTable = new QualifiedTablename( + this.namespace, + this.tableName + ) } async up(): Promise { @@ -53,8 +58,7 @@ export abstract class BundleMigratorBase implements Migrator { async migrationsTableExists(): Promise { // If this is the first time we're running migrations, then the // migrations table won't exist. 
- const namespace = this.queryBuilder.defaultNamespace - const tableExists = this.queryBuilder.tableExists(this.tableName, namespace) + const tableExists = this.queryBuilder.tableExists(this.migrationsTable) const tables = await this.adapter.query(tableExists) return tables.length > 0 } @@ -65,7 +69,7 @@ export abstract class BundleMigratorBase implements Migrator { } const existingRecords = ` - SELECT version FROM "${this.namespace}"."${this.tableName}" + SELECT version FROM ${this.migrationsTable} ORDER BY id ASC ` @@ -82,7 +86,7 @@ export abstract class BundleMigratorBase implements Migrator { // The hard-coded version '0' below corresponds to the version of the internal migration defined in `schema.ts`. // We're ignoring it because this function is supposed to return the application schema version. const schemaVersion = ` - SELECT version FROM "${this.namespace}"."${this.tableName}" + SELECT version FROM ${this.migrationsTable} WHERE version != '0' ORDER BY version DESC LIMIT 1 @@ -135,9 +139,7 @@ export abstract class BundleMigratorBase implements Migrator { await this.adapter.runInTransaction(...statements, { sql: dedent` - INSERT INTO "${this.namespace}"."${ - this.tableName - }" (version, applied_at) + INSERT INTO ${this.migrationsTable} (version, applied_at) VALUES (${this.queryBuilder.makePositionalParam( 1 )}, ${this.queryBuilder.makePositionalParam(2)}); @@ -155,7 +157,7 @@ export abstract class BundleMigratorBase implements Migrator { async applyIfNotAlready(migration: StmtMigration): Promise { const rows = await this.adapter.query({ sql: dedent` - SELECT 1 FROM "${this.namespace}"."${this.tableName}" + SELECT 1 FROM ${this.migrationsTable} WHERE version = ${this.queryBuilder.makePositionalParam(1)} `, args: [migration.version], diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index ed1e7c7257..06f75e807f 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -62,7 +62,7 @@ export abstract class QueryBuilder { /** * Checks if the given table exists. */ - abstract tableExists(tableName: string, namespace?: string): Statement + abstract tableExists(table: QualifiedTablename): Statement /** * Counts tables whose name is included in `tables`. @@ -105,22 +105,20 @@ export abstract class QueryBuilder { * Insert a row into a table, ignoring it if it already exists. */ abstract insertOrIgnore( - table: string, + table: QualifiedTablename, columns: string[], - values: SqlValue[], - schema?: string + values: SqlValue[] ): Statement /** * Insert a row into a table, replacing it if it already exists. */ abstract insertOrReplace( - table: string, + table: QualifiedTablename, columns: string[], values: Array, conflictCols: string[], - updateCols: string[], - schema?: string + updateCols: string[] ): Statement /** @@ -129,26 +127,24 @@ export abstract class QueryBuilder { * with the provided values `updateVals` */ abstract insertOrReplaceWith( - table: string, + table: QualifiedTablename, columns: string[], values: Array, conflictCols: string[], updateCols: string[], - updateVals: SqlValue[], - schema?: string + updateVals: SqlValue[] ): Statement /** * Inserts a batch of rows into a table, replacing them if they already exist. 
*/ abstract batchedInsertOrReplace( - table: string, + table: QualifiedTablename, columns: string[], records: Array>, conflictCols: string[], updateCols: string[], - maxSqlParameters: number, - schema?: string + maxSqlParameters: number ): Statement[] /** @@ -156,80 +152,64 @@ export abstract class QueryBuilder { */ abstract dropTriggerIfExists( triggerName: string, - tablename: string, - namespace?: string + table: QualifiedTablename ): string /** * Create a trigger that prevents updates to the primary key. */ abstract createNoFkUpdateTrigger( - tablename: string, - pk: string[], - namespace?: string + table: QualifiedTablename, + pk: string[] ): string[] /** * Creates or replaces a trigger that prevents updates to the primary key. */ createOrReplaceNoFkUpdateTrigger( - tablename: string, - pk: string[], - namespace?: string + table: QualifiedTablename, + pk: string[] ): string[] { return [ this.dropTriggerIfExists( - `update_ensure_${namespace}_${tablename}_primarykey`, - tablename, - namespace + `update_ensure_${table.namespace}_${table.tablename}_primarykey`, + table ), - ...this.createNoFkUpdateTrigger(tablename, pk, namespace), + ...this.createNoFkUpdateTrigger(table, pk), ] } /** * Modifies the trigger setting for the table identified by its tablename and namespace. */ - abstract setTriggerSetting( - tableName: string, - value: 0 | 1, - namespace?: string - ): string + abstract setTriggerSetting(table: QualifiedTablename, value: 0 | 1): string /** * Create a trigger that logs operations into the oplog. */ abstract createOplogTrigger( opType: 'INSERT' | 'UPDATE' | 'DELETE', - tableName: string, + table: QualifiedTablename, newPKs: string, newRows: string, - oldRows: string, - namespace?: string + oldRows: string ): string[] createOrReplaceOplogTrigger( opType: 'INSERT' | 'UPDATE' | 'DELETE', - tableName: string, + table: QualifiedTablename, newPKs: string, newRows: string, - oldRows: string, - namespace: string = this.defaultNamespace + oldRows: string ): string[] { return [ this.dropTriggerIfExists( - `${opType.toLowerCase()}_${namespace}_${tableName}_into_oplog`, - tableName, - namespace - ), - ...this.createOplogTrigger( - opType, - tableName, - newPKs, - newRows, - oldRows, - namespace + `${opType.toLowerCase()}_${table.namespace}_${ + table.tablename + }_into_oplog`, + table ), + ...this.createOplogTrigger(opType, table, newPKs, newRows, oldRows), ] } @@ -262,40 +242,35 @@ export abstract class QueryBuilder { */ abstract createFkCompensationTrigger( opType: 'INSERT' | 'UPDATE', - tableName: string, + table: QualifiedTablename, childKey: string, - fkTableName: string, + fkTable: QualifiedTablename, joinedFkPKs: string, - foreignKey: ForeignKey, - namespace?: string, - fkTableNamespace?: string + foreignKey: ForeignKey ): string[] createOrReplaceFkCompensationTrigger( opType: 'INSERT' | 'UPDATE', - tableName: string, + table: QualifiedTablename, childKey: string, - fkTableName: string, + fkTable: QualifiedTablename, joinedFkPKs: string, - foreignKey: ForeignKey, - namespace: string = this.defaultNamespace, - fkTableNamespace: string = this.defaultNamespace + foreignKey: ForeignKey ): string[] { return [ this.dropTriggerIfExists( - `compensation_${opType.toLowerCase()}_${namespace}_${tableName}_${childKey}_into_oplog`, - tableName, - namespace + `compensation_${opType.toLowerCase()}_${table.namespace}_${ + table.tablename + }_${childKey}_into_oplog`, + table ), ...this.createFkCompensationTrigger( opType, - tableName, + table, childKey, - fkTableName, + fkTable, joinedFkPKs, - 
foreignKey, - namespace, - fkTableNamespace + foreignKey ), ] } diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index 0c282dbd68..a7eb342f46 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -33,13 +33,10 @@ class PgBuilder extends QueryBuilder { return [] } - tableExists( - tableName: string, - namespace: string = this.defaultNamespace - ): Statement { + tableExists(table: QualifiedTablename): Statement { return { sql: `SELECT 1 FROM information_schema.tables WHERE table_schema = $1 AND table_name = $2`, - args: [namespace, tableName], + args: [table.namespace, table.tablename], } } @@ -70,9 +67,7 @@ class PgBuilder extends QueryBuilder { onTable: QualifiedTablename, columns: string[] ) { - const namespace = onTable.namespace - const tablename = onTable.tablename - return `CREATE INDEX IF NOT EXISTS ${indexName} ON "${namespace}"."${tablename}" (${columns + return `CREATE INDEX IF NOT EXISTS ${indexName} ON ${onTable} (${columns .map(quote) .join(', ')})` } @@ -132,14 +127,13 @@ class PgBuilder extends QueryBuilder { } insertOrIgnore( - table: string, + table: QualifiedTablename, columns: string[], - values: SqlValue[], - schema: string = this.defaultNamespace + values: SqlValue[] ): Statement { return { sql: dedent` - INSERT INTO "${schema}"."${table}" (${columns.map(quote).join(', ')}) + INSERT INTO ${table} (${columns.map(quote).join(', ')}) VALUES (${columns.map((_, i) => `$${i + 1}`).join(', ')}) ON CONFLICT DO NOTHING; `, @@ -148,16 +142,15 @@ class PgBuilder extends QueryBuilder { } insertOrReplace( - table: string, + table: QualifiedTablename, columns: string[], values: Array, conflictCols: string[], - updateCols: string[], - schema: string = this.defaultNamespace + updateCols: string[] ): Statement { return { sql: dedent` - INSERT INTO "${schema}"."${table}" (${columns.map(quote).join(', ')}) + INSERT INTO ${table} (${columns.map(quote).join(', ')}) VALUES (${columns.map((_, i) => `$${i + 1}`).join(', ')}) ON CONFLICT (${conflictCols.map(quote).join(', ')}) DO UPDATE SET ${updateCols @@ -169,17 +162,16 @@ class PgBuilder extends QueryBuilder { } insertOrReplaceWith( - table: string, + table: QualifiedTablename, columns: string[], values: Array, conflictCols: string[], updateCols: string[], - updateVals: SqlValue[], - schema: string = this.defaultNamespace + updateVals: SqlValue[] ): Statement { return { sql: dedent` - INSERT INTO "${schema}"."${table}" (${columns.map(quote).join(', ')}) + INSERT INTO ${table} (${columns.map(quote).join(', ')}) VALUES (${columns.map((_, i) => `$${i + 1}`).join(', ')}) ON CONFLICT (${conflictCols.map(quote).join(', ')}) DO UPDATE SET ${updateCols @@ -191,15 +183,14 @@ class PgBuilder extends QueryBuilder { } batchedInsertOrReplace( - table: string, + table: QualifiedTablename, columns: string[], records: Array>, conflictCols: string[], updateCols: string[], - maxSqlParameters: number, - schema: string = this.defaultNamespace + maxSqlParameters: number ): Statement[] { - const baseSql = `INSERT INTO "${schema}"."${table}" (${columns + const baseSql = `INSERT INTO ${table} (${columns .map(quote) .join(', ')}) VALUES ` const statements = this.prepareInsertBatchedStatements( @@ -220,19 +211,12 @@ class PgBuilder extends QueryBuilder { })) } - dropTriggerIfExists( - triggerName: string, - tablename: string, - namespace: string = this.defaultNamespace - ) { - return `DROP 
TRIGGER IF EXISTS ${triggerName} ON "${namespace}"."${tablename}";` + dropTriggerIfExists(triggerName: string, table: QualifiedTablename) { + return `DROP TRIGGER IF EXISTS ${triggerName} ON ${table};` } - createNoFkUpdateTrigger( - tablename: string, - pk: string[], - namespace: string = this.defaultNamespace - ): string[] { + createNoFkUpdateTrigger(table: QualifiedTablename, pk: string[]): string[] { + const { namespace, tablename } = table return [ dedent` CREATE OR REPLACE FUNCTION update_ensure_${namespace}_${tablename}_primarykey_function() @@ -252,7 +236,7 @@ class PgBuilder extends QueryBuilder { `, dedent` CREATE TRIGGER update_ensure_${namespace}_${tablename}_primarykey - BEFORE UPDATE ON "${namespace}"."${tablename}" + BEFORE UPDATE ON ${table} FOR EACH ROW EXECUTE FUNCTION update_ensure_${namespace}_${tablename}_primarykey_function(); `, @@ -287,26 +271,23 @@ class PgBuilder extends QueryBuilder { return `json_strip_nulls(${json})` } - setTriggerSetting( - tableName: string, - value: 0 | 1, - namespace: string = this.defaultNamespace - ): string { + setTriggerSetting(table: QualifiedTablename, value: 0 | 1): string { + const { namespace, tablename } = table return dedent` INSERT INTO "${namespace}"."_electric_trigger_settings" ("namespace", "tablename", "flag") - VALUES ('${namespace}', '${tableName}', ${value}) + VALUES ('${namespace}', '${tablename}', ${value}) ON CONFLICT DO NOTHING; ` } createOplogTrigger( opType: 'INSERT' | 'UPDATE' | 'DELETE', - tableName: string, + table: QualifiedTablename, newPKs: string, newRows: string, - oldRows: string, - namespace: string = this.defaultNamespace + oldRows: string ): string[] { + const { namespace, tablename } = table const opTypeLower = opType.toLowerCase() const pk = this.createPKJsonObject(newPKs) // Update has both the old and the new row @@ -319,21 +300,21 @@ class PgBuilder extends QueryBuilder { return [ dedent` - CREATE OR REPLACE FUNCTION ${opTypeLower}_${namespace}_${tableName}_into_oplog_function() + CREATE OR REPLACE FUNCTION ${opTypeLower}_${namespace}_${tablename}_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; BEGIN -- Get the flag value from _electric_trigger_settings - SELECT flag INTO flag_value FROM "${namespace}"._electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${tableName}'; + SELECT flag INTO flag_value FROM "${namespace}"._electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${tablename}'; IF flag_value = 1 THEN -- Insert into _electric_oplog INSERT INTO "${namespace}"._electric_oplog (namespace, tablename, optype, "primaryKey", "newRow", "oldRow", timestamp) VALUES ( '${namespace}', - '${tableName}', + '${tablename}', '${opType}', ${pk}, ${newRecord}, @@ -348,36 +329,36 @@ class PgBuilder extends QueryBuilder { $$ LANGUAGE plpgsql; `, dedent` - CREATE TRIGGER ${opTypeLower}_${namespace}_${tableName}_into_oplog - AFTER ${opType} ON "${namespace}"."${tableName}" + CREATE TRIGGER ${opTypeLower}_${namespace}_${tablename}_into_oplog + AFTER ${opType} ON ${table} FOR EACH ROW - EXECUTE FUNCTION ${opTypeLower}_${namespace}_${tableName}_into_oplog_function(); + EXECUTE FUNCTION ${opTypeLower}_${namespace}_${tablename}_into_oplog_function(); `, ] } createFkCompensationTrigger( opType: 'INSERT' | 'UPDATE', - tableName: string, + table: QualifiedTablename, childKey: string, - fkTableName: string, + fkTable: QualifiedTablename, joinedFkPKs: string, - foreignKey: ForeignKey, - namespace: string = this.defaultNamespace, - fkTableNamespace: 
string = this.defaultNamespace + foreignKey: ForeignKey ): string[] { + const { namespace, tablename } = table + const { namespace: fkTableNamespace, tablename: fkTableName } = fkTable const opTypeLower = opType.toLowerCase() return [ dedent` - CREATE OR REPLACE FUNCTION compensation_${opTypeLower}_${namespace}_${tableName}_${childKey}_into_oplog_function() + CREATE OR REPLACE FUNCTION compensation_${opTypeLower}_${namespace}_${tablename}_${childKey}_into_oplog_function() RETURNS TRIGGER AS $$ BEGIN DECLARE flag_value INTEGER; meta_value INTEGER; BEGIN - SELECT flag INTO flag_value FROM "${namespace}"._electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${tableName}'; + SELECT flag INTO flag_value FROM "${namespace}"._electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${tablename}'; SELECT value INTO meta_value FROM "${namespace}"._electric_meta WHERE key = 'compensations'; @@ -393,7 +374,7 @@ class PgBuilder extends QueryBuilder { jsonb_build_object(${joinedFkPKs}), NULL, NULL - FROM "${fkTableNamespace}"."${fkTableName}" + FROM ${fkTable} WHERE "${foreignKey.parentKey}" = NEW."${foreignKey.childKey}"; END IF; @@ -403,20 +384,18 @@ class PgBuilder extends QueryBuilder { $$ LANGUAGE plpgsql; `, dedent` - CREATE TRIGGER compensation_${opTypeLower}_${namespace}_${tableName}_${childKey}_into_oplog - AFTER ${opType} ON "${namespace}"."${tableName}" + CREATE TRIGGER compensation_${opTypeLower}_${namespace}_${tablename}_${childKey}_into_oplog + AFTER ${opType} ON ${table} FOR EACH ROW - EXECUTE FUNCTION compensation_${opTypeLower}_${namespace}_${tableName}_${childKey}_into_oplog_function(); + EXECUTE FUNCTION compensation_${opTypeLower}_${namespace}_${tablename}_${childKey}_into_oplog_function(); `, ] } setTagsForShadowRows( - oplogTable: QualifiedTablename, - shadowTable: QualifiedTablename + oplog: QualifiedTablename, + shadow: QualifiedTablename ): string { - const oplog = `"${oplogTable.namespace}"."${oplogTable.tablename}"` - const shadow = `"${shadowTable.namespace}"."${shadowTable.tablename}"` return dedent` INSERT INTO ${shadow} (namespace, tablename, "primaryKey", tags) SELECT DISTINCT namespace, tablename, "primaryKey", $1 @@ -430,11 +409,9 @@ class PgBuilder extends QueryBuilder { } removeDeletedShadowRows( - oplogTable: QualifiedTablename, - shadowTable: QualifiedTablename + oplog: QualifiedTablename, + shadow: QualifiedTablename ): string { - const oplog = `"${oplogTable.namespace}"."${oplogTable.tablename}"` - const shadow = `"${shadowTable.namespace}"."${shadowTable.tablename}"` // We do an inner join in a CTE instead of a `WHERE EXISTS (...)` // since this is not reliant on re-executing a query // for every row in the shadow table, but uses a PK join instead. 
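The builder changes above now interpolate QualifiedTablename values directly into SQL strings (e.g. `AFTER ${opType} ON ${table}` or `FROM ${fkTable}`), which relies on the quoting behaviour this same patch gives QualifiedTablename.toString() in the tablename.ts hunk further down. The following is a minimal standalone sketch of that behaviour for illustration only; the class name here is hypothetical and not the library type.

// Sketch of the identifier quoting the interpolated table names depend on.
// Mirrors the toString()/escDoubleQ logic introduced later in this patch.
function escDoubleQ(str: string): string {
  // SQL identifiers escape embedded double quotes by doubling them.
  return str.replaceAll('"', '""')
}

class IllustrativeQualifiedTablename {
  constructor(public namespace: string, public tablename: string) {}

  toString(): string {
    // Quote both parts so template interpolation such as
    // `INSERT INTO ${table} ...` yields a safely quoted identifier pair.
    return `"${escDoubleQ(this.namespace)}"."${escDoubleQ(this.tablename)}"`
  }
}

// Usage: interpolating the object directly into a SQL string.
const oplog = new IllustrativeQualifiedTablename('main', '_electric_oplog')
console.log(`SELECT * FROM ${oplog}`)
// -> SELECT * FROM "main"."_electric_oplog"

const odd = new IllustrativeQualifiedTablename('public', 'f"oo')
console.log(`${odd}`)
// -> "public"."f""oo"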
diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts index 3e692e9c08..22dc46fac6 100644 --- a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -36,10 +36,10 @@ class SqliteBuilder extends QueryBuilder { return [query] } - tableExists(tableName: string, _namespace?: string): Statement { + tableExists(table: QualifiedTablename): Statement { return { sql: `SELECT 1 FROM sqlite_master WHERE type = 'table' AND name = ?`, - args: [tableName], + args: [table.tablename], } } @@ -96,14 +96,13 @@ class SqliteBuilder extends QueryBuilder { } insertOrIgnore( - table: string, + table: QualifiedTablename, columns: string[], - values: SqlValue[], - schema: string = this.defaultNamespace + values: SqlValue[] ): Statement { return { sql: dedent` - INSERT OR IGNORE INTO ${schema}.${table} (${columns.join(', ')}) + INSERT OR IGNORE INTO ${table} (${columns.join(', ')}) VALUES (${columns.map(() => '?').join(', ')}); `, args: values, @@ -111,16 +110,15 @@ class SqliteBuilder extends QueryBuilder { } insertOrReplace( - table: string, + table: QualifiedTablename, columns: string[], values: Array, _conflictCols: string[], - _updateCols: string[], - schema: string = this.defaultNamespace + _updateCols: string[] ): Statement { return { sql: dedent` - INSERT OR REPLACE INTO ${schema}.${table} (${columns.join(', ')}) + INSERT OR REPLACE INTO ${table} (${columns.join(', ')}) VALUES (${columns.map(() => '?').join(', ')}) `, args: values, @@ -128,21 +126,19 @@ class SqliteBuilder extends QueryBuilder { } insertOrReplaceWith( - table: string, + table: QualifiedTablename, columns: string[], values: Array, conflictCols: string[], updateCols: string[], - updateVals: SqlValue[], - schema: string = this.defaultNamespace + updateVals: SqlValue[] ): Statement { const { sql: baseSql, args } = this.insertOrReplace( table, columns, values, conflictCols, - updateCols, - schema + updateCols ) return { sql: @@ -155,15 +151,14 @@ class SqliteBuilder extends QueryBuilder { } batchedInsertOrReplace( - table: string, + table: QualifiedTablename, columns: string[], records: Array>, _conflictCols: string[], _updateCols: string[], - maxSqlParameters: number, - schema: string = this.defaultNamespace + maxSqlParameters: number ): Statement[] { - const baseSql = `INSERT OR REPLACE INTO ${schema}.${table} (${columns.join( + const baseSql = `INSERT OR REPLACE INTO ${table} (${columns.join( ', ' )}) VALUES ` return this.prepareInsertBatchedStatements( @@ -174,23 +169,16 @@ class SqliteBuilder extends QueryBuilder { ) } - dropTriggerIfExists( - triggerName: string, - _tablename: string, - _namespace?: string - ) { + dropTriggerIfExists(triggerName: string, _tablename: QualifiedTablename) { return `DROP TRIGGER IF EXISTS ${triggerName};` } - createNoFkUpdateTrigger( - tablename: string, - pk: string[], - namespace: string = this.defaultNamespace - ): string[] { + createNoFkUpdateTrigger(table: QualifiedTablename, pk: string[]): string[] { + const { namespace, tablename } = table return [ dedent` CREATE TRIGGER update_ensure_${namespace}_${tablename}_primarykey - BEFORE UPDATE ON "${namespace}"."${tablename}" + BEFORE UPDATE ON ${table} BEGIN SELECT CASE @@ -220,22 +208,19 @@ class SqliteBuilder extends QueryBuilder { return this.removeSpaceAndNullValuesFromJson(this.createJsonObject(rows)) } - setTriggerSetting( - tableName: string, - value: 0 | 1, - namespace: string = 
this.defaultNamespace - ): string { - return `INSERT OR IGNORE INTO _electric_trigger_settings (namespace, tablename, flag) VALUES ('${namespace}', '${tableName}', ${value});` + setTriggerSetting(table: QualifiedTablename, value: 0 | 1): string { + const { namespace, tablename } = table + return `INSERT OR IGNORE INTO _electric_trigger_settings (namespace, tablename, flag) VALUES ('${namespace}', '${tablename}', ${value});` } createOplogTrigger( opType: 'INSERT' | 'UPDATE' | 'DELETE', - tableName: string, + table: QualifiedTablename, newPKs: string, newRows: string, - oldRows: string, - namespace: string = this.defaultNamespace + oldRows: string ): string[] { + const { namespace, tablename } = table const opTypeLower = opType.toLowerCase() const pk = this.createPKJsonObject(newPKs) // Update has both the old and the new row @@ -248,12 +233,12 @@ class SqliteBuilder extends QueryBuilder { return [ dedent` - CREATE TRIGGER ${opTypeLower}_${namespace}_${tableName}_into_oplog - AFTER ${opType} ON "${namespace}"."${tableName}" - WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${tableName}') + CREATE TRIGGER ${opTypeLower}_${namespace}_${tablename}_into_oplog + AFTER ${opType} ON ${table} + WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${tablename}') BEGIN INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) - VALUES ('${namespace}', '${tableName}', '${opType}', ${pk}, ${newRecord}, ${oldRecord}, NULL); + VALUES ('${namespace}', '${tablename}', '${opType}', ${pk}, ${newRecord}, ${oldRecord}, NULL); END; `, ] @@ -261,40 +246,38 @@ class SqliteBuilder extends QueryBuilder { createFkCompensationTrigger( opType: 'INSERT' | 'UPDATE', - tableName: string, + table: QualifiedTablename, childKey: string, - fkTableName: string, + fkTable: QualifiedTablename, joinedFkPKs: string, - foreignKey: ForeignKey, - namespace: string = this.defaultNamespace, - fkTableNamespace: string = this.defaultNamespace + foreignKey: ForeignKey ): string[] { + const { namespace, tablename } = table + const { namespace: fkTableNamespace, tablename: fkTableName } = fkTable const opTypeLower = opType.toLowerCase() return [ dedent` - CREATE TRIGGER compensation_${opTypeLower}_${namespace}_${tableName}_${childKey}_into_oplog - AFTER ${opType} ON "${namespace}"."${tableName}" - WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${tableName}') AND + CREATE TRIGGER compensation_${opTypeLower}_${namespace}_${tablename}_${childKey}_into_oplog + AFTER ${opType} ON ${table} + WHEN 1 = (SELECT flag from _electric_trigger_settings WHERE namespace = '${namespace}' AND tablename = '${tablename}') AND 1 = (SELECT value from _electric_meta WHERE key = 'compensations') BEGIN INSERT INTO _electric_oplog (namespace, tablename, optype, primaryKey, newRow, oldRow, timestamp) SELECT '${fkTableNamespace}', '${fkTableName}', 'COMPENSATION', ${this.createPKJsonObject( joinedFkPKs )}, json_object(${joinedFkPKs}), NULL, NULL - FROM "${fkTableNamespace}"."${fkTableName}" WHERE "${ - foreignKey.parentKey - }" = new."${foreignKey.childKey}"; + FROM ${fkTable} WHERE "${foreignKey.parentKey}" = new."${ + foreignKey.childKey + }"; END; `, ] } setTagsForShadowRows( - oplogTable: QualifiedTablename, - shadowTable: QualifiedTablename + oplog: QualifiedTablename, + shadow: QualifiedTablename ): string { - const oplog = 
`"${oplogTable.namespace}"."${oplogTable.tablename}"` - const shadow = `"${shadowTable.namespace}"."${shadowTable.tablename}"` return dedent` INSERT OR REPLACE INTO ${shadow} (namespace, tablename, primaryKey, tags) SELECT namespace, tablename, primaryKey, ? @@ -306,11 +289,9 @@ class SqliteBuilder extends QueryBuilder { } removeDeletedShadowRows( - oplogTable: QualifiedTablename, - shadowTable: QualifiedTablename + oplog: QualifiedTablename, + shadow: QualifiedTablename ): string { - const oplog = `"${oplogTable.namespace}"."${oplogTable.tablename}"` - const shadow = `"${shadowTable.namespace}"."${shadowTable.tablename}"` // We do an inner join in a CTE instead of a `WHERE EXISTS (...)` // since this is not reliant on re-executing a query // for every row in the shadow table, but uses a PK join instead. diff --git a/clients/typescript/src/migrators/schema.ts b/clients/typescript/src/migrators/schema.ts index d2989dbc58..164f0a9de5 100644 --- a/clients/typescript/src/migrators/schema.ts +++ b/clients/typescript/src/migrators/schema.ts @@ -9,7 +9,7 @@ export const buildInitialMigration = (builder: QueryBuilder) => { { statements: [ //`-- The ops log table\n`, - `CREATE TABLE IF NOT EXISTS "${oplogTable.namespace}"."${oplogTable.tablename}" (\n "rowid" ${builder.AUTOINCREMENT_PK},\n "namespace" TEXT NOT NULL,\n "tablename" TEXT NOT NULL,\n "optype" TEXT NOT NULL,\n "primaryKey" TEXT NOT NULL,\n "newRow" TEXT,\n "oldRow" TEXT,\n "timestamp" TEXT, "clearTags" TEXT DEFAULT '[]' NOT NULL\n);`, + `CREATE TABLE IF NOT EXISTS ${oplogTable} (\n "rowid" ${builder.AUTOINCREMENT_PK},\n "namespace" TEXT NOT NULL,\n "tablename" TEXT NOT NULL,\n "optype" TEXT NOT NULL,\n "primaryKey" TEXT NOT NULL,\n "newRow" TEXT,\n "oldRow" TEXT,\n "timestamp" TEXT, "clearTags" TEXT DEFAULT '[]' NOT NULL\n);`, // Add an index for the oplog builder.createIndex('_electric_table_pk_reference', oplogTable, [ 'namespace', @@ -18,14 +18,14 @@ export const buildInitialMigration = (builder: QueryBuilder) => { ]), builder.createIndex('_electric_timestamp', oplogTable, ['timestamp']), //`-- Somewhere to keep our metadata\n`, - `CREATE TABLE IF NOT EXISTS "${metaTable.namespace}"."${metaTable.tablename}" (\n "key" TEXT PRIMARY KEY,\n "value" ${builder.BLOB}\n);`, + `CREATE TABLE IF NOT EXISTS ${metaTable} (\n "key" TEXT PRIMARY KEY,\n "value" ${builder.BLOB}\n);`, //`-- Somewhere to track migrations\n`, - `CREATE TABLE IF NOT EXISTS "${migrationsTable.namespace}"."${migrationsTable.tablename}" (\n "id" ${builder.AUTOINCREMENT_PK},\n "version" TEXT NOT NULL UNIQUE,\n "applied_at" TEXT NOT NULL\n);`, + `CREATE TABLE IF NOT EXISTS ${migrationsTable} (\n "id" ${builder.AUTOINCREMENT_PK},\n "version" TEXT NOT NULL UNIQUE,\n "applied_at" TEXT NOT NULL\n);`, //`-- Initialisation of the metadata table\n`, - `INSERT INTO "${metaTable.namespace}"."${metaTable.tablename}" (key, value) VALUES ('compensations', 1), ('lsn', ''), ('clientId', ''), ('subscriptions', ''), ('seenAdditionalData', '');`, + `INSERT INTO ${metaTable} (key, value) VALUES ('compensations', 1), ('lsn', ''), ('clientId', ''), ('subscriptions', ''), ('seenAdditionalData', '');`, //`-- These are toggles for turning the triggers on and off\n`, - `DROP TABLE IF EXISTS "${triggersTable.namespace}"."${triggersTable.tablename}";`, - `CREATE TABLE "${triggersTable.namespace}"."${triggersTable.tablename}" ("namespace" TEXT, "tablename" TEXT, "flag" INTEGER, PRIMARY KEY ("namespace", "tablename"));`, + `DROP TABLE IF EXISTS ${triggersTable};`, + `CREATE TABLE ${triggersTable} 
("namespace" TEXT, "tablename" TEXT, "flag" INTEGER, PRIMARY KEY ("namespace", "tablename"));`, //`-- Somewhere to keep dependency tracking information\n`, `CREATE TABLE "${shadowTable.namespace}"."${ shadowTable.tablename diff --git a/clients/typescript/src/migrators/triggers.ts b/clients/typescript/src/migrators/triggers.ts index 70dfb03d4e..48003d4b24 100644 --- a/clients/typescript/src/migrators/triggers.ts +++ b/clients/typescript/src/migrators/triggers.ts @@ -1,4 +1,4 @@ -import { Statement } from '../util' +import { QualifiedTablename, Statement } from '../util' import { QueryBuilder } from './query-builder' export type ForeignKey = { @@ -12,8 +12,7 @@ type ColumnType = string type ColumnTypes = Record export type Table = { - tableName: string - namespace: string + qualifiedTableName: QualifiedTablename columns: ColumnName[] primary: ColumnName[] foreignKeys: ForeignKey[] @@ -41,7 +40,7 @@ export function generateOplogTriggers( table: Omit, builder: QueryBuilder ): Statement[] { - const { tableName, namespace, columns, primary, columnTypes } = table + const { qualifiedTableName, columns, primary, columnTypes } = table const newPKs = joinColsForJSON(primary, columnTypes, builder, 'new') const oldPKs = joinColsForJSON(primary, columnTypes, builder, 'old') @@ -49,19 +48,18 @@ export function generateOplogTriggers( const oldRows = joinColsForJSON(columns, columnTypes, builder, 'old') const [dropFkTrigger, ...createFkTrigger] = - builder.createOrReplaceNoFkUpdateTrigger(tableName, primary, namespace) + builder.createOrReplaceNoFkUpdateTrigger(qualifiedTableName, primary) const [dropInsertTrigger, ...createInsertTrigger] = builder.createOrReplaceInsertTrigger( - tableName, + qualifiedTableName, newPKs, newRows, - oldRows, - namespace + oldRows ) return [ // Toggles for turning the triggers on and off - builder.setTriggerSetting(tableName, 1, namespace), + builder.setTriggerSetting(qualifiedTableName, 1), // Triggers for table ${tableName} // ensures primary key is immutable dropFkTrigger, @@ -70,18 +68,16 @@ export function generateOplogTriggers( dropInsertTrigger, ...createInsertTrigger, ...builder.createOrReplaceUpdateTrigger( - tableName, + qualifiedTableName, newPKs, newRows, - oldRows, - namespace + oldRows ), ...builder.createOrReplaceDeleteTrigger( - tableName, + qualifiedTableName, oldPKs, newRows, - oldRows, - namespace + oldRows ), ].map(mkStatement) } @@ -104,7 +100,7 @@ function generateCompensationTriggers( table: Table, builder: QueryBuilder ): Statement[] { - const { tableName, namespace, foreignKeys, columnTypes } = table + const { qualifiedTableName, foreignKeys, columnTypes } = table const makeTriggers = (foreignKey: ForeignKey) => { const { childKey } = foreignKey @@ -112,6 +108,10 @@ function generateCompensationTriggers( const fkTableNamespace = builder.defaultNamespace // currently, Electric always uses the DB's default namespace const fkTableName = foreignKey.table const fkTablePK = foreignKey.parentKey // primary key of the table pointed at by the FK. + const qualifiedFkTable = new QualifiedTablename( + fkTableNamespace, + fkTableName + ) // This table's `childKey` points to the parent's table `parentKey`. // `joinColsForJSON` looks up the type of the `parentKey` column in the provided `colTypes` object. 
@@ -129,13 +129,11 @@ function generateCompensationTriggers( const [dropInsertTrigger, ...createInsertTrigger] = builder.createOrReplaceInsertCompensationTrigger( - tableName, + qualifiedTableName, childKey, - fkTableName, + qualifiedFkTable, joinedFkPKs, - foreignKey, - namespace, - fkTableNamespace + foreignKey ) return [ @@ -146,13 +144,11 @@ function generateCompensationTriggers( dropInsertTrigger, ...createInsertTrigger, ...builder.createOrReplaceUpdateCompensationTrigger( - tableName, + qualifiedTableName, foreignKey.childKey, - fkTableName, + qualifiedFkTable, joinedFkPKs, - foreignKey, - namespace, - fkTableNamespace + foreignKey ), ].map(mkStatement) } diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index 7a1df0a1b9..74c7e4ce73 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -301,7 +301,7 @@ export class SatelliteProcess implements Satellite { // TODO: table and schema warrant escaping here too, but they aren't in the triggers table. const deleteStmts = tables.map((x) => ({ - sql: `DELETE FROM "${x.namespace}"."${x.tablename}"`, + sql: `DELETE FROM ${x}`, })) const stmtsWithTriggers = [ @@ -547,7 +547,7 @@ export class SatelliteProcess implements Satellite { // For each table, do a batched insert for (const [_table, { relation, records, table }] of groupedChanges) { const columnNames = relation.columns.map((col) => col.name) - const qualifiedTableName = `"${table.namespace}"."${table.tablename}"` + const qualifiedTableName = `${table}` const orIgnore = this.builder.sqliteOnly('OR IGNORE') const onConflictDoNothing = this.builder.pgOnly('ON CONFLICT DO NOTHING') const sqlBase = `INSERT ${orIgnore} INTO ${qualifiedTableName} (${columnNames.join( @@ -571,13 +571,12 @@ export class SatelliteProcess implements Satellite { // Then do a batched insert for the shadow table const batchedShadowInserts = this.builder.batchedInsertOrReplace( - this.opts.shadowTable.tablename, + this.opts.shadowTable, ['namespace', 'tablename', 'primaryKey', 'tags'], allArgsForShadowInsert, ['namespace', 'tablename', 'primaryKey'], ['namespace', 'tablename', 'tags'], - this.maxSqlParameters, - this.opts.shadowTable.namespace + this.maxSqlParameters ) stmts.push(...batchedShadowInserts) @@ -978,8 +977,8 @@ export class SatelliteProcess implements Satellite { } try { - const oplog = `"${this.opts.oplogTable.namespace}"."${this.opts.oplogTable.tablename}"` - const shadow = `"${this.opts.shadowTable.namespace}"."${this.opts.shadowTable.tablename}"` + const oplog = `${this.opts.oplogTable}` + const shadow = `${this.opts.shadowTable}` const timestamp = new Date() const newTag = this._generateTag(timestamp) @@ -1205,7 +1204,7 @@ export class SatelliteProcess implements Satellite { async _getEntries(since?: number): Promise { // `rowid` is never below 0, so -1 means "everything" since ??= -1 - const oplog = `"${this.opts.oplogTable.namespace}"."${this.opts.oplogTable.tablename}"` + const oplog = `${this.opts.oplogTable}` const selectEntries = ` SELECT * FROM ${oplog} @@ -1218,7 +1217,7 @@ export class SatelliteProcess implements Satellite { } _deleteShadowTagsStatement(shadow: ShadowEntry): Statement { - const shadowTable = `"${this.opts.shadowTable.namespace}"."${this.opts.shadowTable.tablename}"` + const shadowTable = `${this.opts.shadowTable}` const pos = (i: number) => this.builder.makePositionalParam(i) const deleteRow = ` DELETE FROM ${shadowTable} @@ -1234,12 +1233,11 @@ export class 
SatelliteProcess implements Satellite { _updateShadowTagsStatement(shadow: ShadowEntry): Statement { return this.builder.insertOrReplace( - this.opts.shadowTable.tablename, + this.opts.shadowTable, ['namespace', 'tablename', 'primaryKey', 'tags'], [shadow.namespace, shadow.tablename, shadow.primaryKey, shadow.tags], ['namespace', 'tablename', 'primaryKey'], - ['tags'], - this.opts.shadowTable.namespace + ['tags'] ) } @@ -1464,7 +1462,7 @@ export class SatelliteProcess implements Satellite { flag: 0 | 1 ): Statement[] { if (tables.length === 0) return [] - const triggers = `"${this.opts.triggersTable.namespace}"."${this.opts.triggersTable.tablename}"` + const triggers = `${this.opts.triggersTable}` const namespacesAndTableNames = tables.flatMap((tbl) => [ tbl.namespace, tbl.tablename, @@ -1482,8 +1480,7 @@ export class SatelliteProcess implements Satellite { } _addSeenAdditionalDataStmt(ref: string): Statement { - const meta = `"${this.opts.metaTable.namespace}"."${this.opts.metaTable.tablename}"` - + const meta = `${this.opts.metaTable}` const sql = ` INSERT INTO ${meta} (key, value) VALUES ('seenAdditionalData', ${this.builder.makePositionalParam( 1 @@ -1505,7 +1502,7 @@ export class SatelliteProcess implements Satellite { ): Statement _setMetaStatement(key: Uuid, value: string | null): Statement _setMetaStatement(key: string, value: SqlValue) { - const meta = `"${this.opts.metaTable.namespace}"."${this.opts.metaTable.tablename}"` + const meta = `${this.opts.metaTable}` const pos = (i: number) => this.builder.makePositionalParam(i) const sql = `UPDATE ${meta} SET value = ${pos(1)} WHERE key = ${pos(2)}` const args = [value, key] @@ -1528,7 +1525,7 @@ export class SatelliteProcess implements Satellite { async _getMeta(key: Uuid): Promise async _getMeta(key: K): Promise async _getMeta(key: string) { - const meta = `"${this.opts.metaTable.namespace}"."${this.opts.metaTable.tablename}"` + const meta = `${this.opts.metaTable}` const pos = (i: number) => this.builder.makePositionalParam(i) const sql = `SELECT value from ${meta} WHERE key = ${pos(1)}` const args = [key] @@ -1564,7 +1561,7 @@ export class SatelliteProcess implements Satellite { async _garbageCollectOplog(commitTimestamp: Date): Promise { const isoString = commitTimestamp.toISOString() - const oplog = `"${this.opts.oplogTable.namespace}"."${this.opts.oplogTable.tablename}"` + const oplog = `${this.opts.oplogTable}` const pos = (i: number) => this.builder.makePositionalParam(i) await this.adapter.run({ sql: `DELETE FROM ${oplog} WHERE timestamp = ${pos(1)}`, @@ -1632,22 +1629,20 @@ export class SatelliteProcess implements Satellite { if (updateColumnStmts.length > 0) { return this.builder.insertOrReplaceWith( - qualifiedTableName.tablename, + qualifiedTableName, columnNames, columnValues, ['id'], updateColumnStmts, - updateColumnStmts.map((col) => fullRow[col]), - qualifiedTableName.namespace + updateColumnStmts.map((col) => fullRow[col]) ) } // no changes, can ignore statement if exists return this.builder.insertOrIgnore( - qualifiedTableName.tablename, + qualifiedTableName, columnNames, - columnValues, - qualifiedTableName.namespace + columnValues ) } @@ -1673,8 +1668,10 @@ export function generateTriggersForTable( builder: QueryBuilder ): Statement[] { const table = { - tableName: tbl.name, - namespace: builder.defaultNamespace, + qualifiedTableName: new QualifiedTablename( + builder.defaultNamespace, + tbl.name + ), columns: tbl.columns.map((col) => col.name), primary: tbl.pks, foreignKeys: tbl.fks.map((fk) => { diff --git 
a/clients/typescript/src/util/tablename.ts b/clients/typescript/src/util/tablename.ts index 4780fa6dc7..5612a1f377 100644 --- a/clients/typescript/src/util/tablename.ts +++ b/clients/typescript/src/util/tablename.ts @@ -14,22 +14,24 @@ export class QualifiedTablename { } toString(): string { - // Don't collapse it to '.' because that can lead to clashes - // since both `QualifiedTablename('foo', 'bar.baz')` and `QualifiedTablename('foo.bar', 'baz')` - // would be collapsed to 'foo.bar.baz'. - return JSON.stringify({ - namespace: this.namespace, - tablename: this.tablename, - }) + // Escapes double quotes because names can contain double quotes + // e.g. CREATE TABLE "f""oo" (...) creates a table named f"oo + return `"${escDoubleQ(this.namespace)}"."${escDoubleQ(this.tablename)}"` } - static parse(json: string): QualifiedTablename { + static parse(fullyQualifiedName: string): QualifiedTablename { try { - const { namespace, tablename } = JSON.parse(json) - return new QualifiedTablename(namespace, tablename) + const [_, namespace, tablename] = /"(.*)"\."(.*)"/.exec( + fullyQualifiedName + )! + return new QualifiedTablename( + unescDoubleQ(namespace), + unescDoubleQ(tablename) + ) } catch (_e) { throw new Error( - 'Could not parse string into a qualified table name: ' + json + 'Could not parse string into a qualified table name: ' + + fullyQualifiedName ) } } @@ -56,3 +58,11 @@ export const hasIntersection = ( return false } + +function escDoubleQ(str: string): string { + return str.replaceAll('"', '""') +} + +function unescDoubleQ(str: string): string { + return str.replaceAll('""', '"') +} diff --git a/clients/typescript/test/migrators/pglite/schema.test.ts b/clients/typescript/test/migrators/pglite/schema.test.ts index 0027104564..e414427a1c 100644 --- a/clients/typescript/test/migrators/pglite/schema.test.ts +++ b/clients/typescript/test/migrators/pglite/schema.test.ts @@ -40,7 +40,7 @@ test('check schema keys are unique', async (t) => { const migrator = new BundleMigrator(adapter, migrations) await migrator.up() const defaults = satelliteDefaults(migrator.queryBuilder.defaultNamespace) - const metaTable = `"${defaults.metaTable.namespace}"."${defaults.metaTable.tablename}"` + const metaTable = `${defaults.metaTable}` await adapter.run({ sql: `INSERT INTO ${metaTable} (key, value) values ('key', 'value')`, diff --git a/clients/typescript/test/migrators/pglite/triggers.test.ts b/clients/typescript/test/migrators/pglite/triggers.test.ts index 2cd976a604..7948d16614 100644 --- a/clients/typescript/test/migrators/pglite/triggers.test.ts +++ b/clients/typescript/test/migrators/pglite/triggers.test.ts @@ -17,12 +17,11 @@ type Context = ContextType & { const test = testAny as TestFn const defaults = satelliteDefaults('public') -const oplogTable = `"${defaults.oplogTable.namespace}"."${defaults.oplogTable.tablename}"` +const oplogTable = `${defaults.oplogTable}` const personTable = getPersonTable('public') -const personNamespace = personTable.namespace -const personTableName = personTable.tableName -const qualifiedPersonTable = `"${personNamespace}"."${personTableName}"` +const personTableName = personTable.qualifiedTableName.tablename +const qualifiedPersonTable = personTable.qualifiedTableName test.beforeEach(async (t) => { const db = new PGlite() @@ -223,7 +222,6 @@ test('oplog insertion trigger should insert row into oplog table', async (t) => test('oplog trigger should handle Infinity values correctly', async (t) => { const { db, migrateDb } = t.context - const tableName = 
personTable.tableName // Migrate the DB with the necessary tables and triggers await migrateDb() @@ -237,7 +235,7 @@ test('oplog trigger should handle Infinity values correctly', async (t) => { t.is(oplogRows.length, 1) t.deepEqual(oplogRows[0], { namespace: 'public', - tablename: tableName, + tablename: personTableName, optype: 'INSERT', // `id` and `bmi` values are stored as strings // because we cast REAL values to text in the trigger diff --git a/clients/typescript/test/migrators/postgres/schema.test.ts b/clients/typescript/test/migrators/postgres/schema.test.ts index b3eb21c9cb..d4f20b88a0 100644 --- a/clients/typescript/test/migrators/postgres/schema.test.ts +++ b/clients/typescript/test/migrators/postgres/schema.test.ts @@ -40,7 +40,7 @@ test('check schema keys are unique', async (t) => { const migrator = new BundleMigrator(adapter, migrations) await migrator.up() const defaults = satelliteDefaults(migrator.queryBuilder.defaultNamespace) - const metaTable = `"${defaults.metaTable.namespace}"."${defaults.metaTable.tablename}"` + const metaTable = `${defaults.metaTable}` await adapter.run({ sql: `INSERT INTO ${metaTable} (key, value) values ('key', 'value')`, diff --git a/clients/typescript/test/migrators/postgres/triggers.test.ts b/clients/typescript/test/migrators/postgres/triggers.test.ts index bd1fc8dd2a..9a14b5ede9 100644 --- a/clients/typescript/test/migrators/postgres/triggers.test.ts +++ b/clients/typescript/test/migrators/postgres/triggers.test.ts @@ -17,12 +17,11 @@ type Context = ContextType & { const test = testAny as TestFn const defaults = satelliteDefaults('public') -const oplogTable = `"${defaults.oplogTable.namespace}"."${defaults.oplogTable.tablename}"` +const oplogTable = `${defaults.oplogTable}` const personTable = getPersonTable('public') -const personNamespace = personTable.namespace -const personTableName = personTable.tableName -const qualifiedPersonTable = `"${personNamespace}"."${personTableName}"` +const personTableName = personTable.qualifiedTableName.tablename +const qualifiedPersonTable = personTable.qualifiedTableName let i = 1 let port = 5300 @@ -227,7 +226,6 @@ test('oplog insertion trigger should insert row into oplog table', async (t) => test('oplog trigger should handle Infinity values correctly', async (t) => { const { db, migrateDb } = t.context - const tableName = personTable.tableName // Migrate the DB with the necessary tables and triggers await migrateDb() @@ -243,7 +241,7 @@ test('oplog trigger should handle Infinity values correctly', async (t) => { t.is(oplogRows.length, 1) t.deepEqual(oplogRows[0], { namespace: 'public', - tablename: tableName, + tablename: personTableName, optype: 'INSERT', // `id` and `bmi` values are stored as strings // because we cast REAL values to text in the trigger diff --git a/clients/typescript/test/migrators/sqlite/schema.test.ts b/clients/typescript/test/migrators/sqlite/schema.test.ts index e17c6d1e73..7780813f4f 100644 --- a/clients/typescript/test/migrators/sqlite/schema.test.ts +++ b/clients/typescript/test/migrators/sqlite/schema.test.ts @@ -40,7 +40,7 @@ test('check schema keys are unique', async (t) => { const migrator = new BundleMigrator(adapter, migrations) await migrator.up() const defaults = satelliteDefaults(migrator.queryBuilder.defaultNamespace) - const metaTable = `"${defaults.metaTable.namespace}"."${defaults.metaTable.tablename}"` + const metaTable = `${defaults.metaTable}` await adapter.run({ sql: `INSERT INTO ${metaTable} (key, value) values ('key', 'value')`, diff --git 
a/clients/typescript/test/migrators/sqlite/triggers.test.ts b/clients/typescript/test/migrators/sqlite/triggers.test.ts index b9da58a8ea..2dc5c20356 100644 --- a/clients/typescript/test/migrators/sqlite/triggers.test.ts +++ b/clients/typescript/test/migrators/sqlite/triggers.test.ts @@ -18,8 +18,10 @@ type Context = ContextType & { const test = testAny as TestFn const defaults = satelliteDefaults('main') -const oplogTable = `"${defaults.oplogTable.namespace}"."${defaults.oplogTable.tablename}"` +const oplogTable = `${defaults.oplogTable}` const personTable = getPersonTable('main') +const qualifiedPersonTable = personTable.qualifiedTableName +const personTableName = qualifiedPersonTable.tablename test.beforeEach(async (t) => { const db = new OriginalDatabase(':memory:') @@ -87,13 +89,12 @@ test('generateTableTriggers should create correct triggers for a table', (t) => test('oplog insertion trigger should insert row into oplog table', async (t) => { const { db, migrateDb } = t.context - const tableName = personTable.tableName // Migrate the DB with the necessary tables and triggers await migrateDb() // Insert a row in the table - const insertRowSQL = `INSERT INTO ${tableName} (id, name, age, bmi, int8, blob) VALUES (1, 'John Doe', 30, 25.5, 7, x'0001ff')` + const insertRowSQL = `INSERT INTO ${qualifiedPersonTable} (id, name, age, bmi, int8, blob) VALUES (1, 'John Doe', 30, 25.5, 7, x'0001ff')` db.exec(insertRowSQL) // Check that the oplog table contains an entry for the inserted row @@ -101,7 +102,7 @@ test('oplog insertion trigger should insert row into oplog table', async (t) => t.is(oplogRows.length, 1) t.deepEqual(oplogRows[0], { namespace: 'main', - tablename: tableName, + tablename: personTableName, optype: 'INSERT', // `id` and `bmi` values are stored as strings // because we cast REAL values to text in the trigger @@ -128,13 +129,12 @@ test('oplog insertion trigger should insert row into oplog table', async (t) => test('oplog trigger should handle Infinity values correctly', async (t) => { const { db, migrateDb } = t.context - const tableName = personTable.tableName // Migrate the DB with the necessary tables and triggers await migrateDb() // Insert a row in the table - const insertRowSQL = `INSERT INTO ${tableName} (id, name, age, bmi, int8, blob) VALUES (-9e999, 'John Doe', 30, 9e999, 7, x'0001ff')` + const insertRowSQL = `INSERT INTO ${qualifiedPersonTable} (id, name, age, bmi, int8, blob) VALUES (-9e999, 'John Doe', 30, 9e999, 7, x'0001ff')` db.exec(insertRowSQL) // Check that the oplog table contains an entry for the inserted row @@ -142,7 +142,7 @@ test('oplog trigger should handle Infinity values correctly', async (t) => { t.is(oplogRows.length, 1) t.deepEqual(oplogRows[0], { namespace: 'main', - tablename: tableName, + tablename: personTableName, optype: 'INSERT', // `id` and `bmi` values are stored as strings // because we cast REAL values to text in the trigger diff --git a/clients/typescript/test/migrators/triggers.ts b/clients/typescript/test/migrators/triggers.ts index f14074c2e9..2af1e5cc18 100644 --- a/clients/typescript/test/migrators/triggers.ts +++ b/clients/typescript/test/migrators/triggers.ts @@ -16,22 +16,20 @@ export type ContextType = { export const triggerTests = (test: TestFn) => { test('oplog trigger should separate null blobs from empty blobs', async (t) => { const { adapter, migrateDb, dialect, personTable, defaults } = t.context - const namespace = personTable.namespace - const tableName = personTable.tableName // Migrate the DB with the necessary tables 
and triggers await migrateDb() // Insert null and empty rows in the table - const insertRowNullSQL = `INSERT INTO "${namespace}"."${tableName}" (id, name, age, bmi, int8, blob) VALUES (1, 'John Doe', 30, 25.5, 7, NULL)` + const insertRowNullSQL = `INSERT INTO ${personTable.qualifiedTableName} (id, name, age, bmi, int8, blob) VALUES (1, 'John Doe', 30, 25.5, 7, NULL)` const blobValue = dialect === 'Postgres' ? `'\\x'` : `x''` - const insertRowEmptySQL = `INSERT INTO "${namespace}"."${tableName}" (id, name, age, bmi, int8, blob) VALUES (2, 'John Doe', 30, 25.5, 7, ${blobValue})` + const insertRowEmptySQL = `INSERT INTO ${personTable.qualifiedTableName} (id, name, age, bmi, int8, blob) VALUES (2, 'John Doe', 30, 25.5, 7, ${blobValue})` await adapter.run({ sql: insertRowNullSQL }) await adapter.run({ sql: insertRowEmptySQL }) // Check that the oplog table contains an entry for the inserted row const oplogRows = await adapter.query({ - sql: `SELECT * FROM "${defaults.oplogTable.namespace}"."${defaults.oplogTable.tablename}"`, + sql: `SELECT * FROM ${defaults.oplogTable}`, }) t.is(oplogRows.length, 2) t.regex(oplogRows[0].newRow as string, /,\s*"blob":\s*null\s*,/) diff --git a/clients/typescript/test/satellite/common.ts b/clients/typescript/test/satellite/common.ts index f9b5845686..e6fb68f437 100644 --- a/clients/typescript/test/satellite/common.ts +++ b/clients/typescript/test/satellite/common.ts @@ -1,5 +1,5 @@ import { mkdir, rm as removeFile } from 'node:fs/promises' -import { RelationsCache, randomValue } from '../../src/util' +import { QualifiedTablename, RelationsCache, randomValue } from '../../src/util' import type { Database as SqliteDB } from 'better-sqlite3' import SqliteDatabase from 'better-sqlite3' import { DatabaseAdapter as SqliteDatabaseAdapter } from '../../src/drivers/better-sqlite3' @@ -411,11 +411,9 @@ export async function migrateDb( const [createMainSchema, ...restMigration] = migration await db.run({ sql: createMainSchema }) - const namespace = table.namespace - const tableName = table.tableName // Create the table in the database on the given namespace const blobType = builder.dialect === 'SQLite' ? 
'BLOB' : 'BYTEA' - const createTableSQL = `CREATE TABLE "${namespace}"."${tableName}" (id REAL PRIMARY KEY, name TEXT, age INTEGER, bmi REAL, int8 INTEGER, blob ${blobType})` + const createTableSQL = `CREATE TABLE ${table.qualifiedTableName} (id REAL PRIMARY KEY, name TEXT, age INTEGER, bmi REAL, int8 INTEGER, blob ${blobType})` await db.run({ sql: createTableSQL }) // Apply the initial migration on the database @@ -435,8 +433,7 @@ export async function migrateDb( export const personTable: (namespace: string) => Table = ( namespace: string ) => ({ - namespace, - tableName: 'personTable', + qualifiedTableName: new QualifiedTablename(namespace, 'personTable'), columns: ['id', 'name', 'age', 'bmi', 'int8', 'blob'], primary: ['id'], foreignKeys: [], diff --git a/clients/typescript/test/satellite/merge.test.ts b/clients/typescript/test/satellite/merge.test.ts index b342e01f22..5e4cdce57d 100644 --- a/clients/typescript/test/satellite/merge.test.ts +++ b/clients/typescript/test/satellite/merge.test.ts @@ -223,18 +223,17 @@ const setupPglite: SetupFn = async (t: ExecutionContext) => { // Migrate the DB with the necessary tables and triggers const personTable = getPersonTable(namespace) + const qualifiedPersonTable = personTable.qualifiedTableName await migrateDb(adapter, personTable, builder) // Insert a row in the table - const insertRowSQL = `INSERT INTO "${personTable.namespace}"."${ - personTable.tableName - }" (id, name, age, bmi, int8, blob) VALUES (54321, 'John Doe', 30, 25.5, 7, ${builder.hexValue( + const insertRowSQL = `INSERT INTO ${qualifiedPersonTable} (id, name, age, bmi, int8, blob) VALUES (54321, 'John Doe', 30, 25.5, 7, ${builder.hexValue( '0001ff' )})` await adapter.run({ sql: insertRowSQL }) // Fetch the oplog entry for the inserted row - const oplogTable = `"${defaults.oplogTable.namespace}"."${defaults.oplogTable.tablename}"` + const oplogTable = `${defaults.oplogTable}` const oplogRows = await adapter.query({ sql: `SELECT * FROM ${oplogTable}`, }) @@ -250,7 +249,8 @@ const setupPglite: SetupFn = async (t: ExecutionContext) => { commit_timestamp: to_commit_timestamp('1970-01-02T03:46:42.000Z'), changes: [ { - relation: relations[personTable.tableName as keyof typeof relations], + relation: + relations[qualifiedPersonTable.tablename as keyof typeof relations], type: DataChangeType.INSERT, record: { // fields must be ordered alphabetically to match the behavior of the triggers @@ -278,10 +278,7 @@ const setupPglite: SetupFn = async (t: ExecutionContext) => { const pk = primaryKeyToStr({ id: 54321 }) // the incoming transaction wins - const qualifiedTableName = new QualifiedTablename( - personTable.namespace, - personTable.tableName - ).toString() + const qualifiedTableName = qualifiedPersonTable.toString() t.like(merged, { [qualifiedTableName]: { [pk]: { optype: 'UPSERT' } }, }) diff --git a/clients/typescript/test/satellite/process.ts b/clients/typescript/test/satellite/process.ts index 329120d879..d7edcc0619 100644 --- a/clients/typescript/test/satellite/process.ts +++ b/clients/typescript/test/satellite/process.ts @@ -1654,7 +1654,7 @@ export const processTests = (test: TestFn) => { await runMigrations() const tablename = 'parent' - const qualified = new QualifiedTablename(namespace, tablename) + const qualifiedTableName = new QualifiedTablename(namespace, tablename) // relations must be present at subscription delivery client.setRelations(relations) @@ -1675,7 +1675,7 @@ export const processTests = (test: TestFn) => { t.is(notifier.notifications.length, 2) 
t.is(notifier.notifications[1].changes.length, 1) t.deepEqual(notifier.notifications[1].changes[0], { - qualifiedTablename: qualified, + qualifiedTablename: qualifiedTableName, recordChanges: [ { primaryKey: { id: 1 }, @@ -1686,7 +1686,6 @@ export const processTests = (test: TestFn) => { }) // wait for process to apply shape data - const qualifiedTableName = `"${namespace}"."${tablename}"` try { const row = await adapter.query({ sql: `SELECT id FROM ${qualifiedTableName}`, @@ -1792,7 +1791,7 @@ export const processTests = (test: TestFn) => { await runMigrations() const tablename = 'parent' - const qualified = `"${namespace}"."${tablename}"` + const qualified = new QualifiedTablename(namespace, tablename) // relations must be present at subscription delivery client.setRelations(relations) @@ -2306,7 +2305,7 @@ export const processTests = (test: TestFn) => { await runMigrations() const tablename = 'parent' - const qualified = `"${namespace}"."${tablename}"` + const qualified = new QualifiedTablename(namespace, tablename) // relations must be present at subscription delivery client.setRelations(relations) From 42a081658df882c4db0c40f52e1cfe2f94acedc6 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 1 May 2024 14:32:42 +0200 Subject: [PATCH 138/156] Modified tableInfo query to also take the namespace into account --- .../src/migrators/query-builder/builder.ts | 2 +- .../src/migrators/query-builder/pgBuilder.ts | 7 ++++--- .../src/migrators/query-builder/sqliteBuilder.ts | 4 ++-- clients/typescript/src/util/relations.ts | 5 +++-- .../typescript/test/satellite/process.migration.ts | 14 +++++++++++--- 5 files changed, 21 insertions(+), 11 deletions(-) diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index 06f75e807f..4b2c079f84 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -99,7 +99,7 @@ export abstract class QueryBuilder { * The information includes all column names, their type, * whether or not they are nullable, and whether they are part of the PK. */ - abstract getTableInfo(tablename: string): Statement + abstract getTableInfo(table: QualifiedTablename): Statement /** * Insert a row into a table, ignoring it if it already exists. 
diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index a7eb342f46..0b83464b63 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -92,7 +92,7 @@ class PgBuilder extends QueryBuilder { } } - getTableInfo(tablename: string): Statement { + getTableInfo(table: QualifiedTablename): Statement { return { sql: dedent` SELECT @@ -120,9 +120,10 @@ class PgBuilder extends QueryBuilder { ) FROM information_schema.columns AS c WHERE - c.table_name = $1; + c.table_name = $1 AND + c.table_schema = $2; `, - args: [tablename], + args: [table.tablename, table.namespace], } } diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts index 22dc46fac6..157341c127 100644 --- a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -63,10 +63,10 @@ class SqliteBuilder extends QueryBuilder { return `x'${hexString}'` } - getTableInfo(tablename: string): Statement { + getTableInfo(table: QualifiedTablename): Statement { return { sql: `SELECT name, type, "notnull", dflt_value, pk FROM pragma_table_info(?)`, - args: [tablename], + args: [table.tablename], } } diff --git a/clients/typescript/src/util/relations.ts b/clients/typescript/src/util/relations.ts index 71b6dd96e2..0faf14fde9 100644 --- a/clients/typescript/src/util/relations.ts +++ b/clients/typescript/src/util/relations.ts @@ -2,6 +2,7 @@ import { SatRelation_RelationType } from '../_generated/protocol/satellite' import { DatabaseAdapter } from '../electric/adapter' import { QueryBuilder } from '../migrators/query-builder' import { SatelliteOpts } from '../satellite/config' +import { QualifiedTablename } from './tablename' import { Relation, RelationsCache } from './types' // TODO: Improve this code once with Migrator and consider simplifying oplog. 
@@ -14,11 +15,11 @@ export async function inferRelationsFromDb( const relations: RelationsCache = {} let id = 0 - const schema = 'public' // TODO + const schema = builder.defaultNamespace for (const table of tableNames) { const tableName = table.name const columnsForTable = (await adapter.query( - builder.getTableInfo(tableName) + builder.getTableInfo(new QualifiedTablename(schema, tableName)) )) as { name: string type: string diff --git a/clients/typescript/test/satellite/process.migration.ts b/clients/typescript/test/satellite/process.migration.ts index ad7cecc3c4..5bb2a204dc 100644 --- a/clients/typescript/test/satellite/process.migration.ts +++ b/clients/typescript/test/satellite/process.migration.ts @@ -10,6 +10,7 @@ import { generateTag } from '../../src/satellite/oplog' import { DataChange, DataChangeType, + QualifiedTablename, Relation, Row, SchemaChange, @@ -94,7 +95,7 @@ async function assertDbHasTables( } async function getTableInfo( - table: string, + table: QualifiedTablename, t: ExecutionContext ): Promise { const { adapter, builder } = t.context @@ -247,7 +248,11 @@ export const processMigrationTests = (test: TestFn) => { async function checkMigrationIsApplied(t: ExecutionContext) { await assertDbHasTables(t, 'parent', 'child', 'NewTable') - const newTableInfo = await getTableInfo('NewTable', t) + const { namespace } = t.context + const newTableInfo = await getTableInfo( + new QualifiedTablename(namespace, 'NewTable'), + t + ) const expectedTables = [ { @@ -265,7 +270,10 @@ export const processMigrationTests = (test: TestFn) => { t.true(newTableInfo.some((t) => isEqual(t, tbl))) }) - const parentTableInfo = await getTableInfo('parent', t) + const parentTableInfo = await getTableInfo( + new QualifiedTablename(namespace, 'parent'), + t + ) const parentTableHasColumn = parentTableInfo.some((col: ColumnInfo) => { return ( col.name === 'baz' && From d8c26e4791b1bf09836b4f5b0ca22241d83bd802 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 1 May 2024 15:03:13 +0200 Subject: [PATCH 139/156] Catch PG unexpected rejection in test --- clients/typescript/test/satellite/process.migration.ts | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/clients/typescript/test/satellite/process.migration.ts b/clients/typescript/test/satellite/process.migration.ts index 5bb2a204dc..20bd62152d 100644 --- a/clients/typescript/test/satellite/process.migration.ts +++ b/clients/typescript/test/satellite/process.migration.ts @@ -50,7 +50,14 @@ export const commonSetup = async (t: ExecutionContext) => { const { satellite, authState, token } = t.context await satellite.start(authState) satellite.setToken(token) - await satellite.connectWithBackoff() + await satellite.connectWithBackoff().catch((e) => { + if (e.message === 'terminating connection due to administrator command') { + // This can happen when we stop Postgres at the end of the test + return + } + throw e + }) + t.context['clientId'] = satellite._authState!.clientId // store clientId in the context await populateDB(t) const txDate = await satellite._performSnapshot() From 3cbfca1fca18af36d793a621f8be7e4832728a33 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 1 May 2024 15:09:57 +0200 Subject: [PATCH 140/156] Improve countTablesIn signature --- clients/typescript/src/migrators/query-builder/builder.ts | 5 ++--- .../typescript/src/migrators/query-builder/pgBuilder.ts | 8 ++++---- .../src/migrators/query-builder/sqliteBuilder.ts | 8 ++++---- clients/typescript/src/satellite/process.ts | 6 +++--- 4 files changed, 
13 insertions(+), 14 deletions(-) diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index 4b2c079f84..5a27012a2e 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -65,10 +65,9 @@ export abstract class QueryBuilder { abstract tableExists(table: QualifiedTablename): Statement /** - * Counts tables whose name is included in `tables`. - * The count is returned as `countName`. + * Counts tables whose name is included in `tableNames`. */ - abstract countTablesIn(countName: string, tables: string[]): Statement + abstract countTablesIn(tableNames: string[]): Statement /** * Converts a column value to a hexidecimal string. diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index 0b83464b63..8691c27d4f 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -40,17 +40,17 @@ class PgBuilder extends QueryBuilder { } } - countTablesIn(countName: string, tables: string[]): Statement { + countTablesIn(tableNames: string[]): Statement { const sql = dedent` - SELECT COUNT(table_name)::integer AS "${countName}" + SELECT COUNT(table_name)::integer AS "count" FROM information_schema.tables WHERE table_type = 'BASE TABLE' AND - table_name IN (${tables.map((_, i) => `$${i + 1}`).join(', ')}); + table_name IN (${tableNames.map((_, i) => `$${i + 1}`).join(', ')}); ` return { sql, - args: tables, + args: tableNames, } } diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts index 157341c127..4efc5ea3f4 100644 --- a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -43,15 +43,15 @@ class SqliteBuilder extends QueryBuilder { } } - countTablesIn(countName: string, tables: string[]): Statement { + countTablesIn(tableNames: string[]): Statement { const sql = dedent` - SELECT count(name) as ${countName} FROM sqlite_master + SELECT count(name) as "count" FROM sqlite_master WHERE type='table' - AND name IN (${tables.map(() => '?').join(', ')}) + AND name IN (${tableNames.map(() => '?').join(', ')}) ` return { sql, - args: tables, + args: tableNames, } } diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index 74c7e4ce73..aa749d3e04 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -946,10 +946,10 @@ export class SatelliteProcess implements Satellite { const oplog = this.opts.oplogTable.tablename const shadow = this.opts.shadowTable.tablename - const [{ numTables }] = await this.adapter.query( - this.builder.countTablesIn('numTables', [meta, oplog, shadow]) + const [{ count }] = await this.adapter.query( + this.builder.countTablesIn([meta, oplog, shadow]) ) - return numTables === 3 + return count === 3 } // Handle auth state changes. 
From 75c6f890040c4f51d9784fe695182e816d80d8f8 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 1 May 2024 15:19:24 +0200 Subject: [PATCH 141/156] Address minor comments --- clients/typescript/src/config/index.ts | 3 ++- .../typescript/src/migrators/query-builder/builder.ts | 10 ---------- .../src/migrators/query-builder/pgBuilder.ts | 8 -------- clients/typescript/src/satellite/process.ts | 3 +-- 4 files changed, 3 insertions(+), 21 deletions(-) diff --git a/clients/typescript/src/config/index.ts b/clients/typescript/src/config/index.ts index 0bed84fff9..96cb0c9a55 100644 --- a/clients/typescript/src/config/index.ts +++ b/clients/typescript/src/config/index.ts @@ -45,7 +45,8 @@ export interface ElectricConfig { } export type ElectricConfigWithDialect = ElectricConfig & { - dialect?: 'SQLite' | 'Postgres' // defaults to SQLite + /** defaults to SQLite */ + dialect?: 'SQLite' | 'Postgres' } export type HydratedConfig = { diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index 5a27012a2e..a0ce1d7dfa 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -37,21 +37,11 @@ export abstract class QueryBuilder { */ abstract pgOnly(query: string): string - /** - * Returns an array containing the given query if the current SQL dialect is PostgreSQL. - */ - abstract pgOnlyQuery(query: string): string[] - /** * Returns the given query if the current SQL dialect is SQLite. */ abstract sqliteOnly(query: string): string - /** - * Returns an array containing the given query if the current SQL dialect is SQLite. - */ - abstract sqliteOnlyQuery(query: string): string[] - /** * Makes the i-th positional parameter, * e.g. 
'$3' For Postgres when `i` is 3 diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index 8691c27d4f..d2ce84eb3a 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -21,18 +21,10 @@ class PgBuilder extends QueryBuilder { return query } - pgOnlyQuery(query: string) { - return [query] - } - sqliteOnly(_query: string) { return '' } - sqliteOnlyQuery(_query: string) { - return [] - } - tableExists(table: QualifiedTablename): Statement { return { sql: `SELECT 1 FROM information_schema.tables WHERE table_schema = $1 AND table_name = $2`, diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index aa749d3e04..34f9fba2a9 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -1166,6 +1166,7 @@ export class SatelliteProcess implements Satellite { const stmts: Statement[] = [] for (const [tablenameStr, mapping] of Object.entries(merged)) { + const qualifiedTableName = QualifiedTablename.parse(tablenameStr) for (const entryChanges of Object.values(mapping)) { const shadowEntry: ShadowEntry = { namespace: entryChanges.namespace, @@ -1174,8 +1175,6 @@ export class SatelliteProcess implements Satellite { tags: encodeTags(entryChanges.tags), } - const qualifiedTableName = QualifiedTablename.parse(tablenameStr) - switch (entryChanges.optype) { case OPTYPES.gone: case OPTYPES.delete: From e176211aed5bd214ed01f1124909748559981abb Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Wed, 1 May 2024 16:24:05 +0200 Subject: [PATCH 142/156] Revert to public schema when inferring relations. --- clients/typescript/src/util/relations.ts | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/clients/typescript/src/util/relations.ts b/clients/typescript/src/util/relations.ts index 0faf14fde9..fdb371774b 100644 --- a/clients/typescript/src/util/relations.ts +++ b/clients/typescript/src/util/relations.ts @@ -15,11 +15,12 @@ export async function inferRelationsFromDb( const relations: RelationsCache = {} let id = 0 - const schema = builder.defaultNamespace for (const table of tableNames) { const tableName = table.name const columnsForTable = (await adapter.query( - builder.getTableInfo(new QualifiedTablename(schema, tableName)) + builder.getTableInfo( + new QualifiedTablename(builder.defaultNamespace, tableName) + ) )) as { name: string type: string @@ -31,7 +32,10 @@ export async function inferRelationsFromDb( } const relation: Relation = { id: id++, - schema: schema, + // schema needs to be 'public' because these relations are used + // by the Satellite process and client to replicate changes to Electric + // and merge incoming changes from Electric, and those use the 'public' namespace. 
+ schema: 'public', table: tableName, tableType: SatRelation_RelationType.TABLE, columns: [], From a247345833faa7ecb2ff95177e0ce406ea487016 Mon Sep 17 00:00:00 2001 From: Oleksii Sholik Date: Wed, 1 May 2024 17:28:16 +0300 Subject: [PATCH 143/156] Fix the failing restartability e2e test --- ...sume_replication_after_server_restart.lux} | 31 ++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) rename e2e/tests/{03.xx_node_satellite_can_resume_replication_after_server_restart.lux.disabled => 03.26_node_satellite_can_resume_replication_after_server_restart.lux} (88%) diff --git a/e2e/tests/03.xx_node_satellite_can_resume_replication_after_server_restart.lux.disabled b/e2e/tests/03.26_node_satellite_can_resume_replication_after_server_restart.lux similarity index 88% rename from e2e/tests/03.xx_node_satellite_can_resume_replication_after_server_restart.lux.disabled rename to e2e/tests/03.26_node_satellite_can_resume_replication_after_server_restart.lux index 671b6b084c..783805c39e 100644 --- a/e2e/tests/03.xx_node_satellite_can_resume_replication_after_server_restart.lux.disabled +++ b/e2e/tests/03.26_node_satellite_can_resume_replication_after_server_restart.lux @@ -183,7 +183,9 @@ !SELECT * FROM electric.client_additional_data; ?$client_2_id \| \d+ \| 1 \| transaction \| \| \\x -# Restart the server and verify that it cleans up client_actions and client_additional_data. +# Restart the server and verify that it doesn't clean up client_actions and +# client_additional_data for client 2 just yet because the client hasn't sent a SatOpLogAck +# message. [shell electric] -initial sync|initial data @@ -205,6 +207,33 @@ ?+Continuing sync for client $client_1_id from ?Continuing sync for client $client_2_id from +[shell pg_1] + !SELECT * FROM electric.client_actions; + ??$client_2_id + ??(1 row) + + !SELECT * FROM electric.client_additional_data; + ??$client_2_id + ??(1 row) + +# Make sure client 2 advances its stored LSN by sending a transaction to it. +[shell pg_1] + !INSERT INTO items (id, content) VALUES \ + ('00000000-0000-0000-0000-000000000007', 'items-7-'); + ??INSERT 0 1 + +[shell satellite_2] + [invoke node_await_get "items-7-"] + +# Restart the client to trigger `ClientReconnectionInfo.advance_on_reconnection()` for +# it that will discard the now implicitly acknowledged actions and additional data. 
+[shell satellite_2] + [invoke client_disconnect] + [invoke client_reconnect] + +[shell electric] + ?Continuing sync for client $client_2_id from + [shell pg_1] !SELECT * FROM electric.client_actions; ??(0 rows) From 9c0ff8239e6bf0ca55c06f26880ea3754d5e45da Mon Sep 17 00:00:00 2001 From: Oleksii Sholik Date: Wed, 1 May 2024 18:01:17 +0300 Subject: [PATCH 144/156] Only build a SatOpLogAck message when it's actually needed --- clients/typescript/src/satellite/client.ts | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/clients/typescript/src/satellite/client.ts b/clients/typescript/src/satellite/client.ts index b946b3f324..92b6cfca9d 100644 --- a/clients/typescript/src/satellite/client.ts +++ b/clients/typescript/src/satellite/client.ts @@ -1257,16 +1257,6 @@ export class SatelliteClient implements Client { // Shouldn't ack the same message if (this.inbound.lastAckedTxId?.eq(this.inbound.lastTxId)) return - const msg: SatPbMsg = { - $type: 'Electric.Satellite.SatOpLogAck', - ackTimestamp: Long.UZERO.add(new Date().getTime()), - lsn: this.inbound.last_lsn!, - transactionId: this.inbound.lastTxId, - subscriptionIds: this.inbound.seenAdditionalDataSinceLastTx.subscriptions, - additionalDataSourceIds: - this.inbound.seenAdditionalDataSinceLastTx.dataRefs, - } - // Send acks earlier rather than later to keep the stream continuous - // definitely send at 70% of allowed lag. const boundary = Math.floor(this.inbound.maxUnackedTxs * 0.7) @@ -1278,6 +1268,16 @@ export class SatelliteClient implements Client { reason == 'timeout' || reason == 'additionalData' ) { + const msg: SatPbMsg = { + $type: 'Electric.Satellite.SatOpLogAck', + ackTimestamp: Long.UZERO.add(new Date().getTime()), + lsn: this.inbound.last_lsn!, + transactionId: this.inbound.lastTxId, + subscriptionIds: this.inbound.seenAdditionalDataSinceLastTx.subscriptions, + additionalDataSourceIds: + this.inbound.seenAdditionalDataSinceLastTx.dataRefs, + } + this.sendMessage(msg) this.inbound.lastAckedTxId = msg.transactionId } From 8ff5890b04908ef502cee50179b8879162094562 Mon Sep 17 00:00:00 2001 From: Oleksii Sholik Date: Wed, 1 May 2024 18:08:10 +0300 Subject: [PATCH 145/156] Fix TS code formatting --- clients/typescript/src/satellite/client.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/clients/typescript/src/satellite/client.ts b/clients/typescript/src/satellite/client.ts index 92b6cfca9d..494238692c 100644 --- a/clients/typescript/src/satellite/client.ts +++ b/clients/typescript/src/satellite/client.ts @@ -1273,7 +1273,8 @@ export class SatelliteClient implements Client { ackTimestamp: Long.UZERO.add(new Date().getTime()), lsn: this.inbound.last_lsn!, transactionId: this.inbound.lastTxId, - subscriptionIds: this.inbound.seenAdditionalDataSinceLastTx.subscriptions, + subscriptionIds: + this.inbound.seenAdditionalDataSinceLastTx.subscriptions, additionalDataSourceIds: this.inbound.seenAdditionalDataSinceLastTx.dataRefs, } From 8521dfab6ef287b87a1fd4966d99f76f24f76a80 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 2 May 2024 08:46:23 +0200 Subject: [PATCH 146/156] Improved function name in tests --- clients/typescript/test/migrators/builder.ts | 2 +- clients/typescript/test/migrators/pglite/builder.test.ts | 4 ++-- clients/typescript/test/migrators/postgres/builder.test.ts | 4 ++-- clients/typescript/test/migrators/sqlite/builder.test.ts | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/clients/typescript/test/migrators/builder.ts 
b/clients/typescript/test/migrators/builder.ts index 4a5887c79f..80e601d8c2 100644 --- a/clients/typescript/test/migrators/builder.ts +++ b/clients/typescript/test/migrators/builder.ts @@ -103,7 +103,7 @@ export type ContextType = { builder: QueryBuilder } -export const bundleTests = (test: TestFn) => { +export const builderTests = (test: TestFn) => { test('parse migration meta data', (t) => { const { migrationMetaData } = t.context const metaData = parseMetadata(migrationMetaData) diff --git a/clients/typescript/test/migrators/pglite/builder.test.ts b/clients/typescript/test/migrators/pglite/builder.test.ts index 4dac188d41..db18c3aca5 100644 --- a/clients/typescript/test/migrators/pglite/builder.test.ts +++ b/clients/typescript/test/migrators/pglite/builder.test.ts @@ -1,6 +1,6 @@ import anyTest, { TestFn } from 'ava' import { makeMigration, parseMetadata } from '../../../src/migrators/builder' -import { ContextType, bundleTests, makeMigrationMetaData } from '../builder' +import { ContextType, builderTests, makeMigrationMetaData } from '../builder' import { PGlite } from '@electric-sql/pglite' import { DatabaseAdapter } from '../../../src/drivers/pglite' import { PgBundleMigrator } from '../../../src/migrators' @@ -21,7 +21,7 @@ test.beforeEach(async (t) => { // No need to run the bundleTests because // they are already ran by `../postgres/builder.test.ts` // and the tests do not use an actual PG database -bundleTests(test) +builderTests(test) test('load migration from meta data', async (t) => { const { migrationMetaData, builder } = t.context diff --git a/clients/typescript/test/migrators/postgres/builder.test.ts b/clients/typescript/test/migrators/postgres/builder.test.ts index fd1bb28d85..eed46809df 100644 --- a/clients/typescript/test/migrators/postgres/builder.test.ts +++ b/clients/typescript/test/migrators/postgres/builder.test.ts @@ -1,6 +1,6 @@ import anyTest, { TestFn } from 'ava' import { makeMigration, parseMetadata } from '../../../src/migrators/builder' -import { ContextType, bundleTests, makeMigrationMetaData } from '../builder' +import { ContextType, builderTests, makeMigrationMetaData } from '../builder' import { makePgDatabase } from '../../support/node-postgres' import { DatabaseAdapter } from '../../../src/drivers/node-postgres' import { PgBundleMigrator } from '../../../src/migrators' @@ -18,7 +18,7 @@ test.beforeEach(async (t) => { } }) -bundleTests(test) +builderTests(test) test('load migration from meta data', async (t) => { const { migrationMetaData, builder } = t.context diff --git a/clients/typescript/test/migrators/sqlite/builder.test.ts b/clients/typescript/test/migrators/sqlite/builder.test.ts index 6d2d3699a8..fe0185cf92 100644 --- a/clients/typescript/test/migrators/sqlite/builder.test.ts +++ b/clients/typescript/test/migrators/sqlite/builder.test.ts @@ -3,7 +3,7 @@ import { makeMigration, parseMetadata } from '../../../src/migrators/builder' import Database from 'better-sqlite3' import { DatabaseAdapter } from '../../../src/drivers/better-sqlite3' import { sqliteBuilder } from '../../../src/migrators/query-builder' -import { ContextType, bundleTests, makeMigrationMetaData } from '../builder' +import { ContextType, builderTests, makeMigrationMetaData } from '../builder' import { SqliteBundleMigrator } from '../../../src/migrators' const test = anyTest as TestFn @@ -18,7 +18,7 @@ test.beforeEach(async (t) => { } }) -bundleTests(test) +builderTests(test) test('load migration from meta data', async (t) => { const { migrationMetaData, builder } = t.context From 
acfd0faa7c2867e30873275b3f711ff29b54f8cf Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 2 May 2024 09:09:57 +0200 Subject: [PATCH 147/156] Move deferring and disabling FKs into 1 query builder method --- .../src/migrators/query-builder/builder.ts | 9 ++---- .../src/migrators/query-builder/pgBuilder.ts | 4 +-- .../migrators/query-builder/sqliteBuilder.ts | 4 +-- clients/typescript/src/satellite/process.ts | 32 +++++++------------ 4 files changed, 16 insertions(+), 33 deletions(-) diff --git a/clients/typescript/src/migrators/query-builder/builder.ts b/clients/typescript/src/migrators/query-builder/builder.ts index a0ce1d7dfa..60d28985a6 100644 --- a/clients/typescript/src/migrators/query-builder/builder.ts +++ b/clients/typescript/src/migrators/query-builder/builder.ts @@ -17,20 +17,15 @@ export abstract class QueryBuilder { */ abstract readonly BLOB: string - /** - * Defers foreign key checks for the current transaction. - */ - abstract readonly deferForeignKeys: string - /** * Queries the version of SQLite/Postgres we are using. */ abstract readonly getVersion: string /** - * Disables foreign key checks. + * Depending on the dialect, defers or disables foreign key checks for the duration of the transaction. */ - abstract readonly disableForeignKeys: string + abstract readonly deferOrDisableFKsForTx: string /** * Returns the given query if the current SQL dialect is PostgreSQL. diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index d2ce84eb3a..adf1ac509a 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -9,13 +9,13 @@ class PgBuilder extends QueryBuilder { readonly dialect = 'Postgres' readonly AUTOINCREMENT_PK = 'SERIAL PRIMARY KEY' readonly BLOB = 'TEXT' - readonly deferForeignKeys = 'SET CONSTRAINTS ALL DEFERRED;' readonly getVersion = 'SELECT version();' readonly paramSign = '$' readonly defaultNamespace = 'public' /** **Disables** FKs for the duration of the transaction */ - readonly disableForeignKeys = 'SET LOCAL session_replication_role = replica;' + readonly deferOrDisableFKsForTx = + 'SET LOCAL session_replication_role = replica;' pgOnly(query: string) { return query diff --git a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts index 4efc5ea3f4..2115335c57 100644 --- a/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/sqliteBuilder.ts @@ -7,7 +7,7 @@ class SqliteBuilder extends QueryBuilder { readonly dialect = 'SQLite' readonly AUTOINCREMENT_PK = 'INTEGER PRIMARY KEY AUTOINCREMENT' readonly BLOB = 'BLOB' - readonly deferForeignKeys = 'PRAGMA defer_foreign_keys = ON;' + readonly deferOrDisableFKsForTx = 'PRAGMA defer_foreign_keys = ON;' readonly getVersion = 'SELECT sqlite_version() AS version' readonly maxSqlParameters = 65535 readonly paramSign = '?' 
@@ -18,8 +18,6 @@ class SqliteBuilder extends QueryBuilder { 'sqlite_temp_schema', ] - readonly disableForeignKeys = 'PRAGMA foreign_keys = OFF;' - pgOnly(_query: string) { return '' } diff --git a/clients/typescript/src/satellite/process.ts b/clients/typescript/src/satellite/process.ts index 34f9fba2a9..3c18e64ddf 100644 --- a/clients/typescript/src/satellite/process.ts +++ b/clients/typescript/src/satellite/process.ts @@ -305,7 +305,7 @@ export class SatelliteProcess implements Satellite { })) const stmtsWithTriggers = [ - { sql: this.builder.deferForeignKeys }, + { sql: this.builder.deferOrDisableFKsForTx }, ...this._disableTriggers(tables), ...deleteStmts, ...this._enableTriggers(tables), @@ -476,16 +476,11 @@ export class SatelliteProcess implements Satellite { const namespace = this.builder.defaultNamespace const stmts: Statement[] = [] - if (this.builder.dialect === 'Postgres') { - // disable FK checks because order of inserts - // may not respect referential integrity - // and Postgres doesn't let us defer FKs - // that were not originally defined as deferrable - stmts.push({ sql: this.builder.disableForeignKeys }) - } else { - // Defer FKs on SQLite - stmts.push({ sql: this.builder.deferForeignKeys }) - } + // Defer (SQLite) or temporarily disable FK checks (Postgres) + // because order of inserts may not respect referential integrity + // and Postgres doesn't let us defer FKs + // that were not originally defined as deferrable + stmts.push({ sql: this.builder.deferOrDisableFKsForTx }) // It's much faster[1] to do less statements to insert the data instead of doing an insert statement for each row // so we're going to do just that, but with a caveat: SQLite has a max number of parameters in prepared statements, @@ -1299,16 +1294,11 @@ export class SatelliteProcess implements Satellite { const lsn = transaction.lsn let firstDMLChunk = true - if (this.builder.dialect === 'Postgres') { - // Temporarily disable FK checks because order of inserts - // may not respect referential integrity - // and Postgres doesn't let us defer FKs - // that were not originally defined as deferrable - stmts.push({ sql: this.builder.disableForeignKeys }) - } else { - // Defer FKs on SQLite - stmts.push({ sql: this.builder.deferForeignKeys }) - } + // Defer (SQLite) or temporarily disable FK checks (Postgres) + // because order of inserts may not respect referential integrity + // and Postgres doesn't let us defer FKs + // that were not originally defined as deferrable + stmts.push({ sql: this.builder.deferOrDisableFKsForTx }) // update lsn. stmts.push(this.updateLsnStmt(lsn)) From e3996b52abf95a7517e2d2daf08e5282bf685520 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 2 May 2024 09:21:30 +0200 Subject: [PATCH 148/156] Improved regex to parse qualified table names --- clients/typescript/src/util/tablename.ts | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/clients/typescript/src/util/tablename.ts b/clients/typescript/src/util/tablename.ts index 5612a1f377..59159eb344 100644 --- a/clients/typescript/src/util/tablename.ts +++ b/clients/typescript/src/util/tablename.ts @@ -21,9 +21,10 @@ export class QualifiedTablename { static parse(fullyQualifiedName: string): QualifiedTablename { try { - const [_, namespace, tablename] = /"(.*)"\."(.*)"/.exec( - fullyQualifiedName - )! + // allow only paired double quotes within the quotes + // identifiers can't be empty + const [_, namespace, tablename] = + /^"((?:[^"]|"")+)"\."((?:[^"]|"")+)"$/.exec(fullyQualifiedName)! 
return new QualifiedTablename( unescDoubleQ(namespace), unescDoubleQ(tablename) From 4a5e45b58bc090ccb8a9194a33595f25787db47a Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 2 May 2024 09:45:30 +0200 Subject: [PATCH 149/156] Modify getLocalTableNames to be more efficient in PG dialect --- .../src/migrators/query-builder/pgBuilder.ts | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/clients/typescript/src/migrators/query-builder/pgBuilder.ts b/clients/typescript/src/migrators/query-builder/pgBuilder.ts index adf1ac509a..cce4598b98 100644 --- a/clients/typescript/src/migrators/query-builder/pgBuilder.ts +++ b/clients/typescript/src/migrators/query-builder/pgBuilder.ts @@ -66,15 +66,16 @@ class PgBuilder extends QueryBuilder { getLocalTableNames(notIn: string[] = []): Statement { let tables = dedent` - SELECT table_name AS name - FROM information_schema.tables - WHERE - table_type = 'BASE TABLE' AND - table_schema <> 'pg_catalog' AND - table_schema <> 'information_schema' + SELECT relname AS name + FROM pg_class + JOIN pg_namespace ON relnamespace = pg_namespace.oid + WHERE + relkind = 'r' + AND nspname <> 'pg_catalog' + AND nspname <> 'information_schema' ` if (notIn.length > 0) { - tables += ` AND table_name NOT IN (${notIn + tables += `\n AND relname NOT IN (${notIn .map((_, i) => `$${i + 1}`) .join(', ')})` } From 7a87aab1cd04abebbedd8c5f313cbf2a8f8ef349 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 2 May 2024 09:50:06 +0200 Subject: [PATCH 150/156] Style improvement --- clients/typescript/src/migrators/schema.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/clients/typescript/src/migrators/schema.ts b/clients/typescript/src/migrators/schema.ts index 164f0a9de5..6e756b8040 100644 --- a/clients/typescript/src/migrators/schema.ts +++ b/clients/typescript/src/migrators/schema.ts @@ -27,9 +27,7 @@ export const buildInitialMigration = (builder: QueryBuilder) => { `DROP TABLE IF EXISTS ${triggersTable};`, `CREATE TABLE ${triggersTable} ("namespace" TEXT, "tablename" TEXT, "flag" INTEGER, PRIMARY KEY ("namespace", "tablename"));`, //`-- Somewhere to keep dependency tracking information\n`, - `CREATE TABLE "${shadowTable.namespace}"."${ - shadowTable.tablename - }" (\n ${builder.pgOnly( + `CREATE TABLE ${shadowTable} (\n ${builder.pgOnly( '"rowid" SERIAL,' )} "namespace" TEXT NOT NULL,\n "tablename" TEXT NOT NULL,\n "primaryKey" TEXT NOT NULL,\n "tags" TEXT NOT NULL,\n PRIMARY KEY ("namespace", "tablename", "primaryKey"));`, ], From aefecbd657276309a23ba5d973f5776f6e744f68 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 2 May 2024 09:53:31 +0200 Subject: [PATCH 151/156] Remove unused tauri dependency --- clients/typescript/package.json | 1 - pnpm-lock.yaml | 8 -------- 2 files changed, 9 deletions(-) diff --git a/clients/typescript/package.json b/clients/typescript/package.json index b9214ff9d0..589c51279f 100644 --- a/clients/typescript/package.json +++ b/clients/typescript/package.json @@ -179,7 +179,6 @@ "dependencies": { "@electric-sql/prisma-generator": "workspace:*", "@prisma/client": "4.8.1", - "@tauri-apps/api": "^1.5.3", "async-mutex": "^0.4.0", "base-64": "^1.0.0", "better-sqlite3": "^8.4.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 4626567d4b..d3fbf6d12e 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -23,9 +23,6 @@ importers: '@prisma/client': specifier: 4.8.1 version: 4.8.1(prisma@4.8.1) - '@tauri-apps/api': - specifier: ^1.5.3 - version: 1.5.3 async-mutex: specifier: ^0.4.0 version: 
0.4.0 @@ -6300,11 +6297,6 @@ packages: defer-to-connect: 2.0.1 dev: true - /@tauri-apps/api@1.5.3: - resolution: {integrity: sha512-zxnDjHHKjOsrIzZm6nO5Xapb/BxqUq1tc7cGkFXsFkGTsSWgCPH1D8mm0XS9weJY2OaR73I3k3S+b7eSzJDfqA==} - engines: {node: '>= 14.6.0', npm: '>= 6.6.0', yarn: '>= 1.19.1'} - dev: false - /@tauri-apps/api@2.0.0-alpha.13: resolution: {integrity: sha512-sGgCkFahF3OZAHoGN5Ozt9WK7wJlbVZSgWpPQKNag4nSOX1+Py6VDRTEWriiJHDiV+gg31CWHnNXRy6TFoZmdA==} engines: {node: '>= 18', npm: '>= 6.6.0', yarn: '>= 1.19.1'} From 3fcebf5f98825c3dcfa9fb0fda722a65b71a94d6 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 2 May 2024 10:56:39 +0200 Subject: [PATCH 152/156] Simplified transformFields --- .../src/client/conversions/input.ts | 28 +++++++++++-------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/clients/typescript/src/client/conversions/input.ts b/clients/typescript/src/client/conversions/input.ts index 80d238813e..8a1d2d062b 100644 --- a/clients/typescript/src/client/conversions/input.ts +++ b/clients/typescript/src/client/conversions/input.ts @@ -333,15 +333,21 @@ export function transformFields( ): object { // only transform fields that are part of this table and not related fields // as those will be transformed later when the query on the related field is processed. - const fieldsAndValues = Object.entries(keepTableFieldsOnly(o, fields)) - const fieldsAndTransformedValues = fieldsAndValues.map((entry) => { - const [field, value] = entry - return transformField(field, value, o, fields, converter, transformation) + const copied: Record = { ...o } + Object.entries(o).forEach(([field, value]) => { + const pgType = fields.get(field) + // Skip anything that's not an actual column on the table + if (pgType === undefined) return + + const transformedValue = + transformation === Transformation.Encode + ? converter.encode(value, pgType) + : converter.decode(value, pgType) + + copied[field] = transformedValue }) - return { - ...o, - ...Object.fromEntries(fieldsAndTransformedValues), - } + + return copied } /** @@ -390,16 +396,16 @@ export function isFilterObject(value: any): boolean { * @returns A filtered object. */ function keepTableFieldsOnly(o: object, fields: Fields) { - return filterKeys(o, new Set(fields.keys())) + return filterKeys(o, fields) } /** * Filters the object to retain only keys that are in `keys`. * @param o The object to filter. - * @param keys The keys to keep. + * @param keys Object that allows checking if a key is present. * @returns A filtered object. 
*/ -function filterKeys(o: object, keys: Set) { +function filterKeys(o: object, keys: { has: (x: string) => boolean }) { return Object.fromEntries( Object.entries(o).filter((entry) => keys.has(entry[0])) ) From e8621caf0bdf49063ba6b3584408e27471318c5d Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 2 May 2024 11:40:27 +0200 Subject: [PATCH 153/156] Properly quote identifiers in DAL --- .../typescript/src/client/model/builder.ts | 93 ++++++++++--------- clients/typescript/src/util/tablename.ts | 2 +- 2 files changed, 49 insertions(+), 46 deletions(-) diff --git a/clients/typescript/src/client/model/builder.ts b/clients/typescript/src/client/model/builder.ts index 66e4d3194f..5939f231fe 100644 --- a/clients/typescript/src/client/model/builder.ts +++ b/clients/typescript/src/client/model/builder.ts @@ -18,6 +18,7 @@ import { PgBasicType } from '../conversions/types' import { HKT } from '../util/hkt' import { Dialect } from '../../migrators/query-builder/builder' import { isFilterObject } from '../conversions/input' +import { escDoubleQ } from '../../util' const squelPostgres = squel.useFlavour('postgres') squelPostgres.registerValueHandler('bigint', function (bigint) { @@ -50,7 +51,7 @@ export class Builder { >, public dialect: Dialect ) { - this._quotedTableName = `"${this._tableName}"` + this._quotedTableName = quoteIdentifier(this._tableName) squelPostgres.cls.DefaultQueryBuilderOptions.nameQuoteCharacter = '"' squelPostgres.cls.DefaultQueryBuilderOptions.autoQuoteFieldNames = true if (dialect === 'Postgres') { @@ -258,9 +259,10 @@ export class Builder { private castBigIntToText(field: string) { const pgType = this._tableDescription.fields.get(field) if (pgType === PgBasicType.PG_INT8 && this.dialect === 'SQLite') { - return `cast("${field}" as TEXT) AS "${field}"` + const quotedField = quoteIdentifier(field) + return `cast(${quotedField} as TEXT) AS ${quotedField}` } - return `"${field}"` + return quoteIdentifier(field) } private addOrderBy(i: AnyFindInput, q: PostgresSelect): PostgresSelect { @@ -289,7 +291,7 @@ export class Builder { // have to quote the field name ourselves // because Squel does not seem to auto quote // field names in order by statements - return query.order(`"${field}"`, squelOrder) + return query.order(quoteIdentifier(field), squelOrder) }, q) } @@ -316,7 +318,10 @@ export class Builder { if (pgType === PgBasicType.PG_INT8 && this.dialect === 'SQLite') { // make a raw string and quote the field name ourselves // because otherwise Squel would add quotes around the entire cast - const f = squelPostgres.rstr(`cast("${field}" as TEXT) AS "${field}"`) + const quotedField = quoteIdentifier(field) + const f = squelPostgres.rstr( + `cast(${quotedField} as TEXT) AS ${quotedField}` + ) return query.returning(f) } return query.returning(field) @@ -351,7 +356,7 @@ export function makeFilter( prefixFieldsWith = '' ): Array<{ sql: string; args?: unknown[] }> { if (fieldValue === null) - return [{ sql: `${prefixFieldsWith}"${fieldName}" IS NULL` }] + return [{ sql: `${prefixFieldsWith}${quoteIdentifier(fieldName)} IS NULL` }] else if (fieldName === 'AND' || fieldName === 'OR' || fieldName === 'NOT') { return [ makeBooleanFilter( @@ -408,9 +413,8 @@ export function makeFilter( const [filter, handler] = entry if (filter in obj) { const sql = handler( - fieldName, - obj[filter as keyof typeof obj], - prefixFieldsWith + prefixFieldsWith + quoteIdentifier(fieldName), + obj[filter as keyof typeof obj] ) filters.push(sql) } @@ -421,7 +425,10 @@ export function makeFilter( // 
needed because `WHERE field = NULL` is not valid SQL else return [ - { sql: `${prefixFieldsWith}"${fieldName}" = ?`, args: [fieldValue] }, + { + sql: `${prefixFieldsWith}${quoteIdentifier(fieldName)} = ?`, + args: [fieldValue], + }, ] } @@ -476,108 +483,97 @@ function makeBooleanFilter( function makeEqualsFilter( fieldName: string, - value: unknown | undefined, - prefixFieldsWith: string + value: unknown | undefined ): { sql: string; args?: unknown[] } { - return { sql: `${prefixFieldsWith}"${fieldName}" = ?`, args: [value] } + return { sql: `${fieldName} = ?`, args: [value] } } function makeInFilter( fieldName: string, - values: unknown[] | undefined, - prefixFieldsWith: string + values: unknown[] | undefined ): { sql: string; args?: unknown[] } { - return { sql: `${prefixFieldsWith}"${fieldName}" IN ?`, args: [values] } + return { sql: `${fieldName} IN ?`, args: [values] } } function makeNotInFilter( fieldName: string, - values: unknown[] | undefined, - prefixFieldsWith: string + values: unknown[] | undefined ): { sql: string; args?: unknown[] } { - return { sql: `${prefixFieldsWith}"${fieldName}" NOT IN ?`, args: [values] } + return { sql: `${fieldName} NOT IN ?`, args: [values] } } function makeNotFilter( fieldName: string, - value: unknown, - prefixFieldsWith: string + value: unknown ): { sql: string; args?: unknown[] } { if (value === null) { // needed because `WHERE field != NULL` is not valid SQL - return { sql: `${prefixFieldsWith}"${fieldName}" IS NOT NULL` } + return { sql: `${fieldName} IS NOT NULL` } } else { - return { sql: `${prefixFieldsWith}"${fieldName}" != ?`, args: [value] } + return { sql: `${fieldName} != ?`, args: [value] } } } function makeLtFilter( fieldName: string, - value: unknown, - prefixFieldsWith: string + value: unknown ): { sql: string; args?: unknown[] } { - return { sql: `${prefixFieldsWith}"${fieldName}" < ?`, args: [value] } + return { sql: `${fieldName} < ?`, args: [value] } } function makeLteFilter( fieldName: string, - value: unknown, - prefixFieldsWith: string + value: unknown ): { sql: string; args?: unknown[] } { - return { sql: `${prefixFieldsWith}"${fieldName}" <= ?`, args: [value] } + return { sql: `${fieldName} <= ?`, args: [value] } } function makeGtFilter( fieldName: string, - value: unknown, - prefixFieldsWith: string + value: unknown ): { sql: string; args?: unknown[] } { - return { sql: `${prefixFieldsWith}"${fieldName}" > ?`, args: [value] } + return { sql: `${fieldName} > ?`, args: [value] } } function makeGteFilter( fieldName: string, - value: unknown, - prefixFieldsWith: string + value: unknown ): { sql: string; args?: unknown[] } { - return { sql: `${prefixFieldsWith}"${fieldName}" >= ?`, args: [value] } + return { sql: `${fieldName} >= ?`, args: [value] } } function makeStartsWithFilter( fieldName: string, - value: unknown, - prefixFieldsWith: string + value: unknown ): { sql: string; args?: unknown[] } { if (typeof value !== 'string') throw new Error('startsWith filter must be a string') return { - sql: `${prefixFieldsWith}"${fieldName}" LIKE ?`, + sql: `${fieldName} LIKE ?`, args: [`${escapeLike(value)}%`], } } function makeEndsWithFilter( fieldName: string, - value: unknown, - prefixFieldsWith: string + value: unknown ): { sql: string; args?: unknown[] } { if (typeof value !== 'string') throw new Error('endsWith filter must be a string') return { - sql: `${prefixFieldsWith}"${fieldName}" LIKE ?`, + sql: `${fieldName} LIKE ?`, args: [`%${escapeLike(value)}`], } } function makeContainsFilter( fieldName: string, - value: unknown, - 
prefixFieldsWith: string + value: unknown ): { sql: string; args?: unknown[] } { if (typeof value !== 'string') throw new Error('contains filter must be a string') return { - sql: `${prefixFieldsWith}"${fieldName}" LIKE ?`, + sql: `${fieldName} LIKE ?`, args: [`%${escapeLike(value)}%`], } } @@ -601,7 +597,7 @@ function addDistinct(i: AnyFindInput, q: PostgresSelect): PostgresSelect { // have to quote the fields ourselves // because Squel does not seem to auto quote // field names in order by statements - return q.distinct(...i.distinct.map((f) => `"${f}"`)) + return q.distinct(...i.distinct.map(quoteIdentifier)) } /** @@ -613,3 +609,10 @@ function addDistinct(i: AnyFindInput, q: PostgresSelect): PostgresSelect { function getSelectedFields(obj: object): string[] { return Object.keys(obj).filter((key) => obj[key as keyof object]) } + +/** + * Quotes the identifier, thereby, escaping any quotes in the identifier. + */ +function quoteIdentifier(identifier: string): string { + return `"${escDoubleQ(identifier)}"` +} diff --git a/clients/typescript/src/util/tablename.ts b/clients/typescript/src/util/tablename.ts index 59159eb344..8ff86a3d1a 100644 --- a/clients/typescript/src/util/tablename.ts +++ b/clients/typescript/src/util/tablename.ts @@ -60,7 +60,7 @@ export const hasIntersection = ( return false } -function escDoubleQ(str: string): string { +export function escDoubleQ(str: string): string { return str.replaceAll('"', '""') } From c53c877d67b3583e32c3b424445d9e1aa8ab18e2 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 2 May 2024 15:54:44 +0200 Subject: [PATCH 154/156] Remove ElectricDatabase for node-postgres and directly electrify the PG connection --- .../src/drivers/node-postgres/adapter.ts | 68 ++++++++- .../src/drivers/node-postgres/database.ts | 136 +++++------------- .../src/drivers/node-postgres/index.ts | 6 +- .../src/drivers/node-postgres/mock.ts | 22 ++- .../client/model/postgres/datatype.test.ts | 4 +- .../test/drivers/node-postgres.test.ts | 4 +- .../test/migrators/postgres/triggers.test.ts | 12 +- .../typescript/test/support/node-postgres.ts | 4 +- e2e/satellite_client/src/client.ts | 10 +- e2e/tests/_satellite_macros.luxinc | 19 +-- 10 files changed, 144 insertions(+), 141 deletions(-) diff --git a/clients/typescript/src/drivers/node-postgres/adapter.ts b/clients/typescript/src/drivers/node-postgres/adapter.ts index fd744fc8d5..feee05faf4 100644 --- a/clients/typescript/src/drivers/node-postgres/adapter.ts +++ b/clients/typescript/src/drivers/node-postgres/adapter.ts @@ -1,8 +1,18 @@ +import pg from 'pg' import { Database } from './database' import { Row } from '../../util/types' import { Statement } from '../../util' import { SerialDatabaseAdapter as GenericDatabaseAdapter } from '../generic' import { RunResult } from '../../electric/adapter' +import { PgDateType } from '../../client/conversions/types' +import { deserialiseDate } from '../../client/conversions/datatypes/date' + +const originalGetTypeParser = pg.types.getTypeParser + +export type QueryResult = { + rows: Row[] + rowsModified: number +} export class DatabaseAdapter extends GenericDatabaseAdapter { readonly db: Database @@ -14,14 +24,68 @@ export class DatabaseAdapter extends GenericDatabaseAdapter { } async _run(statement: Statement): Promise { - const { rowsModified } = await this.db.exec(statement) + const { rowsModified } = await this.exec(statement) return { rowsAffected: rowsModified, } } async _query(statement: Statement): Promise { - const { rows } = await this.db.exec(statement) + const { rows 
} = await this.exec(statement) return rows } + + async exec(statement: Statement): Promise { + try { + const { rows, rowCount } = await this.db.query({ + text: statement.sql, + values: statement.args, + types: { + getTypeParser: ((oid: number) => { + /* + // Modify the parser to not parse JSON values + // Instead, return them as strings + // our conversions will correctly parse them + if ( + oid === pg.types.builtins.JSON || + oid === pg.types.builtins.JSONB + ) { + return (val) => val + } + */ + + if ( + oid == pg.types.builtins.TIMESTAMP || + oid == pg.types.builtins.TIMESTAMPTZ || + oid == pg.types.builtins.DATE + ) { + // Parse timestamps and date values ourselves + // because the pg parser parses them differently from what we expect + const pgTypes = new Map([ + [pg.types.builtins.TIMESTAMP, PgDateType.PG_TIMESTAMP], + [pg.types.builtins.TIMESTAMPTZ, PgDateType.PG_TIMESTAMPTZ], + [pg.types.builtins.DATE, PgDateType.PG_DATE], + ]) + return (val: string) => + deserialiseDate(val, pgTypes.get(oid) as PgDateType) + } + return originalGetTypeParser(oid) + }) as typeof pg.types.getTypeParser, + }, + }) + return { + rows, + rowsModified: rowCount ?? 0, + } + } catch (e: any) { + console.log('EXEC ERROR: ' + e.message) + console.log( + 'STATEMENT was: ' + + statement.sql + + ' - args: ' + + JSON.stringify(statement.args, null, 2) + ) + throw e + } + } } diff --git a/clients/typescript/src/drivers/node-postgres/database.ts b/clients/typescript/src/drivers/node-postgres/database.ts index 59aa7b7c31..5d6d21d956 100644 --- a/clients/typescript/src/drivers/node-postgres/database.ts +++ b/clients/typescript/src/drivers/node-postgres/database.ts @@ -1,87 +1,47 @@ -import pg from 'pg' -import type { Client } from 'pg' -import { Row, Statement } from '../../util' -import { PgDateType } from '../../client/conversions/types' -import { deserialiseDate } from '../../client/conversions/datatypes/date' +import type { Client, QueryConfig, QueryResult, QueryResultRow } from 'pg' -const originalGetTypeParser = pg.types.getTypeParser - -export type QueryResult = { - rows: Row[] - rowsModified: number -} - -export interface Database { - name: string - exec(statement: Statement): Promise +export type Database = Pick & { + query( + queryConfig: QueryConfig + ): Promise, 'rows' | 'rowCount'>> } -export class ElectricDatabase implements Database { - constructor(public name: string, private db: Client) {} - - async exec(statement: Statement): Promise { - try { - const { rows, rowCount } = await this.db.query({ - text: statement.sql, - values: statement.args, - types: { - getTypeParser: ((oid: number) => { - /* - // Modify the parser to not parse JSON values - // Instead, return them as strings - // our conversions will correctly parse them - if ( - oid === pg.types.builtins.JSON || - oid === pg.types.builtins.JSONB - ) { - return (val) => val - } - */ +type StopFn = () => Promise - if ( - oid == pg.types.builtins.TIMESTAMP || - oid == pg.types.builtins.TIMESTAMPTZ || - oid == pg.types.builtins.DATE - ) { - // Parse timestamps and date values ourselves - // because the pg parser parses them differently from what we expect - const pgTypes = new Map([ - [pg.types.builtins.TIMESTAMP, PgDateType.PG_TIMESTAMP], - [pg.types.builtins.TIMESTAMPTZ, PgDateType.PG_TIMESTAMPTZ], - [pg.types.builtins.DATE, PgDateType.PG_DATE], - ]) - return (val: string) => - deserialiseDate(val, pgTypes.get(oid) as PgDateType) - } - return originalGetTypeParser(oid) - }) as typeof pg.types.getTypeParser, - }, - }) - return { - rows, - 
rowsModified: rowCount ?? 0, - } - } catch (e: any) { - console.log('EXEC ERROR: ' + e.message) - console.log( - 'STATEMENT was: ' + - statement.sql + - ' - args: ' + - JSON.stringify(statement.args, null, 2) - ) - throw e - } - } +type PostgresConfig = { + /** + * The name of the database. + */ + name: string + /** + * The location where the data should be persisted to. + */ + databaseDir: string + /** + * Default is 'postgres'. + */ + user?: string + /** + * Default is 'password'. + */ + password?: string + /** + * Default is 54321. + */ + port?: number + /** + * When set to fale, the database will be deleted when the DB is stopped. + * Default is true. + */ + persistent?: boolean } -type StopFn = () => Promise - /** * Creates and opens a DB backed by Postgres */ export async function createEmbeddedPostgres( config: PostgresConfig -): Promise<{ db: ElectricDatabase; stop: StopFn }> { +): Promise<{ db: Database; stop: StopFn }> { const EmbeddedPostgres = (await import('embedded-postgres')).default // Initialize Postgres const pg = new EmbeddedPostgres({ @@ -101,35 +61,7 @@ export async function createEmbeddedPostgres( // We use the database directory as the name // because it uniquely identifies the DB return { - db: new ElectricDatabase(config.databaseDir, db), + db, stop: () => pg.stop(), } } - -type PostgresConfig = { - /** - * The name of the database. - */ - name: string - /** - * The location where the data should be persisted to. - */ - databaseDir: string - /** - * Default is 'postgres'. - */ - user?: string - /** - * Default is 'password'. - */ - password?: string - /** - * Default is 54321. - */ - port?: number - /** - * When set to fale, the database will be deleted when the DB is stopped. - * Default is true. - */ - persistent?: boolean -} diff --git a/clients/typescript/src/drivers/node-postgres/index.ts b/clients/typescript/src/drivers/node-postgres/index.ts index 22c548e151..ecd3ff5977 100644 --- a/clients/typescript/src/drivers/node-postgres/index.ts +++ b/clients/typescript/src/drivers/node-postgres/index.ts @@ -1,13 +1,13 @@ import { DatabaseAdapter as DatabaseAdapterI } from '../../electric/adapter' import { DatabaseAdapter } from './adapter' -import { Database, ElectricDatabase, createEmbeddedPostgres } from './database' +import { Database, createEmbeddedPostgres } from './database' import { ElectricConfig } from '../../config' import { electrify as baseElectrify, ElectrifyOptions } from '../../electric' import { WebSocketNode } from '../../sockets/node' import { ElectricClient, DbSchema } from '../../client/model' import { PgBundleMigrator } from '../../migrators/bundle' -export { DatabaseAdapter, ElectricDatabase, createEmbeddedPostgres } +export { DatabaseAdapter, createEmbeddedPostgres } export type { Database } /** @@ -21,7 +21,7 @@ export const electrify = async >( config: ElectricConfig, opts?: ElectrifyOptions ): Promise> => { - const dbName = db.name + const dbName = `${db.host}:${db.port}/${db.database ?? 
''}` const adapter = opts?.adapter || new DatabaseAdapter(db) const migrator = opts?.migrator || new PgBundleMigrator(adapter, dbDescription.pgMigrations) diff --git a/clients/typescript/src/drivers/node-postgres/mock.ts b/clients/typescript/src/drivers/node-postgres/mock.ts index 95899754de..397b987b8f 100644 --- a/clients/typescript/src/drivers/node-postgres/mock.ts +++ b/clients/typescript/src/drivers/node-postgres/mock.ts @@ -1,21 +1,29 @@ -import { Database, QueryResult } from './database' -import { DbName, Statement } from '../../util' +import { Database } from './database' +import { QueryConfig, QueryResult, QueryResultRow } from 'pg' +import { DbName } from '../../util' export class MockDatabase implements Database { name: DbName fail: Error | undefined - constructor(dbName: DbName, fail?: Error) { - this.name = dbName + constructor( + public host: string, + public port: number, + public database?: string, + fail?: Error + ) { + this.name = `${host}:${port}/${database ?? ''}` this.fail = fail } - async exec(_statement: Statement): Promise { + async query( + _queryConfig: QueryConfig + ): Promise, 'rows' | 'rowCount'>> { if (typeof this.fail !== 'undefined') throw this.fail return { - rows: [{ val: 1 }, { val: 2 }], - rowsModified: 0, + rows: [{ val: 1 } as unknown as R, { val: 2 } as unknown as R], + rowCount: 0, } } diff --git a/clients/typescript/test/client/model/postgres/datatype.test.ts b/clients/typescript/test/client/model/postgres/datatype.test.ts index 655f0bed2a..a0607dd261 100644 --- a/clients/typescript/test/client/model/postgres/datatype.test.ts +++ b/clients/typescript/test/client/model/postgres/datatype.test.ts @@ -38,8 +38,8 @@ test.beforeEach(async (t) => { // Sync all shapes such that we don't get warnings on every query await tbl.sync() - await db.exec({ - sql: `CREATE TABLE "DataTypes"("id" INT4 PRIMARY KEY, "date" DATE, "time" TIME, "timetz" TIMETZ, "timestamp" TIMESTAMP, "timestamptz" TIMESTAMPTZ, "bool" BOOL, "uuid" UUID, "int2" INT2, "int4" INT4, "int8" INT8, "float4" FLOAT4, "float8" FLOAT8, "json" JSONB, "bytea" BYTEA, "relatedId" INT4);`, + await db.query({ + text: `CREATE TABLE "DataTypes"("id" INT4 PRIMARY KEY, "date" DATE, "time" TIME, "timetz" TIMETZ, "timestamp" TIMESTAMP, "timestamptz" TIMESTAMPTZ, "bool" BOOL, "uuid" UUID, "int2" INT2, "int4" INT4, "int8" INT8, "float4" FLOAT4, "float8" FLOAT8, "json" JSONB, "bytea" BYTEA, "relatedId" INT4);`, }) t.context = { diff --git a/clients/typescript/test/drivers/node-postgres.test.ts b/clients/typescript/test/drivers/node-postgres.test.ts index 090deaf170..01d9a64c5e 100644 --- a/clients/typescript/test/drivers/node-postgres.test.ts +++ b/clients/typescript/test/drivers/node-postgres.test.ts @@ -5,7 +5,7 @@ import { DatabaseAdapter } from '../../src/drivers/node-postgres' import { makePgDatabase } from '../support/node-postgres' test('database adapter run works', async (t) => { - const db = new MockDatabase('test.db') + const db = new MockDatabase('localhost', 5432, 'test') const adapter = new DatabaseAdapter(db) const sql = 'drop table badgers' @@ -15,7 +15,7 @@ test('database adapter run works', async (t) => { }) test('database adapter query works', async (t) => { - const db = new MockDatabase('test.db') + const db = new MockDatabase('localhost', 5432, 'test') const adapter = new DatabaseAdapter(db) const sql = 'select * from bars' diff --git a/clients/typescript/test/migrators/postgres/triggers.test.ts b/clients/typescript/test/migrators/postgres/triggers.test.ts index 9a14b5ede9..a1619915a3 100644 --- 
a/clients/typescript/test/migrators/postgres/triggers.test.ts +++ b/clients/typescript/test/migrators/postgres/triggers.test.ts @@ -196,11 +196,11 @@ test('oplog insertion trigger should insert row into oplog table', async (t) => // Insert a row in the table const insertRowSQL = `INSERT INTO ${qualifiedPersonTable} (id, name, age, bmi, int8, blob) VALUES (1, 'John Doe', 30, 25.5, 7, '\\x0001ff')` - await db.exec({ sql: insertRowSQL }) + await db.query({ text: insertRowSQL }) // Check that the oplog table contains an entry for the inserted row - const { rows: oplogRows } = await db.exec({ - sql: `SELECT * FROM ${oplogTable}`, + const { rows: oplogRows } = await db.query({ + text: `SELECT * FROM ${oplogTable}`, }) t.is(oplogRows.length, 1) t.deepEqual(oplogRows[0], { @@ -232,11 +232,11 @@ test('oplog trigger should handle Infinity values correctly', async (t) => { // Insert a row in the table const insertRowSQL = `INSERT INTO ${qualifiedPersonTable} (id, name, age, bmi, int8) VALUES ('-Infinity', 'John Doe', 30, 'Infinity', 7)` - await db.exec({ sql: insertRowSQL }) + await db.query({ text: insertRowSQL }) // Check that the oplog table contains an entry for the inserted row - const { rows: oplogRows } = await db.exec({ - sql: `SELECT * FROM ${oplogTable}`, + const { rows: oplogRows } = await db.query({ + text: `SELECT * FROM ${oplogTable}`, }) t.is(oplogRows.length, 1) t.deepEqual(oplogRows[0], { diff --git a/clients/typescript/test/support/node-postgres.ts b/clients/typescript/test/support/node-postgres.ts index e720a73bc1..5cdfe4d116 100644 --- a/clients/typescript/test/support/node-postgres.ts +++ b/clients/typescript/test/support/node-postgres.ts @@ -1,11 +1,11 @@ import fs from 'fs/promises' -import { ElectricDatabase } from '../../src/drivers/node-postgres' +import type { Database } from '../../src/drivers/node-postgres' import { createEmbeddedPostgres } from '../../src/drivers/node-postgres/database' export async function makePgDatabase( name: string, port: number -): Promise<{ db: ElectricDatabase; stop: () => Promise }> { +): Promise<{ db: Database; stop: () => Promise }> { const { db, stop: stopPg } = await createEmbeddedPostgres({ name, databaseDir: `./tmp-${name}`, diff --git a/e2e/satellite_client/src/client.ts b/e2e/satellite_client/src/client.ts index 3ae8c4b6e4..3289c18a8d 100644 --- a/e2e/satellite_client/src/client.ts +++ b/e2e/satellite_client/src/client.ts @@ -3,7 +3,7 @@ import pg from 'pg' import SQLiteDatabase from 'better-sqlite3' import { ElectricConfig } from 'electric-sql' import { mockSecureAuthToken } from 'electric-sql/auth/secure' -import { ElectricDatabase } from 'electric-sql/node-postgres' +import type { Database } from 'electric-sql/node-postgres' import { setLogLevel } from 'electric-sql/debug' import { electrify as electrifySqlite } from 'electric-sql/node' import { electrify as electrifyPg } from 'electric-sql/node-postgres' @@ -21,7 +21,7 @@ let dbName: string let electrify = electrifySqlite let builder: QueryBuilder = sqliteBuilder -async function makePgDatabase(): Promise { +async function makePgDatabase(): Promise { const client = new pg.Client({ host: 'pg_1', port: 5432, @@ -29,12 +29,10 @@ async function makePgDatabase(): Promise { user: 'postgres', password: 'password', }) - + dbName = `${client.host}:${client.port}/${client.database}` await client.connect() - //const stop = () => client.end() - const db = new ElectricDatabase(dbName, client) - return db //{ db, stop } + return client } export const make_db = async (name: string): Promise => { 
diff --git a/e2e/tests/_satellite_macros.luxinc b/e2e/tests/_satellite_macros.luxinc index 0adc979fd9..a86a2d9822 100644 --- a/e2e/tests/_satellite_macros.luxinc +++ b/e2e/tests/_satellite_macros.luxinc @@ -7,14 +7,12 @@ # ?$node !migrations = $migrations ??$node - # Temporarily disable the failure pattern - # because the printed value contains "_connectionError" - # which matches the failure pattern... - - - !originalDb = await client.make_db('e2e_client_${satellite_number}_db') + # Adds a 2nd expression that just returns 0 + # otherwise Node will print the result of the assignment + # but that Database object contains the word "error" + # which would match the fail pattern! + !originalDb = await client.make_db('e2e_client_${satellite_number}_db'); 0 ??$node - # Restore the failure pattern - -$fail_pattern [invoke electrify_db "originalDb" $host $port $migrations $connectToElectric] ??(in electrify_db) config: [endmacro] @@ -24,13 +22,16 @@ # when trying to read `exp` when calling `electrify_db` !if (typeof exp === 'undefined') { var exp = undefined } ??$node - + # Adds a 2nd expression that just returns 0 + # otherwise Node will print the result of the assignment + # but that Database object contains the word "error" + # which would match the fail pattern! !db = await client.electrify_db(originalDb, \ "$host", \ $port, \ $migrations, \ $connectToElectric, \ - exp) + exp); 0 [endmacro] [macro setup_client_with_migrations satellite_number electric port migrations connectToElectric] From 94997c3492bc301282a7bb295a0fc49742a35912 Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 2 May 2024 16:00:19 +0200 Subject: [PATCH 155/156] Remove unused function --- .../src/client/conversions/input.ts | 33 ------------------- 1 file changed, 33 deletions(-) diff --git a/clients/typescript/src/client/conversions/input.ts b/clients/typescript/src/client/conversions/input.ts index 8a1d2d062b..3217f122fe 100644 --- a/clients/typescript/src/client/conversions/input.ts +++ b/clients/typescript/src/client/conversions/input.ts @@ -350,39 +350,6 @@ export function transformFields( return copied } -/** - * Transforms the provided value into a SQLite/PG compatible value - * based on the type of this field. - * @param field The name of the field. - * @param value The value of the field. - * @param o The object to which the field belongs. - * @param fields Type information about the object's fields. - * @param transformation Which transformation to execute. - * @returns The transformed field. - */ -export function transformField( - field: FieldName, - value: any, - o: object, - fields: Fields, - converter: Converter, - transformation: Transformation = Transformation.Encode -): any { - const pgType = fields.get(field) - - if (!pgType) - throw new InvalidArgumentError( - `Unknown field ${field} in object ${JSON.stringify(o)}` - ) - - const transformedValue = - transformation === Transformation.Encode - ? 
converter.encode(value, pgType) - : converter.decode(value, pgType) - - return [field, transformedValue] -} - export function isFilterObject(value: any): boolean { // if it is an object it can only be a data object or a filter object return isObject(value) && !isDataObject(value) From 009b06ee74923f45328afb3d9991a3f73a21b70b Mon Sep 17 00:00:00 2001 From: Kevin De Porre Date: Thu, 2 May 2024 16:37:26 +0200 Subject: [PATCH 156/156] Changeset --- .changeset/breezy-plums-dream.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .changeset/breezy-plums-dream.md diff --git a/.changeset/breezy-plums-dream.md b/.changeset/breezy-plums-dream.md new file mode 100644 index 0000000000..b8d38fb5db --- /dev/null +++ b/.changeset/breezy-plums-dream.md @@ -0,0 +1,7 @@ +--- +"@core/electric": patch +"electric-sql": patch +"@electric-sql/prisma-generator": patch +--- + +Support for a local Postgres database on the client. Also introduces drivers for node Postgres and PGlite.
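
The net effect of patches 154–156 for an application: the node-postgres driver no longer needs the `ElectricDatabase` wrapper, because a plain `pg.Client` already satisfies the driver's `Database` type (`host`, `port`, `database`, `query`), and the driver derives the database name from `host:port/database`. A minimal usage sketch of that flow, assuming the generated `schema` import, the `electrify(conn, schema, config)` call shape, and the service URL are placeholders not defined in this series:

```ts
import pg from 'pg'
import { electrify } from 'electric-sql/node-postgres'
// Hypothetical import: a client schema produced by the project's generator.
import { schema } from './generated/client'

async function main() {
  // A plain node-postgres connection is enough after patch 154 — no
  // ElectricDatabase wrapper; the connection itself is passed to electrify.
  const client = new pg.Client({
    host: 'localhost',
    port: 5432,
    database: 'electric',
    user: 'postgres',
    password: 'password',
  })
  await client.connect()

  // Electrify the connection directly. Internally the driver now names the
  // database "localhost:5432/electric" instead of reading a db.name field.
  const electric = await electrify(client, schema, {
    url: 'http://localhost:5133', // placeholder Electric sync service URL
  })

  return electric
}

main()
```

This sketch stops short of authenticating or syncing shapes; those steps follow the project's existing configuration and are unchanged by this series.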