diff --git a/.github/workflows/build_pglite.yml b/.github/workflows/build_pglite.yml new file mode 100644 index 00000000..878155ac --- /dev/null +++ b/.github/workflows/build_pglite.yml @@ -0,0 +1,186 @@ +name: Build PGLite + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +on: + workflow_dispatch: + inputs: + message: + description: 'Build PGLite' + + push: + branches: main + + pull_request: + branches: main + +jobs: + build: + name: Build and Test PGlite + runs-on: ubuntu-22.04 + env: + PGVERSION: 16.3 + SDK_VERSION: 3.1.64.1bi + SDK_ARCHIVE: python3.12-wasm-sdk-Ubuntu-22.04.tar.lz4 + SDKROOT: /opt/python-wasm-sdk + SYS_PYTHON: /usr/bin/python3 + PGROOT: /tmp/pglite + DEBUG: false + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 1 + + - name: Restore cached + id: cache-restore + uses: actions/cache/restore@v4 + with: + path: | + /tmp/web + /tmp/pglite + /tmp/sdk/postgres-*.tar.gz + postgres + postgresql-* + key: build-cache-${{ hashFiles('cibuild/**', 'patches/**') }} + + - name: Install python-wasm-sdk for emsdk/wasi+prebuilts + if: steps.cache-restore.outputs.cache-hit != 'true' + working-directory: / + run: | + sudo apt-get install -y lz4 wget pv bash + echo https://github.com/pygame-web/python-wasm-sdk/releases/download/$SDK_VERSION/$SDK_ARCHIVE + curl -sL --retry 5 https://github.com/pygame-web/python-wasm-sdk/releases/download/$SDK_VERSION/$SDK_ARCHIVE | tar xvP --use-compress-program=lz4 | pv -p -l -s 24400 >/dev/null + + - name: Build postgres WASM via emsdk + if: steps.cache-restore.outputs.cache-hit != 'true' + run: | + bash ./cibuild.sh + + - name: Build pgvector + if: steps.cache-restore.outputs.cache-hit != 'true' + run: | + bash ./cibuild.sh vector + + - name: Pack node distribution + if: steps.cache-restore.outputs.cache-hit != 'true' + run: | + bash ./cibuild.sh node + + - name: Link postgres WASM for web + if: steps.cache-restore.outputs.cache-hit != 'true' + run: | + bash ./cibuild.sh linkweb + + - name: Cached intermediate + if: steps.cache-restore.outputs.cache-hit != 'true' + id: cache-intermediate + uses: actions/cache/save@v4 + with: + path: | + /tmp/web + /tmp/pglite + /tmp/sdk/postgres-*.tar.gz + postgres + postgresql-* + key: ${{ steps.cache-restore.outputs.cache-primary-key }} + + - uses: pnpm/action-setup@v4 + with: + version: 8 + + - name: Copy release for PGlite build + run: | + bash ./cibuild.sh pglite-prep + + - name: Build PGlite + working-directory: ./packages/pglite + run: | + pnpm install + pnpm run build + pnpm pack + + - name: Build REPL for Example deployment + working-directory: ./packages/repl + run: | + pnpm install + pnpm run build + + - name: Build demo site + run: | + bash ./cibuild.sh demo-site + + - name: Upload Postgres to Github artifacts + id: upload-postgres-wasm + uses: actions/upload-artifact@v4 + with: + name: postgres-wasm + path: /tmp/sdk/postgres-*.tar.gz + retention-days: 90 + + - name: Upload PGlite Interim to Github artifacts + id: upload-pglite-interim-build-files + uses: actions/upload-artifact@v4 + with: + name: pglite-interim-build-files + path: ./packages/pglite/release/** + retention-days: 90 + + - name: Upload PGlite to Github artifacts + id: upload-pglite + uses: actions/upload-artifact@v4 + with: + name: pglite + path: ./packages/pglite/electric-sql-pglite-*.tgz + retention-days: 90 + + - name: Upload Demos to Github artifacts + id: upload-demos + uses: actions/upload-artifact@v4 + with: + name: pglite-demos + path: 
/tmp/web/** + retention-days: 90 + + - name: Deploy to Netlify + uses: nwtgck/actions-netlify@v3.0 + with: + publish-dir: '/tmp/web' + production-branch: master + github-token: ${{ secrets.GITHUB_TOKEN }} + deploy-message: 'Deploy PR${{ github.event.pull_request.id }}: ${{ github.event.pull_request.title }}' + env: + NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }} + NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }} + timeout-minutes: 1 + + - name: Find Comment + uses: peter-evans/find-comment@v3 + id: fc + if: github.event_name == 'pull_request' + with: + issue-number: ${{ github.event.pull_request.number }} + comment-author: 'github-actions[bot]' + body-includes: Built bundles + + - name: Create or update comment + uses: peter-evans/create-or-update-comment@v4 + if: github.event_name == 'pull_request' + with: + comment-id: ${{ steps.fc.outputs.comment-id }} + issue-number: ${{ github.event.pull_request.number }} + body: | + Built bundles: + - Postgres WASM: ${{ steps.upload-postgres-wasm.outputs.artifact-url }} + - PGlite: ${{ steps.upload-pglite.outputs.artifact-url }} + - Interim build files: ${{ steps.upload-pglite-interim-build-files.outputs.artifact-url }} + - Demos: ${{ steps.upload-demos.outputs.artifact-url }} + edit-mode: replace + + - name: Test pglite + working-directory: ./packages/pglite + run: | + pnpm exec playwright install --with-deps + pnpm run test diff --git a/.gitignore b/.gitignore index cbc68900..1143d84b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,12 @@ -node_modules -dist .DS_Store -pgdata-test \ No newline at end of file + +/node_modules +/packages/pglite/dist +/packages/pglite/pgdata-test +/packages/pglite/package-lock.json +/build +/postgresql-16.2.tar.bz2 +/postgresql-16.2 +/postgresql-16.3.tar.bz2 +/postgresql-16.3 +/postgresql diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index 4dd12572..00000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "postgres"] - path = postgres - url = git@github.com:electric-sql/postgres-wasm.git diff --git a/cibuild.sh b/cibuild.sh new file mode 100755 index 00000000..b3f6a9cf --- /dev/null +++ b/cibuild.sh @@ -0,0 +1,363 @@ +#!/bin/bash + +# data transfer zone this is == (wire query size + result size ) + 2 +# expressed in EMSDK MB +export CMA_MB=${CMA_MB:-64} + +export PGVERSION=${PGVERSION:-16.3} +export CI=${CI:-false} +export GITHUB_WORKSPACE=${GITHUB_WORKSPACE:-$(pwd)} +export PGROOT=${PGROOT:-/tmp/pglite} +export WEBROOT=${WEBROOT:-/tmp/web} +export DEBUG=${DEBUG:-false} +export PGDATA=${PGROOT}/base +export PGUSER=postgres + + +# exit on error +EOE=false + +# the default is a user writeable path. +if mkdir -p ${PGROOT}/sdk +then + echo "checking for valid prefix ${PGROOT}" +else + sudo mkdir -p ${PGROOT}/sdk + sudo chown $(whoami) -R ${PGROOT} +fi + +# TODO: also handle PGPASSFILE hostname:port:database:username:password +# https://www.postgresql.org/docs/devel/libpq-pgpass.html +export CRED="-U $PGUSER --pwfile=${PGROOT}/password" + +if [ -f ${PGROOT}/password ] +then + echo "not changing db password" + PGPASS=$(cat ${PGROOT}/password) +else + PGPASS=${PGPASS:-password} + echo ${PGPASS:-password} > ${PGROOT}/password +fi + +export PGPASS + + + +# default to web/release size optim. +if $DEBUG +then + echo "debug not supported on web build" + exit 51 +else + export PGDEBUG="" + export CDEBUG="-g0 -Os" +fi + +# setup compiler+node. emsdk provides node (18), recent enough for bun. +# TODO: but may need to adjust $PATH with stock emsdk. 
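+# A hedged sketch for that TODO (assumes a stock emsdk checkout at /opt/emsdk;
+# not exercised by this build): sourcing its env script puts emcc and its
+# bundled node on $PATH, so the which-emcc probe below picks it up unchanged:
+#   . /opt/emsdk/emsdk_env.sh
+#   emcc --version   # sanity check before running cibuild.sh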
+ +if which emcc +then + echo "Using provided emsdk from $(which emcc)" +else + . /opt/python-wasm-sdk/wasm32-bi-emscripten-shell.sh +fi + + +# custom code for node/web builds that modify pg main/tools behaviour +# this used by both node/linkweb build stages + +# pass the "kernel" contiguous memory zone size to the C compiler. +CC_PGLITE="-DCMA_MB=${CMA_MB}" + +# these are files that shadow original portion of pg core, with minimal changes +# to original code +# some may be included multiple time +CC_PGLITE="-DPATCH_MAIN=${GITHUB_WORKSPACE}/patches/pg_main.c ${CC_PGLITE}" +CC_PGLITE="-DPATCH_LOOP=${GITHUB_WORKSPACE}/patches/interactive_one.c ${CC_PGLITE}" +CC_PGLITE="-DPATCH_PLUGIN=${GITHUB_WORKSPACE}/patches/pg_plugin.h ${CC_PGLITE}" + +export CC_PGLITE + + + +if [ -f ${WEBROOT}/postgres.js ] +then + echo using current from ${WEBROOT} + + . ${PGROOT}/pgopts.sh + +else + + # store all pg options that have impact on cmd line initdb/boot + cat > ${PGROOT}/pgopts.sh < ${PGROOT}/config.site < ${PGROOT}/locale <> ${PGROOT}/pgopts.sh + echo "export PGLITE=${PGLITE}" >> ${PGROOT}/pgopts.sh + + if [ -f /opt/sdk/wasisdk/wabt/bin/wasm-objdump ] + then + ./cibuild/symtab.sh ${PGROOT}/lib/postgresql/plpgsql.so > ${PGROOT}/symbols + fi +fi + +# put wasm-shared the pg extension linker from build dir in the path +# and also pg_config from the install dir. +export PATH=${GITHUB_WORKSPACE}/build/postgres/bin:${PGROOT}/bin:$PATH + + + +# At this stage, PG should be installed to PREFIX and ready for linking +# or building ext. + + + + +# =========================================================================== +# =========================================================================== +# EXTENSIONS +# =========================================================================== +# =========================================================================== + + +if echo "$*"|grep -q vector +then + echo "=================================================" + + pushd build + + # [ -d pgvector ] || git clone --no-tags --depth 1 --single-branch --branch master https://github.com/pgvector/pgvector + # git clone --branch v0.7.2 https://github.com/pgvector/pgvector.git + + if [ -d pgvector ] + then + echo using local pgvector + else + wget -c -q https://github.com/pgvector/pgvector/archive/refs/tags/v0.7.2.tar.gz -Opgvector.tar.gz + tar xvfz pgvector.tar.gz && rm pgvector.tar.gz + mv pgvector-?.?.? pgvector + fi + + pushd pgvector + # path for wasm-shared already set to (pwd:pg build dir)/bin + # OPTFLAGS="" turns off arch optim (sse/neon). 
+    PG_CONFIG=${PGROOT}/bin/pg_config emmake make OPTFLAGS="" install
+    cp sql/vector.sql sql/vector--0.7.2.sql ${PGROOT}/share/postgresql/extension
+    rm ${PGROOT}/share/postgresql/extension/vector--?.?.?--?.?.?.sql ${PGROOT}/share/postgresql/extension/vector.sql
+    popd
+
+    popd
+
+    python3 cibuild/pack_extension.py
+
+fi
+
+if echo "$*"|grep " postgis"
+then
+    echo "================================================="
+    PG_LINK=em++ echo "WIP - requires latest python-wasm-sdk, not just emsdk"
+
+    python3 cibuild/pack_extension.py
+fi
+
+
+
+if echo "$*"|grep " quack"
+then
+    echo "================================================="
+    ./cibuild/pg_quack.sh
+    cp $PGROOT/lib/libduckdb.so /tmp/
+    python3 cibuild/pack_extension.py
+fi
+
+
+# ===========================================================================
+# ===========================================================================
+# PGLite
+# ===========================================================================
+# ===========================================================================
+
+
+
+
+# in pg git test mode we pull pglite instead
+if [ -d pglite ]
+then
+    # to get pglite/postgres populated by web build
+    rmdir pglite/postgres pglite 2>/dev/null
+    if [ -d pglite ]
+    then
+        echo using local
+    else
+        git clone --no-tags --depth 1 --single-branch --branch pglite-build https://github.com/electric-sql/pglite pglite
+    fi
+fi
+
+
+# run this last so all extension files can be packaged
+# those include *.control, *.sql and *.so
+# TODO: check if some versioned *.sql files can be omitted
+# TODO: for extensions bigger than pgvector, use separate packaging.
+
+if echo "$*"|grep "node"
+then
+    echo "================================================="
+    mkdir -p /tmp/sdk/
+
+    # remove versioned symlinks
+    rm ${PGROOT}/lib/lib*.so.? 2>/dev/null
+    if $CI
+    then
+        tar -cpRz ${PGROOT} > /tmp/sdk/postgres-${PGVERSION}.tar.gz
+    fi
+fi
+
+# run linkweb after the node build because it will remove some wasm .so files used by node from the fs
+# they don't need to be in MEMFS as they are fetched.
+
+
+# include current pglite source for easy local rebuild with just npm run build:js.
+
+
+if echo "$*"|grep "linkweb"
+then
+
+    # build web version
+    pushd build/postgres
+    echo "=================== $(pwd) ========================="
+
+    . $GITHUB_WORKSPACE/cibuild/linkweb.sh
+
+    # upload all to gh pages,
+    # TODO: include node archive and samples ?
+    if $CI
+    then
+        mkdir -p /tmp/web/
+        cp -r $WEBROOT/* /tmp/web/
+    fi
+    popd
+fi
+
+
+# pglite* also use web build files, so order them last.
+
+
+while test $# -gt 0
+do
+    case "$1" in
+        pglite) echo "=================== pglite ======================="
+            # TODO: SAM's NOTE - Not using this in the GitHub action as it doesn't resolve pnpm correctly
+            # replaced with pglite-prep and pglite-bundle-sdk
+
+            . 
cibuild/pglite-ts.sh + + # copy needed files for a minimal js/ts/extension build + # NB: these don't use NODE FS + + mkdir -p ${PGROOT}/sdk/packages/ /tmp/web/pglite /tmp/web/repl/ + cp -r $PGLITE ${PGROOT}/sdk/packages/ + + mkdir /tmp/web/repl/dist-webcomponent -p + cp -r ${GITHUB_WORKSPACE}/packages/repl/dist-webcomponent /tmp/web/repl/ + + pushd /tmp/web/pglite/examples + ln -s ../dist/postgres.data + popd + + if $CI + then + tar -cpRz ${PGROOT} > /tmp/sdk/pglite-pg${PGVERSION}.tar.gz + fi + + du -hs ${WEBROOT}/* + ;; + + pglite-prep) echo "==================== pglite-prep ==========================" + mkdir $PGLITE/release || rm $PGLITE/release/* + # copy packed extensions + cp ${WEBROOT}/*.tar.gz ${PGLITE}/release/ + + + cp -vf ${WEBROOT}/postgres.{js,data,wasm} $PGLITE/release/ + cp -vf ${WEBROOT}/libecpg.so $PGLITE/release/postgres.so + + ;; + + pglite-bundle-interim) echo "================== pglite-bundle-interim ======================" + tar -cpRz ${PGLITE}/release > /tmp/sdk/pglite-interim-${PGVERSION}.tar.gz + ;; + + demo-site) echo "==================== demo-site ==========================" + + echo " + + + + " > /tmp/web/index.html + + mkdir -p /tmp/web/pglite + mkdir -p /tmp/web/repl + + PGLITE=$(pwd)/packages/pglite + cp -r ${PGLITE}/dist /tmp/web/pglite/ + cp -r ${PGLITE}/examples /tmp/web/pglite/ + cp -r ${GITHUB_WORKSPACE}/packages/repl/dist-webcomponent /tmp/web/repl/ + cp -r ${GITHUB_WORKSPACE}/packages/benchmark /tmp/web/pglite/ + ;; + esac + shift +done + + diff --git a/cibuild/linkweb.sh b/cibuild/linkweb.sh new file mode 100755 index 00000000..d22e6d44 --- /dev/null +++ b/cibuild/linkweb.sh @@ -0,0 +1,239 @@ +#!/bin/bash + +WEBROOT=${WEBROOT:-/tmp/sdk} +echo " + + + +linkweb:begin + + $(pwd) + + WEBROOT=${WEBROOT} + + CC_PGLITE=$CC_PGLITE + +" + +mkdir -p $WEBROOT + +NOWARN="-Wno-missing-prototypes -Wno-unused-function -Wno-declaration-after-statement -Wno-incompatible-pointer-types-discards-qualifiers" + +# client lib ( eg psycopg ) for websocketed pg server +emcc $CDEBUG -shared -o ${WEBROOT}/libpgc.so \ + ./src/interfaces/libpq/libpq.a \ + ./src/port/libpgport.a \ + ./src/common/libpgcommon.a || exit 26 + +# this override completely pg server main loop for web use purpose +pushd src + rm pg_initdb.o backend/main/main.o ./backend/tcop/postgres.o ./backend/utils/init/postinit.o + + emcc -DPG_INITDB_MAIN=1 -sFORCE_FILESYSTEM -DPREFIX=${PGROOT} ${CC_PGLITE} \ + -I${PGROOT}/include -I${PGROOT}/include/postgresql/server -I${PGROOT}/include/postgresql/internal \ + -c -o ../pg_initdb.o ${PGSRC}/src/bin/initdb/initdb.c $NOWARN || exit 34 + + # + emcc -DPG_LINK_MAIN=1 -DPREFIX=${PGROOT} ${CC_PGLITE} -DPG_EC_STATIC \ + -I${PGROOT}/include -I${PGROOT}/include/postgresql/server -I${PGROOT}/include/postgresql/internal \ + -c -o ./backend/tcop/postgres.o ${PGSRC}/src/backend/tcop/postgres.c $NOWARN|| exit 39 + + EMCC_CFLAGS="${CC_PGLITE} -DPREFIX=${PGROOT} -DPG_INITDB_MAIN=1 $NOWARN" \ + emmake make backend/main/main.o backend/utils/init/postinit.o || exit 41 +popd + + +echo "========================================================" +echo -DPREFIX=${PGROOT} $CC_PGLITE +file ${WEBROOT}/libpgc.so pg_initdb.o src/backend/main/main.o src/backend/tcop/postgres.o src/backend/utils/init/postinit.o +echo "========================================================" + + +pushd src/backend + +# https://github.com/emscripten-core/emscripten/issues/12167 +# --localize-hidden +# https://github.com/llvm/llvm-project/issues/50623 + + + echo " ---------- building web test PREFIX=$PGROOT 
------------" + du -hs ${WEBROOT}/libpg?.* + + PG_O="../../src/fe_utils/string_utils.o ../../src/common/logging.o \ + $(find . -type f -name "*.o" \ + | grep -v ./utils/mb/conversion_procs \ + | grep -v ./replication/pgoutput \ + | grep -v src/bin/ \ + | grep -v ./snowball/dict_snowball.o ) \ + ../../src/timezone/localtime.o \ + ../../src/timezone/pgtz.o \ + ../../src/timezone/strftime.o \ + ../../pg_initdb.o" + + PG_L="-L../../src/port -L../../src/common \ + ../../src/common/libpgcommon_srv.a ../../src/port/libpgport_srv.a" + + + +if false +then + # PG_L="$PG_L -L../../src/interfaces/ecpg/ecpglib ../../src/interfaces/ecpg/ecpglib/libecpg.so /tmp/pglite/lib/postgresql/libduckdb.so" + PG_L="$PG_L -L../../src/interfaces/ecpg/ecpglib ../../src/interfaces/ecpg/ecpglib/libecpg.so /tmp/libduckdb.so -lstdc++" +else + PG_L="$PG_L -L../../src/interfaces/ecpg/ecpglib ../../src/interfaces/ecpg/ecpglib/libecpg.so" + PG_L="../../src/common/libpgcommon_srv.a ../../src/port/libpgport_srv.a ../.././src/interfaces/libpq/libpq.a" + +fi + +# ../../src/common/libpgcommon_shlib.a" +# ./src/common/libpgcommon.a: binary file matches +# ./src/common/libpgcommon_shlib.a: binary file matches +# error: undefined symbol: fsync_pgdata (referenced by root reference (e.g. compiled C/C++ code)) +# error: undefined symbol: get_restricted_token (referenced by root reference (e.g. compiled C/C++ code)) +# error: undefined symbol: pg_malloc_extended (referenced by root reference (e.g. compiled C/C++ code)) +# error: undefined symbol: pg_realloc (referenced by root reference (e.g. compiled C/C++ code)) +# error: undefined symbol: pg_strdup (referenced by root reference (e.g. compiled C/C++ code)) +# error: undefined symbol: simple_prompt (referenced by root reference (e.g. compiled C/C++ code)) + + + +## \ +# /opt/python-wasm-sdk/devices/emsdk/usr/lib/libxml2.a \ +# /opt/python-wasm-sdk/devices/emsdk/usr/lib/libgeos.a \ +# /opt/python-wasm-sdk/devices/emsdk/usr/lib/libgeos_c.a \ +# /opt/python-wasm-sdk/devices/emsdk/usr/lib/libproj.a" + +# /data/git/pglite-build/pglite/postgres/libgeosall.so +# /data/git/pglite-build/pglite/postgres/libduckdb.so" + + +# ? -sLZ4=1 -sENVIRONMENT=web +# -sSINGLE_FILE => Uncaught SyntaxError: Cannot use 'import.meta' outside a module (at postgres.html:1:6033) +# -sENVIRONMENT=web => XHR +EMCC_WEB="-sNO_EXIT_RUNTIME=1 -sFORCE_FILESYSTEM=1" + +if ${PGES6:-true} +then + # es6 + MODULE="-g0 -Os -sMODULARIZE=1 -sEXPORT_ES6=1 -sEXPORT_NAME=Module --shell-file ${GITHUB_WORKSPACE}/tests/repl.html" +else + # local debug fast build + MODULE="-g3 -O0 -sMODULARIZE=0 -sEXPORT_ES6=0 --shell-file ${GITHUB_WORKSPACE}/tests/repl.html" +fi + +# ======================================================= +# size optimisations +# ======================================================= + +rm ${PGROOT}/lib/lib*.so.? 2>/dev/null + +echo "#!/bin/true" > placeholder +chmod +x placeholder + +# for ./bin + +# share/postgresql/pg_hba.conf.sample REQUIRED +# rm ${PGROOT}/share/postgresql/*.sample + +# ./lib/lib*.a => ignored + +# ./include ignored + +# timezones ? + +# encodings ? 
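+# a hedged way to check what each candidate removal here actually saves
+# (du is already used elsewhere in this script; paths per this build's layout):
+#   du -hs ${PGROOT}/lib/postgresql ${PGROOT}/share/postgresql
+# of the candidates above, only the encoding conversion modules are dropped: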
+# ./lib/postgresql/utf8_and*.so +rm ${PGROOT}/lib/postgresql/utf8_and*.so + +# ========================================================= + +# --js-library + +# cp ${GITHUB_WORKSPACE}/patches/library_pgfs.js ${EMSDK}/upstream/emscripten/src/library_pgfs.js + + +echo 'localhost:5432:postgres:postgres:password' > pgpass + + +if [ -f ${PGROOT}/symbols ] +then + # _main,_getenv,_setenv,_interactive_one,_interactive_write,_interactive_read,_pg_initdb,_pg_shutdown + +#not yet +#_emscripten_copy_from +#_emscripten_copy_to +#_emscripten_copy_to_end + + + cat > exports <> exports + cat exports > ${GITHUB_WORKSPACE}/patches/exports +else + cat ${GITHUB_WORKSPACE}/patches/exports >> exports +fi + +# copyFrom,copyTo,copyToEnd + +emcc $EMCC_WEB -fPIC -sMAIN_MODULE=2 \ + -D__PYDK__=1 -DPREFIX=${PGROOT} \ + -sTOTAL_MEMORY=1GB -sSTACK_SIZE=4MB -sALLOW_TABLE_GROWTH -sALLOW_MEMORY_GROWTH -sGLOBAL_BASE=${CMA_MB}MB \ + $MODULE -sERROR_ON_UNDEFINED_SYMBOLS -sASSERTIONS=0 \ + -lnodefs.js -lidbfs.js \ + -sEXPORTED_RUNTIME_METHODS=FS,setValue,getValue,UTF8ToString,stringToNewUTF8,stringToUTF8OnStack,ccall,cwrap,callMain \ + -sEXPORTED_FUNCTIONS=@exports \ + --preload-file ${PGROOT}/share/postgresql@${PGROOT}/share/postgresql \ + --preload-file ${PGROOT}/lib/postgresql@${PGROOT}/lib/postgresql \ + --preload-file ${PGROOT}/password@${PGROOT}/password \ + --preload-file pgpass@${PGROOT}/pgpass \ + --preload-file placeholder@${PGROOT}/bin/postgres \ + --preload-file placeholder@${PGROOT}/bin/initdb \ + -o postgres.html $PG_O $PG_L || exit 186 + +mkdir -p ${WEBROOT} + +cp -v postgres.* ${WEBROOT}/ +#cp ${PGROOT}/lib/libecpg.so ${WEBROOT}/ +cp ${PGROOT}/sdk/*.tar ${WEBROOT}/ +for tarf in ${WEBROOT}/*.tar +do + gzip -f -9 $tarf +done + + + cp $GITHUB_WORKSPACE/{tests/vtx.js,patches/tinytar.min.js} ${WEBROOT}/ + + popd + +echo " +linkweb:end + + + + +" + + + + diff --git a/cibuild/pack_extension.py b/cibuild/pack_extension.py new file mode 100644 index 00000000..a6cfc2a4 --- /dev/null +++ b/cibuild/pack_extension.py @@ -0,0 +1,116 @@ +# cibuild/pack_extension.py + +# use recorded file list in ${PGROOT}/pg.installed +# get other files into a tarball, find a .so and named everything after it + + + +import asyncio +import tarfile +import os +from pathlib import Path + +class Error(Exception): + pass + +def gather(root: Path, *kw): + + for current, dirnames, filenames in os.walk(root): + rel = Path("/").joinpath(Path(current).relative_to(root)) + + # print(rel, len(dirnames), len(filenames)) + yield rel, filenames + + + +def is_extension(path:Path, fullpath:Path): + global EXTNAME, SYMBOLS + asp = path.as_posix() + + # check .so + if asp.startswith('/lib/postgresql/'): + if path.suffix == ".so": + EXTNAME = path.stem + if os.path.isfile('/opt/sdk/wasisdk/wabt/bin/wasm-objdump'): + # TODO use popen and sort/merge + os.system(f"./cibuild/symtab.sh {fullpath} >> {PGROOT}/symbols") + with open(f"{PGROOT}/symbols","r") as f: + SYMBOLS=f.readlines() + + return True + + # rpath + if asp.startswith('/lib/'): + return True + + if asp.startswith('/share/postgresql/extension'): + return True + + + + +async def archive(target_folder): + global INSTALLED, PACKLIST + + walked = [] + for folder, filenames in gather(target_folder): + walked.append([folder, filenames]) + + + for folder, filenames in walked: + for filename in filenames: + test = Path(folder) / Path(filename) + asp = test.as_posix() + if (PGROOT/test).is_symlink(): + print("SYMLINK:", test) + continue + if test.as_posix() not in INSTALLED: + if asp.startswith('/sdk/'): + 
continue + fp = PGROOT / asp[1:] + if fp.is_symlink(): + continue + if is_extension(test, fp): + #print(f"{EXTNAME=}", test ) + PACKLIST.append( [fp, test] ) + else: + print("custom:", test) + + +PGROOT=Path(os.environ['PGROOT']) + +INSTALLED = [] + +EXTNAME = "" +PACKLIST = [] +SYMBOLS=[] + + +for line in open(PGROOT / "pg.installed" ).readlines(): + INSTALLED.append( Path(line[1:].strip()).as_posix() ) + +print("="*80) +asyncio.run( archive(PGROOT) ) +print("="*80) +print(f""" + + + + {EXTNAME =} ({len(SYMBOLS)} imports) + + + +""") + +swd = os.getcwd() + +if len(PACKLIST): + os.chdir(PGROOT) + with tarfile.open(PGROOT / "sdk" / f"{EXTNAME}.tar" , "w:") as tar: + for fp, fn in PACKLIST: + print(f"{EXTNAME} : {fp} => {fn}") + tar.add(fn.as_posix()[1:]) + os.remove(fp) + os.chdir(swd) +else: + print("Nothing to pack for", EXTNAME) diff --git a/cibuild/pg-16.x.sh b/cibuild/pg-16.x.sh new file mode 100644 index 00000000..f26c259f --- /dev/null +++ b/cibuild/pg-16.x.sh @@ -0,0 +1,48 @@ +ARCHIVE=postgresql-${PGVERSION}.tar.bz2 + +if [ -f postgresql/postgresql-${PGVERSION}.patched ] +then + echo version already selected and patch stage already done +else + [ -f ${ARCHIVE} ] || wget -q -c https://ftp.postgresql.org/pub/source/v${PGVERSION}/${ARCHIVE} + + tar xfj ${ARCHIVE} + + if pushd postgresql-${PGVERSION} + then + echo + > ./src/template/emscripten + > ./src/include/port/emscripten.h + > ./src/makefiles/Makefile.emscripten + for patchdir in \ + postgresql-emscripten \ + postgresql-wasm postgresql-wasm-${PGVERSION} \ + postgresql-pglite postgresql-pglite-${PGVERSION} + do + if [ -d ../patches/$patchdir ] + then + cat ../patches/$patchdir/*.diff | patch -p1 || exit 24 + fi + done + touch postgresql-${PGVERSION}.patched + popd + fi + + # either a submodule dir or a symlink. + # release only use symlink + + rm postgresql 2>/dev/null + ln -s postgresql-${PGVERSION} postgresql + +fi + +export PGSRC=$(realpath postgresql-${PGVERSION}) + +if [ -f ${PGROOT}/pg.installed ] +then + echo "skipping pg build, using previous install from ${PGROOT}" +else + echo "Building $ARCHIVE (patched) from $PGSRC" + . cibuild/pgbuild.sh +fi + diff --git a/cibuild/pgbuild.sh b/cibuild/pgbuild.sh new file mode 100644 index 00000000..4ed22486 --- /dev/null +++ b/cibuild/pgbuild.sh @@ -0,0 +1,241 @@ +echo " + + + +pgbuild:begin + +CC_PGLITE=$CC_PGLITE + +" + + + mkdir -p build/postgres + pushd build/postgres + + + if $CI + then + echo "CI : using build cache" + else + if [ -f Makefile ] + then + echo "Cleaning up previous build ..." 
+ make distclean 2>&1 > /dev/null + fi + fi + + + CNF="${PGSRC}/configure --prefix=${PGROOT} \ + --disable-spinlocks --disable-atomics \ + --without-zlib --disable-largefile --without-llvm \ + --without-pam --disable-largefile --without-zlib --with-openssl=no \ + --without-readline --without-icu \ + ${PGDEBUG}" + + echo " ==== building wasm MVP:$MVP Debug=${PGDEBUG} with opts : $@ == " + + + if [ -f ${PGROOT}/config.cache.emsdk ] + then + echo "re-using config cache file from ${PGROOT}/config.cache.emsdk" + else + if [ -f ../config.cache.emsdk ] + then + cp ../config.cache.emsdk ${PGROOT}/ + else + cp config.cache.emsdk ${PGROOT}/ + fi + fi + + # -lwebsocket.js -sPROXY_POSIX_SOCKETS -pthread -sPROXY_TO_PTHREAD + # CONFIG_SITE=$(pwd)/config.site EMCC_CFLAGS="--oformat=html" \ + + # crash clang CFLAGS=-Wno-error=implicit-function-declaration + + if CONFIG_SITE==${PGDATA}/config.site emconfigure $CNF --with-template=emscripten --cache-file=${PGROOT}/config.cache.emsdk + then + echo configure ok + else + echo configure failed + exit 262 + fi + + sed -i 's|ZIC= ./zic|ZIC= zic|g' ${PGSRC}/src/timezone/Makefile + + + if grep -q MAIN_MODULE ${PGSRC}/src/backend/Makefile + then + echo "dyld server patch ok" + else + echo "missing server dyld patch" + exit 273 + fi + mkdir -p bin + + cat > bin/zic < /tmp/disable-shared.log + + cat > bin/wasm-shared <> /tmp/disable-shared.log +# shared build +\${PG_LINK:-emcc} -L${PREFIX}/lib -DPREFIX=${PGROOT} -shared -sSIDE_MODULE=1 \$@ -Wno-unused-function +END + + # FIXME: workaround for /conversion_procs/ make + # cp bin/wasm-shared bin/o + if which zic + then + cp $(which zic) zic.native bin/zic + fi + chmod +x bin/zic bin/wasm-shared + + # for zic and wasm-shared + export PATH=$(pwd)/bin:$PATH + + + EMCC_WEB="-sNO_EXIT_RUNTIME=1 -sENVIRONMENT=web" + EMCC_NODE="-sEXIT_RUNTIME=1 -DEXIT_RUNTIME -sNODERAWFS -sENVIRONMENT=node" + + # -lwebsocket.js" + # -sWEBSOCKET_SUBPROTOCOL=binary -sWEBSOCKET_URL=ws://127.0.0.1:25432" + + # -lwebsocket.js + # -sPROXY_POSIX_SOCKETS -pthread -sPROXY_TO_PTHREAD $EMCC_CFLAGS" + + # -sWASMFS + + EMCC_ENV="${EMCC_NODE} -sFORCE_FILESYSTEM=0" + EMCC_ENV="${EMCC_NODE} -sERROR_ON_UNDEFINED_SYMBOLS" + + # only required for static initdb + EMCC_CFLAGS="-sERROR_ON_UNDEFINED_SYMBOLS=0 ${CC_PGLITE}" + EMCC_CFLAGS="${EMCC_CFLAGS} -sTOTAL_MEMORY=1GB -sSTACK_SIZE=5MB -sALLOW_TABLE_GROWTH -sALLOW_MEMORY_GROWTH -sGLOBAL_BASE=${CMA_MB}MB" + EMCC_CFLAGS="${EMCC_CFLAGS} -DPREFIX=${PGROOT}" + + export EMCC_CFLAGS="${EMCC_CFLAGS} -Wno-macro-redefined -Wno-unused-function" + + + if EMCC_CFLAGS="${EMCC_ENV} ${EMCC_CFLAGS}" emmake make -j $(nproc) 2>&1 > /tmp/build.log + then + echo build ok + # for 32bits zic + unset LD_PRELOAD + if EMCC_CFLAGS="${EMCC_ENV} ${EMCC_CFLAGS}" emmake make install 2>&1 > /tmp/install.log + then + echo install ok + pushd ${PGROOT} + #find ./lib/postgresql ./share/postgresql/extension -type f > ${PGROOT}/pg.installed + find . 
-type f > ${PGROOT}/pg.installed + popd + else + cat /tmp/install.log + echo "install failed" + exit 368 + fi + else + cat /tmp/build.log + echo "build failed" + exit 373 + fi + + # wip + mv -vf ./src/bin/psql/psql.wasm ./src/bin/pg_config/pg_config.wasm ${PGROOT}/bin/ + mv -vf ./src/bin/pg_dump/pg_restore.wasm ./src/bin/pg_dump/pg_dump.wasm ./src/bin/pg_dump/pg_dumpall.wasm ${PGROOT}/bin/ + mv -vf ./src/bin/pg_resetwal/pg_resetwal.wasm ./src/bin/initdb/initdb.wasm ./src/backend/postgres.wasm ${PGROOT}/bin/ + + mv -vf ${PGROOT}/bin/pg_config ${PGROOT}/bin/pg_config.js + mv -vf ./src/bin/initdb/initdb ${PGROOT}/bin/initdb.js + mv -vf ./src/bin/pg_resetwal/pg_resetwal ${PGROOT}/bin/pg_resetwal.js + mv -vf ./src/backend/postgres ${PGROOT}/bin/postgres.js + + cat > ${PGROOT}/bin/pg_config < ${PGROOT}/postgres <&1 | grep --line-buffered -v ^var\\ Module + + # force node wasm version + cp -vf ${PGROOT}/postgres ${PGROOT}/bin/postgres + + cat > ${PGROOT}/initdb <$PGROOT/initdb.sh < /tmp/initdb.txt + +${PGROOT}/initdb --no-clean --wal-segsize=1 -g $LANG $CRED --pgdata=${PGDATA} + +mv /tmp/initdb.boot.txt \${SQL}.boot.sql +mv /tmp/initdb.single.txt \${SQL}.single.sql + +if \${CI:-false} +then + cp -vf \$SQL ${PGROOT}/\$(md5sum \$SQL|cut -c1-32).sql +fi + +# --wal-segsize=1 -> -X 1048576 + +# CKSUM_B -k --data-checksums +# 2024-04-24 05:53:28.121 GMT [42] WARNING: page verification failed, calculated checksum 5487 but expected 0 +# 2024-04-24 05:53:28.121 GMT [42] FATAL: invalid page in block 0 of relation base/1/1259 + +CMD="${PGROOT}/postgres --boot -D ${PGDATA} -d 3 $PGOPTS -X 1048576" +echo "\$CMD < \$SQL.boot.sql" +\$CMD < \$SQL.boot.sql 2>&1 \\ + | grep -v --line-buffered 'bootstrap> boot' \\ + | grep -v --line-buffered 'index' + +echo " + +\$(md5sum /tmp/initdb-\$\$.*.sql) + + boot done +" +END + + popd +echo "pgbuild:end + + + + +" + + + diff --git a/cibuild/pglite-ts.sh b/cibuild/pglite-ts.sh new file mode 100644 index 00000000..482917df --- /dev/null +++ b/cibuild/pglite-ts.sh @@ -0,0 +1,110 @@ + if [ -d pglite ] + then + # work tree + pushd pglite/packages/pglite + PGLITE=$(pwd) + else + # release tree + pushd packages/pglite + PGLITE=$(pwd) + fi + + # not used for now, everything in PGROOT is to be bundled + cat > $PGLITE/release/share.js </dev/null + + # copy packed extensions + cp ${WEBROOT}/*.tar.gz ${PGLITE}/release/ + + # copy wasm web prebuilt artifacts to release folder + # TODO: get them from web for nosdk systems. + if $CI + then + cp -vf /tmp/web/postgres.{js,data,wasm} $PGLITE/release/ + cp -vf /tmp/web/libecpg.so $PGLITE/release/postgres.so + else + cp ${WEBROOT}/postgres.{js,data,wasm} ${PGLITE}/release/ + cp ${WEBROOT}/libecpg.so ${PGLITE}/release/postgres.so + fi + + # unused right now + # touch $PGLITE/release/share.data + + + + if ${DEV:-false} + then + echo " + + + + =============================== dev test mode =========================== + + + + + +" + # this is the ES6 wasm module loader from emscripten. 
+ cp $PGLITE/release/postgres.js $PGLITE/release/pgbuild.js + # use a javascript wasm module loader with a thin api for tests + cat ${GITHUB_WORKSPACE}/patches/pgbuild.js > $PGLITE/release/postgres.js + else + echo "using emscripten es6->ts interface" + fi + + # debug CI does not use pnpm/npm for building pg, so call the typescript build + # part from here + if $CI + then + pnpm run build:js + mkdir -p /tmp/sdk + pnpm pack + packed=$(echo -n electric-sql-pglite-*.tgz) + mv $packed /tmp/sdk/pg${PGVERSION}-${packed} + + # for repl demo + mkdir -p /tmp/web/pglite + cp -r ${PGLITE}/dist /tmp/web/pglite/ + cp -r ${PGLITE}/examples /tmp/web/pglite/ + pushd /tmp/web/ + ln -s ../dist/postgres.data + popd + # link files for xterm based repl + ln ${WEBROOT}/dist/postgres.* ${WEBROOT}/ || echo pass + + echo " + + + + " > ${WEBROOT}/index.html + + else + mkdir -p ${WEBROOT}/node_modules/@electric-sql/pglite + cp -r ${PGLITE}/{../../LICENSE,package.json,README.md} ${PGLITE}/dist ${WEBROOT}/node_modules/@electric-sql/pglite/ + pushd ${WEBROOT} + zip /tmp/sdk/pglite.zip -q -r node_modules + popd + fi + + popd + diff --git a/packages/pglite/Makefile b/packages/pglite/Makefile deleted file mode 100644 index ffa9d52d..00000000 --- a/packages/pglite/Makefile +++ /dev/null @@ -1,41 +0,0 @@ -.PHONY: debug-build debug-datadir build datadir - -build-configure: - EMCC_CFLAGS="-Wl,--allow-undefined" \ - emconfigure ./configure CFLAGS='-O2' \ - --without-readline \ - --without-zlib \ - --disable-thread-safety \ - --disable-spinlocks \ - --with-system-tzdata=/usr/share/zoneinfo - --with-system-tzdata=/usr/share/zoneinfo - -build: - EMCC_CFLAGS="-s SIDE_MODULE=1 -sERROR_ON_UNDEFINED_SYMBOLS=0 -sWARN_ON_UNDEFINED_SYMBOLS=0 -sTOTAL_MEMORY=65536000 -sMODULARIZE=1 -sEXPORT_ES6=1 -sEXPORTED_RUNTIME_METHODS='FS'" \ - emmake make -C src/pl/plpgsql MAKELEVEL=0 - - EMCC_CFLAGS="-sALLOW_MEMORY_GROWTH=1 -sERROR_ON_UNDEFINED_SYMBOLS=0 -sWARN_ON_UNDEFINED_SYMBOLS=0 -sTOTAL_MEMORY=65536000 -sEMULATE_FUNCTION_POINTER_CASTS=1 -sMODULARIZE=1 -sEXPORT_ES6=1 -sEXPORTED_FUNCTIONS=_main,_ExecProtocolMsg,_malloc,_free -sEXPORTED_RUNTIME_METHODS=ccall,cwrap,FS" \ - emmake make -C src/backend MAKELEVEL=0 - - mkdir -p ../packages/pglite/release - cp src/backend/postgres ../packages/pglite/release/postgres.js - cp src/backend/postgres.wasm ../packages/pglite/release/postgres.wasm - cd ../packages/pglite/ && node scripts/modify-postgres-js.js - -sharedir: - mkdir -p tmp_install - - DESTDIR="$(abspath tmp_install)" \ - EMCC_CFLAGS="-sERROR_ON_UNDEFINED_SYMBOLS=0 -sWARN_ON_UNDEFINED_SYMBOLS=0 -sTOTAL_MEMORY=65536000 -sEMULATE_FUNCTION_POINTER_CASTS=1 -sMODULARIZE=1 -sEXPORT_ES6=1 -sEXPORTED_RUNTIME_METHODS='FS'" \ - emmake make MAKELEVEL=0 -C src/backend/ install - - DESTDIR="$(abspath tmp_install)" \ - EMCC_CFLAGS="-sERROR_ON_UNDEFINED_SYMBOLS=0 -sWARN_ON_UNDEFINED_SYMBOLS=0 -sTOTAL_MEMORY=65536000 -sEMULATE_FUNCTION_POINTER_CASTS=1 -sMODULARIZE=1 -sEXPORT_ES6=1 -sEXPORTED_RUNTIME_METHODS='FS'" \ - emmake make MAKELEVEL=0 -C src/pl/plpgsql install - - rm tmp_install/usr/local/pgsql/share/*.sample - rm tmp_install/usr/local/pgsql/share/timezonesets/*.txt - node ../packages/pglite/scripts/modify-share.js - cd ../packages/pglite/release && \ - `em-config EMSCRIPTEN_ROOT`/tools/file_packager share.data --preload ../../../postgres/tmp_install/usr/local/pgsql/share@/usr/local/pgsql/share --js-output=share.js --export-name=ModuleBase - cd ../packages/pglite/ && node scripts/modify-share-js.js diff --git a/packages/pglite/examples/basic.html 
b/packages/pglite/examples/basic.html index 138c3fa1..01394b64 100644 --- a/packages/pglite/examples/basic.html +++ b/packages/pglite/examples/basic.html @@ -1,38 +1 @@ - + \ No newline at end of file diff --git a/packages/pglite/examples/dumpDataDir.html b/packages/pglite/examples/dumpDataDir.html new file mode 100644 index 00000000..f02cfc46 --- /dev/null +++ b/packages/pglite/examples/dumpDataDir.html @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/pglite/examples/dumpDataDir.js b/packages/pglite/examples/dumpDataDir.js new file mode 100644 index 00000000..35bdd52e --- /dev/null +++ b/packages/pglite/examples/dumpDataDir.js @@ -0,0 +1,32 @@ +import { PGlite } from "../dist/index.js"; + +const pg = new PGlite(); +await pg.exec(` + CREATE TABLE IF NOT EXISTS test ( + id SERIAL PRIMARY KEY, + name TEXT + ); +`); +await pg.exec("INSERT INTO test (name) VALUES ('test');"); + +const file = await pg.dumpDataDir(); + +if (typeof window !== "undefined") { + // Download the dump + const url = URL.createObjectURL(file); + const a = document.createElement("a"); + a.href = url; + a.download = file.name; + a.click(); +} else { + // Save the dump to a file using node fs + const fs = await import("fs"); + fs.writeFileSync(file.name, await file.arrayBuffer()); +} + +const pg2 = new PGlite({ + loadDataDir: file, +}); + +const rows = await pg2.query("SELECT * FROM test;"); +console.log(rows); diff --git a/packages/pglite/examples/index.html b/packages/pglite/examples/index.html new file mode 100644 index 00000000..43871ea3 --- /dev/null +++ b/packages/pglite/examples/index.html @@ -0,0 +1,11 @@ + + + \ No newline at end of file diff --git a/packages/pglite/examples/notify.html b/packages/pglite/examples/notify.html index 9a63e55c..85826ecd 100644 --- a/packages/pglite/examples/notify.html +++ b/packages/pglite/examples/notify.html @@ -1,18 +1,19 @@ + + + + diff --git a/packages/pglite/examples/repl.html b/packages/pglite/examples/repl.html new file mode 100644 index 00000000..e9462633 --- /dev/null +++ b/packages/pglite/examples/repl.html @@ -0,0 +1,24 @@ + + + + + + diff --git a/packages/pglite/examples/vector.html b/packages/pglite/examples/vector.html new file mode 100644 index 00000000..83603e7c --- /dev/null +++ b/packages/pglite/examples/vector.html @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/pglite/examples/vector.js b/packages/pglite/examples/vector.js new file mode 100644 index 00000000..47e6d036 --- /dev/null +++ b/packages/pglite/examples/vector.js @@ -0,0 +1,36 @@ +import { PGlite } from "../dist/index.js"; +import { vector } from "../dist/vector/index.js"; + +const pg = new PGlite({ + extensions: { + vector, + // Alternatively, you can specify the path to the extension tarball + // vector: new URL("../dist/vector.tar.gz", import.meta.url), + } +}); + +await pg.exec("CREATE EXTENSION IF NOT EXISTS vector;"); +await pg.exec(` + CREATE TABLE IF NOT EXISTS test ( + id SERIAL PRIMARY KEY, + name TEXT, + vec vector(3) + ); +`); +await pg.exec("INSERT INTO test (name, vec) VALUES ('test1', '[1,2,3]');"); +await pg.exec("INSERT INTO test (name, vec) VALUES ('test2', '[4,5,6]');"); +await pg.exec("INSERT INTO test (name, vec) VALUES ('test3', '[7,8,9]');"); + +const res = await pg.exec(` + SELECT * FROM test; +`); +console.log(res); + +const res2 = await pg.exec(` + SELECT + name, + vec, + vec <-> '[3,1,2]' AS distance + FROM test; +`); +console.log(res2); \ No newline at end of file diff --git a/packages/pglite/package.json b/packages/pglite/package.json index 
41b239a1..c256bc7c 100644 --- a/packages/pglite/package.json +++ b/packages/pglite/package.json @@ -1,6 +1,6 @@ { "name": "@electric-sql/pglite", - "version": "0.1.5", + "version": "0.2.0", "private": false, "publishConfig": { "access": "public" @@ -19,12 +19,16 @@ "main": "dist/index.js", "exports": { ".": "./dist/index.js", - "./worker": "./dist/worker/index.js" + "./worker": "./dist/worker/index.js", + "./vector": "./dist/vector/index.js" }, "typesVersions": { "*": { "worker": [ "./dist/worker/index.d.ts" + ], + "vector": [ + "./dist/vector/index.d.ts" ] } }, @@ -39,27 +43,28 @@ "directory": "packages/pglite" }, "scripts": { - "test": "rm -rf ./pgdata-test && concurrently --hide 0 --prefix none -k \"npx http-server --port 3334 ./\" \"sleep 2 && ava tests/*.test.js tests/**/*.test.js\"", - "build:configure": "cd ../../postgres/ && make -f ../packages/pglite/Makefile build-configure", - "build:wasm": "cd ../../postgres/ && make -f ../packages/pglite/Makefile build", - "build:sharedir": "cd ../../postgres/ && make -f ../packages/pglite/Makefile sharedir", - "build:clean": "cd ../../postgres/ && make clean", + "test": "rm -rf ./pgdata-test && concurrently -s first --hide 1 --prefix none -k \"sleep 2 && ava tests/*.test.js tests/**/*.test.js\" \"npx http-server --port 3334 ./\"", + "test:quick": "rm -rf ./pgdata-bun-test && ava tests/*.test.js tests/target/node-*.test.js", + "test:bun": "rm -rf ./pgdata-test && npx bun test tests/basic.test.js && npx bun test tests/pgvector.test.js && npx bun test tests/targets/node-fs.test.js", "build:js": "tsup && tsx scripts/bundle-wasm.ts", - "build:all": "npm run build:wasm && npm run build:sharedir && npm run build:js", - "build": "npm run build:configure && npm run build:all", + "build": "npm run build:js", "format": "prettier --write ./src" }, "devDependencies": { + "@types/emscripten": "^1.39.13", "@types/node": "^20.11.18", + "@types/node-fetch": "^2.6.11", "async-mutex": "^0.4.1", "ava": "^6.1.2", "buffer": "^6.0.3", + "bun": "^1.1.18", "comlink": "^4.4.1", "concurrently": "^8.2.2", "http-server": "^14.1.1", "pg-protocol": "^1.6.0", "playwright": "^1.42.1", "prettier": "3.2.5", + "tinytar": "^0.1.0", "tsup": "^8.0.2", "tsx": "^4.7.1", "typescript": "^5.3.3" diff --git a/packages/pglite/release/postgres.d.ts b/packages/pglite/release/postgres.d.ts deleted file mode 100644 index d2e1d9d5..00000000 --- a/packages/pglite/release/postgres.d.ts +++ /dev/null @@ -1,438 +0,0 @@ -/** Based on https://github.com/DefinitelyTyped/DefinitelyTyped/blob/master/types/emscripten/index.d.ts */ -/** Other WebAssembly declarations, for compatibility with older versions of Typescript */ -declare namespace WebAssembly { - interface Module {} -} - -declare namespace Emscripten { - interface FileSystemType {} - type EnvironmentType = "WEB" | "NODE" | "SHELL" | "WORKER"; - - type JSType = "number" | "string" | "array" | "boolean"; - type TypeCompatibleWithC = number | string | any[] | boolean; - - type CIntType = "i8" | "i16" | "i32" | "i64"; - type CFloatType = "float" | "double"; - type CPointerType = - | "i8*" - | "i16*" - | "i32*" - | "i64*" - | "float*" - | "double*" - | "*"; - type CType = CIntType | CFloatType | CPointerType; - - type WebAssemblyImports = Array<{ - name: string; - kind: string; - }>; - - type WebAssemblyExports = Array<{ - module: string; - name: string; - kind: string; - }>; - - interface CCallOpts { - async?: boolean | undefined; - } -} - -export interface EmscriptenModule { - print(str: string): void; - printErr(str: string): void; - arguments: 
string[]; - environment: Emscripten.EnvironmentType; - preInit: Array<{ (mod: EmscriptenModule): void }>; - preRun: Array<{ (mod: EmscriptenModule): void }>; - postRun: Array<{ (mod: EmscriptenModule): void }>; - onAbort: { (what: any): void }; - onRuntimeInitialized: { (): void }; - preinitializedWebGLContext: WebGLRenderingContext; - noInitialRun: boolean; - noExitRuntime: boolean; - logReadFiles: boolean; - filePackagePrefixURL: string; - wasmBinary: ArrayBuffer; - - destroy(object: object): void; - getPreloadedPackage( - remotePackageName: string, - remotePackageSize: number - ): ArrayBuffer; - instantiateWasm( - imports: Emscripten.WebAssemblyImports, - successCallback: (module: WebAssembly.Module) => void - ): Emscripten.WebAssemblyExports; - locateFile(url: string, scriptDirectory: string): string; - onCustomMessage(event: MessageEvent): void; - - // USE_TYPED_ARRAYS == 1 - HEAP: Int32Array; - IHEAP: Int32Array; - FHEAP: Float64Array; - - // USE_TYPED_ARRAYS == 2 - HEAP8: Int8Array; - HEAP16: Int16Array; - HEAP32: Int32Array; - HEAPU8: Uint8Array; - HEAPU16: Uint16Array; - HEAPU32: Uint32Array; - HEAPF32: Float32Array; - HEAPF64: Float64Array; - HEAP64: BigInt64Array; - HEAPU64: BigUint64Array; - - TOTAL_STACK: number; - TOTAL_MEMORY: number; - FAST_MEMORY: number; - - addOnPreRun(cb: () => any): void; - addOnInit(cb: () => any): void; - addOnPreMain(cb: () => any): void; - addOnExit(cb: () => any): void; - addOnPostRun(cb: () => any): void; - - preloadedImages: any; - preloadedAudios: any; - - _malloc(size: number): number; - _free(ptr: number): void; -} - -/** - * A factory function is generated when setting the `MODULARIZE` build option - * to `1` in your Emscripten build. It return a Promise that resolves to an - * initialized, ready-to-call `EmscriptenModule` instance. - * - * By default, the factory function will be named `Module`. It's recommended to - * use the `EXPORT_ES6` option, in which the factory function will be the - * default export. If used without `EXPORT_ES6`, the factory function will be a - * global variable. You can rename the variable using the `EXPORT_NAME` build - * option. It's left to you to declare any global variables as needed in your - * application's types. - * @param moduleOverrides Default properties for the initialized module. 
- */ -type EmscriptenModuleFactory = ( - moduleOverrides?: Partial -) => Promise; - -export interface FS { - Lookup: { - path: string; - node: FSNode; - }; - - FSStream: {}; - FSNode: {}; - ErrnoError: {}; - - ignorePermissions: boolean; - trackingDelegate: any; - tracking: any; - genericErrors: any; - - filesystems: { - NODEFS: Emscripten.FileSystemType; - MEMFS: Emscripten.FileSystemType; - IDBFS: Emscripten.FileSystemType; - }; - - // - // paths - // - lookupPath(path: string, opts: any): Lookup; - getPath(node: FSNode): string; - - // - // nodes - // - isFile(mode: number): boolean; - isDir(mode: number): boolean; - isLink(mode: number): boolean; - isChrdev(mode: number): boolean; - isBlkdev(mode: number): boolean; - isFIFO(mode: number): boolean; - isSocket(mode: number): boolean; - - // - // devices - // - major(dev: number): number; - minor(dev: number): number; - makedev(ma: number, mi: number): number; - registerDevice(dev: number, ops: any): void; - - // - // core - // - syncfs(populate: boolean, callback: (e: any) => any): void; - syncfs(callback: (e: any) => any, populate?: boolean): void; - mount(type: Emscripten.FileSystemType, opts: any, mountpoint: string): any; - unmount(mountpoint: string): void; - - mkdir(path: string, mode?: number): any; - mkdev(path: string, mode?: number, dev?: number): any; - symlink(oldpath: string, newpath: string): any; - rename(old_path: string, new_path: string): void; - rmdir(path: string): void; - readdir(path: string): any; - unlink(path: string): void; - readlink(path: string): string; - stat(path: string, dontFollow?: boolean): any; - lstat(path: string): any; - chmod(path: string, mode: number, dontFollow?: boolean): void; - lchmod(path: string, mode: number): void; - fchmod(fd: number, mode: number): void; - chown(path: string, uid: number, gid: number, dontFollow?: boolean): void; - lchown(path: string, uid: number, gid: number): void; - fchown(fd: number, uid: number, gid: number): void; - truncate(path: string, len: number): void; - ftruncate(fd: number, len: number): void; - utime(path: string, atime: number, mtime: number): void; - open( - path: string, - flags: string, - mode?: number, - fd_start?: number, - fd_end?: number - ): FSStream; - close(stream: FSStream): void; - llseek(stream: FSStream, offset: number, whence: number): any; - read( - stream: FSStream, - buffer: ArrayBufferView, - offset: number, - length: number, - position?: number - ): number; - write( - stream: FSStream, - buffer: ArrayBufferView, - offset: number, - length: number, - position?: number, - canOwn?: boolean - ): number; - allocate(stream: FSStream, offset: number, length: number): void; - mmap( - stream: FSStream, - buffer: ArrayBufferView, - offset: number, - length: number, - position: number, - prot: number, - flags: number - ): any; - ioctl(stream: FSStream, cmd: any, arg: any): any; - readFile( - path: string, - opts: { encoding: "binary"; flags?: string | undefined } - ): Uint8Array; - readFile( - path: string, - opts: { encoding: "utf8"; flags?: string | undefined } - ): string; - readFile(path: string, opts?: { flags?: string | undefined }): Uint8Array; - writeFile( - path: string, - data: string | ArrayBufferView, - opts?: { flags?: string | undefined } - ): void; - - // - // module-level FS code - // - cwd(): string; - chdir(path: string): void; - init( - input: null | (() => number | null), - output: null | ((c: number) => any), - error: null | ((c: number) => any) - ): void; - - createLazyFile( - parent: string | FSNode, - name: string, - 
url: string, - canRead: boolean, - canWrite: boolean - ): FSNode; - createPreloadedFile( - parent: string | FSNode, - name: string, - url: string, - canRead: boolean, - canWrite: boolean, - onload?: () => void, - onerror?: () => void, - dontCreateFile?: boolean, - canOwn?: boolean - ): void; - createDataFile( - parent: string | FSNode, - name: string, - data: ArrayBufferView, - canRead: boolean, - canWrite: boolean, - canOwn: boolean - ): FSNode; -} - -declare var MEMFS: Emscripten.FileSystemType; -declare var NODEFS: Emscripten.FileSystemType; -declare var IDBFS: Emscripten.FileSystemType; - -// https://emscripten.org/docs/porting/connecting_cpp_and_javascript/Interacting-with-code.html -type StringToType = R extends Emscripten.JSType - ? { - number: number; - string: string; - array: number[] | string[] | boolean[] | Uint8Array | Int8Array; - boolean: boolean; - null: null; - }[R] - : never; - -type ArgsToType> = Extract< - { - [P in keyof T]: StringToType; - }, - any[] ->; - -type ReturnToType = R extends null - ? null - : StringToType>; - -// Below runtime function/variable declarations are exportable by -// -s EXTRA_EXPORTED_RUNTIME_METHODS. You can extend or merge -// EmscriptenModule interface to add runtime functions. -// -// For example, by using -s "EXTRA_EXPORTED_RUNTIME_METHODS=['ccall']" -// You can access ccall() via Module["ccall"]. In this case, you should -// extend EmscriptenModule to pass the compiler check like the following: -// -// interface YourOwnEmscriptenModule extends EmscriptenModule { -// ccall: typeof ccall; -// } -// -// See: https://emscripten.org/docs/getting_started/FAQ.html#why-do-i-get-typeerror-module-something-is-not-a-function - -declare function cwrap< - I extends Array | [], - R extends Emscripten.JSType | null ->( - ident: string, - returnType: R, - argTypes: I, - opts?: Emscripten.CCallOpts -): (...arg: ArgsToType) => ReturnToType; - -declare function ccall< - I extends Array | [], - R extends Emscripten.JSType | null ->( - ident: string, - returnType: R, - argTypes: I, - args: ArgsToType, - opts?: Emscripten.CCallOpts -): ReturnToType; - -declare function setValue( - ptr: number, - value: any, - type: Emscripten.CType, - noSafe?: boolean -): void; -declare function getValue( - ptr: number, - type: Emscripten.CType, - noSafe?: boolean -): number; - -declare function allocate( - slab: number[] | ArrayBufferView | number, - types: Emscripten.CType | Emscripten.CType[], - allocator: number, - ptr?: number -): number; - -declare function stackAlloc(size: number): number; -declare function stackSave(): number; -declare function stackRestore(ptr: number): void; - -declare function UTF8ToString(ptr: number, maxBytesToRead?: number): string; -declare function stringToUTF8( - str: string, - outPtr: number, - maxBytesToRead?: number -): void; -declare function lengthBytesUTF8(str: string): number; -declare function allocateUTF8(str: string): number; -declare function allocateUTF8OnStack(str: string): number; -declare function UTF16ToString(ptr: number): string; -declare function stringToUTF16( - str: string, - outPtr: number, - maxBytesToRead?: number -): void; -declare function lengthBytesUTF16(str: string): number; -declare function UTF32ToString(ptr: number): string; -declare function stringToUTF32( - str: string, - outPtr: number, - maxBytesToRead?: number -): void; -declare function lengthBytesUTF32(str: string): number; - -declare function intArrayFromString( - stringy: string, - dontAddNull?: boolean, - length?: number -): number[]; -declare function 
intArrayToString(array: number[]): string; -declare function writeStringToMemory( - str: string, - buffer: number, - dontAddNull: boolean -): void; -declare function writeArrayToMemory(array: number[], buffer: number): void; -declare function writeAsciiToMemory( - str: string, - buffer: number, - dontAddNull: boolean -): void; - -declare function addRunDependency(id: any): void; -declare function removeRunDependency(id: any): void; - -declare function addFunction( - func: (...args: any[]) => any, - signature?: string -): number; -declare function removeFunction(funcPtr: number): void; - -declare var ALLOC_NORMAL: number; -declare var ALLOC_STACK: number; -declare var ALLOC_STATIC: number; -declare var ALLOC_DYNAMIC: number; -declare var ALLOC_NONE: number; - -export interface EmPostgres extends EmscriptenModule { - FS: FS; - eventTarget: EventTarget; - Event: typeof CustomEvent; - onRuntimeInitialized: (Module: EmPostgres) => Promise; -} - -function EmPostgresFactory( - opts?: Partial, - __dirname?: any, - require?: (string) => any -): Promise; - -export default EmPostgresFactory; diff --git a/packages/pglite/release/share.d.ts b/packages/pglite/release/share.d.ts deleted file mode 100644 index bba6e1d2..00000000 --- a/packages/pglite/release/share.d.ts +++ /dev/null @@ -1,8 +0,0 @@ -import { type EmPostgres } from "./postgres.js"; - -function loadPgShare( - module: Partial, - require?: (string) => any -): Partial; - -export default loadPgShare; diff --git a/packages/pglite/scripts/bundle-wasm.ts b/packages/pglite/scripts/bundle-wasm.ts index 7a90e3ce..e9b10a32 100644 --- a/packages/pglite/scripts/bundle-wasm.ts +++ b/packages/pglite/scripts/bundle-wasm.ts @@ -35,7 +35,9 @@ async function findAndReplaceInDir( async function main() { await fs.copyFile("./release/postgres.wasm", "./dist/postgres.wasm"); - await fs.copyFile("./release/share.data", "./dist/share.data"); + await fs.copyFile("./release/postgres.data", "./dist/postgres.data"); +// await fs.copyFile("./release/postgres.so", "./dist/postgres.so"); + await fs.copyFile("./release/vector.tar.gz", "./dist/vector.tar.gz"); await findAndReplaceInDir( "./dist", /new URL\('\.\.\/release\//g, @@ -48,6 +50,12 @@ async function main() { 'new URL("./', [".js"] ); + await findAndReplaceInDir( + "./dist/vector", + /new URL\("\.\.\/\.\.\/release\//g, + 'new URL("\.\.\/', + [".js"] + ); } await main(); diff --git a/packages/pglite/scripts/modify-postgres-js.js b/packages/pglite/scripts/modify-postgres-js.js deleted file mode 100644 index c141b6f9..00000000 --- a/packages/pglite/scripts/modify-postgres-js.js +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env node -import * as fs from "fs"; -import * as path from "path"; -import { fileURLToPath } from "url"; - -const filename = fileURLToPath(import.meta.url); -const dirname = path.dirname(filename); - -const postgresJsPath = path.resolve(dirname, "../release/postgres.js"); -let postgresJs = fs.readFileSync(postgresJsPath, "utf8"); -postgresJs = postgresJs - // Fix for ReferenceError: asyncifyStubs is not defined - // see: https://github.com/emscripten-core/emscripten/issues/21104 - // var Module=moduleArg or var Module = moduleArg - .replace(/var Module\s?=\s?moduleArg;/g, "var Module = moduleArg; var asyncifyStubs = {};") - // Make doRun async so we can perform async operations inside onRuntimeInitialized - .replace("function doRun()", "async function doRun()") - .replace( - 'Module["onRuntimeInitialized"]()', - 'await Module["onRuntimeInitialized"](Module)' - ) -fs.writeFileSync(postgresJsPath, 
postgresJs); diff --git a/packages/pglite/scripts/modify-share-js.js b/packages/pglite/scripts/modify-share-js.js deleted file mode 100644 index 666b7106..00000000 --- a/packages/pglite/scripts/modify-share-js.js +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env node -import * as fs from "fs"; -import * as path from "path"; -import { fileURLToPath } from "url"; - -const filename = fileURLToPath(import.meta.url); -const dirname = path.dirname(filename); - -const postgresJsPath = path.resolve(dirname, "../release/share.js"); -let postgresJs = fs.readFileSync(postgresJsPath, "utf8"); -postgresJs = `var Module = (ModuleBase, require) => { -${postgresJs} -return Module; -}; -export default Module; -`; -fs.writeFileSync(postgresJsPath, postgresJs); diff --git a/packages/pglite/scripts/modify-share.js b/packages/pglite/scripts/modify-share.js deleted file mode 100644 index 1f60ae45..00000000 --- a/packages/pglite/scripts/modify-share.js +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env node -import * as fs from "fs"; -import * as path from "path"; -import { fileURLToPath } from "url"; - -const filename = fileURLToPath(import.meta.url); -const dirname = path.dirname(filename); -let sharePath = path.resolve( - dirname, - "../../../postgres/tmp_install/usr/local/pgsql/share" -); - -let bki = fs - .readFileSync(sharePath + "/postgres.bki", "utf8") - .replaceAll("NAMEDATALEN", "64") - .replaceAll("SIZEOF_POINTER", "4") - .replaceAll("ALIGNOF_POINTER", "i") - .replaceAll("FLOAT8PASSBYVAL", "false") - .replaceAll("POSTGRES", "'postgres'") - .replaceAll("ENCODING", "6") // PG_UTF8 - .replaceAll("LC_COLLATE", "'en_US.UTF-8'") - .replaceAll("LC_CTYPE", "'en_US.UTF-8'"); - -fs.writeFileSync(sharePath + "/postgres_wasm.bki", bki); -fs.unlinkSync(sharePath + "/postgres.bki"); diff --git a/packages/pglite/src/definitions/tinytar.d.ts b/packages/pglite/src/definitions/tinytar.d.ts new file mode 100644 index 00000000..90e4f760 --- /dev/null +++ b/packages/pglite/src/definitions/tinytar.d.ts @@ -0,0 +1,64 @@ +declare module "tinytar" { + interface TarFile { + name: string; + mode?: number; + uid?: number; + gid?: number; + size?: number; + modifyTime?: number | Date; + checksum?: number; + type?: number; + linkName?: string; + ustar?: string; + owner?: string; + group?: string; + majorNumber?: number; + minorNumber?: number; + prefix?: string; + accessTime?: number | Date; + createTime?: number | Date; + data: Uint8Array; + isOldGNUFormat?: boolean; + } + + interface UntarOptions { + extractData?: boolean; + checkHeader?: boolean; + checkChecksum?: boolean; + checkFileSize?: boolean; + } + + function tar(files: TarFile[]): Uint8Array; + function untar(buffer: Uint8Array, options?: UntarOptions): TarFile[]; + + const NULL_CHAR: string; + const TMAGIC: string; + const OLDGNU_MAGIC: string; + + // Values used in typeflag field + const REGTYPE: number; + const LNKTYPE: number; + const SYMTYPE: number; + const CHRTYPE: number; + const BLKTYPE: number; + const DIRTYPE: number; + const FIFOTYPE: number; + const CONTTYPE: number; + + // Bits used in the mode field, values in octal + const TSUID: number; + const TSGID: number; + const TSVTX: number; + const TUREAD: number; + const TUWRITE: number; + const TUEXEC: number; + const TGREAD: number; + const TGWRITE: number; + const TGEXEC: number; + const TOREAD: number; + const TOWRITE: number; + const TOEXEC: number; + + const TPERMALL: number; + const TPERMMASK: number; +} diff --git a/packages/pglite/src/event.ts b/packages/pglite/src/event.ts deleted file mode 100644 index 
f41f7314..00000000 --- a/packages/pglite/src/event.ts +++ /dev/null @@ -1,23 +0,0 @@ -import { IN_NODE } from "./utils.js"; - -let PGEvent: typeof CustomEvent; - -// Older versions of Node.js do not have CustomEvent -if (IN_NODE && typeof CustomEvent === "undefined") { - PGEvent = class CustomEvent<T> extends Event { - #detail: T | null; - - constructor(type: string, options?: EventInit & { detail: T }) { - super(type, options); - this.#detail = options?.detail ?? null; - } - - get detail() { - return this.#detail; - } - } as typeof CustomEvent; -} else { - PGEvent = CustomEvent; -} - -export { PGEvent }; diff --git a/packages/pglite/src/extensionUtils.ts b/packages/pglite/src/extensionUtils.ts new file mode 100644 index 00000000..a2ca2f85 --- /dev/null +++ b/packages/pglite/src/extensionUtils.ts @@ -0,0 +1,109 @@ +import tinyTar from "tinytar"; +import { IN_NODE } from "./utils.js"; +import type { PostgresMod } from "./postgres.js"; + +export async function loadExtensionBundle( + bundlePath: URL, +): Promise<Blob | null> { + // Async load the extension bundle tar file + // could be from a URL or a file + if (IN_NODE) { + const fs = await import("fs"); + const zlib = await import("zlib"); + const { Writable } = await import("stream"); + const { pipeline } = await import("stream/promises"); + + if (!fs.existsSync(bundlePath)) { + throw new Error(`Extension bundle not found: ${bundlePath}`); + } + + const gunzip = zlib.createGunzip(); + const chunks: Uint8Array[] = []; + + await pipeline( + fs.createReadStream(bundlePath), + gunzip, + new Writable({ + write(chunk, encoding, callback) { + chunks.push(chunk); + callback(); + }, + }), + ); + return new Blob(chunks); + } else { + const response = await fetch(bundlePath.toString()); + if (!response.ok || !response.body) { + return null; + } + const decompressionStream = new DecompressionStream("gzip"); + const decompressedStream = new Response( + response.body.pipeThrough(decompressionStream), + ); + return decompressedStream.blob(); + } +} + +export async function loadExtensions( + mod: PostgresMod, + log: (...args: any[]) => void, +) { + for (const ext in mod.pg_extensions) { + let blob; + try { + blob = await mod.pg_extensions[ext]; + } catch (err) { + console.error("Failed to fetch extension:", ext, err); + continue; + } + if (blob) { + const bytes = new Uint8Array(await blob.arrayBuffer()); + loadExtension(mod, ext, bytes, log); + } else { + console.error("Could not get binary data for extension:", ext); + } + } +} + +function loadExtension( + mod: PostgresMod, + ext: string, + bytes: Uint8Array, + log: (...args: any[]) => void, +) { + const data = tinyTar.untar(bytes); + data.forEach((file) => { + if (!file.name.startsWith(".")) { + const filePath = mod.WASM_PREFIX + "/" + file.name; + if (file.name.endsWith(".so")) { + const extOk = (...args: any[]) => { + log("pgfs:ext OK", filePath, args); + }; + const extFail = (...args: any[]) => { + log("pgfs:ext FAIL", filePath, args); + }; + mod.FS.createPreloadedFile( + dirname(filePath), + file.name.split("/").pop()!.slice(0, -3), + file.data as any, // There is a type error in Emscripten's FS.createPreloadedFile, it expects a Uint8Array, but the type is defined as any + true, + true, + extOk, + extFail, + false, + ); + } else { + mod.FS.writeFile(filePath, file.data); + } + } + }); +} + +function dirname(path: string) { + const last = path.lastIndexOf("/"); + if (last > 0) { + return path.slice(0, last); + } else { + return path; + } +} diff --git a/packages/pglite/src/fs/idbfs.ts 
b/packages/pglite/src/fs/idbfs.ts index e8beef45..29fe13e4 100644 --- a/packages/pglite/src/fs/idbfs.ts +++ b/packages/pglite/src/fs/idbfs.ts @@ -1,101 +1,46 @@ import { FilesystemBase } from "./types.js"; +import type { FS, PostgresMod } from "../postgres.js"; import { PGDATA } from "./index.js"; -import { copyDir } from "./utils.js"; -import type { FS, EmPostgres } from "../../release/postgres.js"; -import loadPgShare from "../../release/share.js"; -import { initDb } from "../initdb.js"; -import { nodeValues } from "../utils.js"; -import type { DebugLevel } from "../index.js"; +import { dumpTar } from "./tarUtils.js"; export class IdbFs extends FilesystemBase { - initModule?: any; - - async init(debug?: DebugLevel) { - const dbExists = () => - new Promise<boolean>((resolve, reject) => { - const request = globalThis.indexedDB.open(`/pglite${this.dataDir}`); - let exists = true; - request.onupgradeneeded = (e) => { - if (e.oldVersion === 0) { - exists = false; - } - }; - request.onerror = (e) => { - resolve(false); - }; - request.onsuccess = (e) => { - const db = request.result; - db.close(); - if (!exists) { - globalThis.indexedDB.deleteDatabase(`/pglite${this.dataDir}`); - } - resolve(exists); - }; - }); - - if (!(await dbExists())) { - this.initModule = await initDb(undefined, debug); - return true; - } else { - return false; - } - } - - async emscriptenOpts(opts: Partial<EmPostgres>) { - const options: Partial<EmPostgres> = { + async emscriptenOpts(opts: Partial<PostgresMod>) { + const options: Partial<PostgresMod> = { ...opts, preRun: [ ...(opts.preRun || []), (mod: any) => { const idbfs = mod.FS.filesystems.IDBFS; - // Mount the idbfs to the users dataDir - // then symlink the PGDATA to the idbfs mount + // Mount the idbfs to the user's dataDir then symlink the PGDATA to the + // idbfs mount point. + // We specifically use /pglite as the root directory for the idbfs + // as the fs will be persisted in IndexedDB as a database with + // the path as the name. 
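+ // Illustrative note (editorial, not from this patch): with this layout, + // new PGlite("idb://my-db") mounts IDBFS at /pglite/my-db, so the backing + // IndexedDB database is named "/pglite/my-db".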
mod.FS.mkdir(`/pglite`); - mod.FS.mkdir(`/pglite${this.dataDir}`); - mod.FS.mount(idbfs, {}, `/pglite${this.dataDir}`); - mod.FS.symlink(`/pglite${this.dataDir}`, PGDATA); - - if (this.initModule) { - // We need to copy the files from the memory filesystem to the main fs - const proxyfs = mod.FS.filesystems.PROXYFS; - mod.FS.mkdir(PGDATA + "_temp"); - mod.FS.mount( - proxyfs, - { root: PGDATA + "/", fs: this.initModule.FS }, - PGDATA + "_temp", - ); - copyDir(mod.FS, PGDATA + "_temp", PGDATA); - mod.FS.unmount(PGDATA + "_temp"); - } else { - mod.FS; - } + mod.FS.mkdir(`/pglite/${this.dataDir}`); + mod.FS.mount(idbfs, {}, `/pglite/${this.dataDir}`); + mod.FS.symlink(`/pglite/${this.dataDir}`, PGDATA); }, ], }; - const { require } = await nodeValues(); - loadPgShare(options, require); return options; } initialSyncFs(fs: FS) { - if (this.initModule) { - return this.syncToFs(fs); - } else { - return new Promise((resolve, reject) => { - fs.syncfs(true, (err: any) => { - if (err) { - reject(err); - } else { - resolve(); - } - }); + return new Promise((resolve, reject) => { + fs.syncfs(true, (err: any) => { + if (err) { + reject(err); + } else { + resolve(); + } }); - } + }); } syncToFs(fs: FS) { return new Promise((resolve, reject) => { - fs.syncfs((err: any) => { + fs.syncfs(false, (err: any) => { if (err) { reject(err); } else { @@ -104,4 +49,8 @@ export class IdbFs extends FilesystemBase { }); }); } + + async dumpTar(mod: FS, dbname: string) { + return dumpTar(mod, dbname); + } } diff --git a/packages/pglite/src/fs/index.ts b/packages/pglite/src/fs/index.ts index 4532909b..0dfd5423 100644 --- a/packages/pglite/src/fs/index.ts +++ b/packages/pglite/src/fs/index.ts @@ -1,10 +1,11 @@ -import type { FsType } from "./types.js"; +import type { FsType, Filesystem } from "./types.js"; import { IdbFs } from "./idbfs.js"; import { MemoryFS } from "./memoryfs.js"; export type * from "./types.js"; -export const PGDATA = "/pgdata"; +export const WASM_PREFIX = "/tmp/pglite"; +export const PGDATA = WASM_PREFIX + "/" + "base"; export function parseDataDir(dataDir?: string) { let fsType: FsType; @@ -18,12 +19,6 @@ export function parseDataDir(dataDir?: string) { } else if (dataDir?.startsWith("idb://")) { // Remove the idb:// prefix, and use indexeddb filesystem dataDir = dataDir.slice(6); - if (!dataDir.startsWith("/")) { - dataDir = "/" + dataDir; - } - if (dataDir.length <= 1) { - throw new Error("Invalid dataDir, path required for idbfs"); - } fsType = "idbfs"; } else if (!dataDir || dataDir?.startsWith("memory://")) { // Use in-memory filesystem @@ -36,13 +31,15 @@ export function parseDataDir(dataDir?: string) { } export async function loadFs(dataDir?: string, fsType?: FsType) { + let fs: Filesystem; if (dataDir && fsType === "nodefs") { // Lazy load the nodefs to avoid bundling it in the browser const { NodeFS } = await import("./nodefs.js"); - return new NodeFS(dataDir); + fs = new NodeFS(dataDir); } else if (dataDir && fsType === "idbfs") { - return new IdbFs(dataDir); + fs = new IdbFs(dataDir); } else { - return new MemoryFS(); + fs = new MemoryFS(); } + return fs; } diff --git a/packages/pglite/src/fs/memoryfs.ts b/packages/pglite/src/fs/memoryfs.ts index 07332a00..82b7ffdc 100644 --- a/packages/pglite/src/fs/memoryfs.ts +++ b/packages/pglite/src/fs/memoryfs.ts @@ -1,45 +1,14 @@ import { FilesystemBase } from "./types.js"; -import { PGDATA } from "./index.js"; -import { copyDir } from "./utils.js"; -import type { EmPostgres } from "../../release/postgres.js"; -import loadPgShare from 
"../../release/share.js"; -import { initDb } from "../initdb.js"; -import { nodeValues } from "../utils.js"; -import type { DebugLevel } from "../index.js"; +import type { PostgresMod, FS } from "../postgres.js"; +import { dumpTar } from "./tarUtils.js"; export class MemoryFS extends FilesystemBase { - initModule?: any; - - async init(debug?: DebugLevel) { - this.initModule = await initDb(undefined, debug); - return true; + async emscriptenOpts(opts: Partial) { + // Nothing to do for memoryfs + return opts; } - async emscriptenOpts(opts: Partial) { - const options: Partial = { - ...opts, - preRun: [ - ...(opts.preRun || []), - (mod: any) => { - /** - * There is an issue with just mounting the filesystem, Postgres stalls... - * so we need to copy the files from the memory filesystem to the main fs - */ - const proxyfs = mod.FS.filesystems.PROXYFS; - mod.FS.mkdir(PGDATA + "_temp"); - mod.FS.mkdir(PGDATA); - mod.FS.mount( - proxyfs, - { root: PGDATA + "/", fs: this.initModule.FS }, - PGDATA + "_temp", - ); - copyDir(mod.FS, PGDATA + "_temp", PGDATA); - mod.FS.unmount(PGDATA + "_temp"); - }, - ], - }; - const { require } = await nodeValues(); - loadPgShare(options, require); - return options; + async dumpTar(mod: FS, dbname: string) { + return dumpTar(mod, dbname); } } diff --git a/packages/pglite/src/fs/nodefs.ts b/packages/pglite/src/fs/nodefs.ts index a4118350..8e846e5c 100644 --- a/packages/pglite/src/fs/nodefs.ts +++ b/packages/pglite/src/fs/nodefs.ts @@ -2,11 +2,8 @@ import * as fs from "fs"; import * as path from "path"; import { FilesystemBase } from "./types.js"; import { PGDATA } from "./index.js"; -import { initDb } from "../initdb.js"; -import loadPgShare from "../../release/share.js"; -import type { EmPostgres } from "../../release/postgres.js"; -import { nodeValues } from "../utils.js"; -import type { DebugLevel } from "../index.js"; +import type { PostgresMod, FS } from "../postgres.js"; +import { dumpTar } from "./tarUtils.js"; export class NodeFS extends FilesystemBase { protected rootDir: string; @@ -14,22 +11,13 @@ export class NodeFS extends FilesystemBase { constructor(dataDir: string) { super(dataDir); this.rootDir = path.resolve(dataDir); - } - - async init(debug?: DebugLevel) { - if (!this.dataDir) { - throw new Error("No datadir specified"); - } - if (fs.existsSync(path.join(this.dataDir!, "PG_VERSION"))) { - return false; + if (!fs.existsSync(path.join(this.rootDir))) { + fs.mkdirSync(this.rootDir); } - fs.mkdirSync(this.dataDir); - await initDb(this.dataDir, debug); - return true; } - async emscriptenOpts(opts: Partial) { - const options: Partial = { + async emscriptenOpts(opts: Partial) { + const options: Partial = { ...opts, preRun: [ ...(opts.preRun || []), @@ -40,8 +28,10 @@ export class NodeFS extends FilesystemBase { }, ], }; - const { require } = await nodeValues(); - loadPgShare(options, require); return options; } + + async dumpTar(mod: FS, dbname: string) { + return dumpTar(mod, dbname); + } } diff --git a/packages/pglite/src/fs/tarUtils.ts b/packages/pglite/src/fs/tarUtils.ts new file mode 100644 index 00000000..b87c6539 --- /dev/null +++ b/packages/pglite/src/fs/tarUtils.ts @@ -0,0 +1,211 @@ +import { tar, untar, type TarFile, REGTYPE, DIRTYPE } from "tinytar"; +import { FS } from "../postgres.js"; +import { PGDATA } from "./index.js"; + +export async function dumpTar(FS: FS, dbname?: string): Promise { + const tarball = createTarball(FS, PGDATA); + const [compressed, zipped] = await maybeZip(tarball); + const filename = (dbname || "pgdata") + (zipped ? 
".tar.gz" : ".tar"); + const type = zipped ? "application/x-gzip" : "application/x-tar"; + if (typeof File !== "undefined") { + return new File([compressed], filename, { + type, + }); + } else { + return new Blob([compressed], { + type, + }); + } +} + +const compressedMimeTypes = [ + "application/x-gtar", + "application/x-tar+gzip", + "application/x-gzip", + "application/gzip", +]; + +export async function loadTar(FS: FS, file: File | Blob): Promise { + let tarball = new Uint8Array(await file.arrayBuffer()); + const filename = + typeof File !== "undefined" && file instanceof File ? file.name : undefined; + const compressed = + compressedMimeTypes.includes(file.type) || + filename?.endsWith(".tgz") || + filename?.endsWith(".tar.gz"); + if (compressed) { + tarball = await unzip(tarball); + } + + const files = untar(tarball); + for (const file of files) { + const filePath = PGDATA + file.name; + + // Ensure the directory structure exists + const dirPath = filePath.split("/").slice(0, -1); + for (let i = 1; i <= dirPath.length; i++) { + const dir = dirPath.slice(0, i).join("/"); + if (!FS.analyzePath(dir).exists) { + FS.mkdir(dir); + } + } + + // Write the file or directory + if (file.type == REGTYPE) { + FS.writeFile(filePath, file.data); + FS.utime( + filePath, + dateToUnixTimestamp(file.modifyTime), + dateToUnixTimestamp(file.modifyTime), + ); + } else if (file.type == DIRTYPE) { + FS.mkdir(filePath); + } + } +} + +function readDirectory(FS: FS, path: string) { + let files: TarFile[] = []; + + const traverseDirectory = (currentPath: string) => { + const entries = FS.readdir(currentPath); + entries.forEach((entry) => { + if (entry === "." || entry === "..") { + return; + } + const fullPath = currentPath + "/" + entry; + const stats = FS.stat(fullPath); + const data = FS.isFile(stats.mode) + ? FS.readFile(fullPath, { encoding: "binary" }) + : new Uint8Array(0); + files.push({ + name: fullPath.substring(path.length), // remove the root path + mode: stats.mode, + size: stats.size, + type: FS.isFile(stats.mode) ? 
REGTYPE : DIRTYPE, + modifyTime: stats.mtime, + data, + }); + if (FS.isDir(stats.mode)) { + traverseDirectory(fullPath); + } + }); + }; + + traverseDirectory(path); + return files; +} + +export function createTarball(FS: FS, directoryPath: string) { + const files = readDirectory(FS, directoryPath); + const tarball = tar(files); + return tarball; +} + +export async function maybeZip( + file: Uint8Array, +): Promise<[Uint8Array, boolean]> { + if (typeof window !== "undefined" && "CompressionStream" in window) { + return [await zipBrowser(file), true]; + } else if ( + typeof process !== "undefined" && + process.versions && + process.versions.node + ) { + return [await zipNode(file), true]; + } else { + return [file, false]; + } +} + +export async function zipBrowser(file: Uint8Array): Promise<Uint8Array> { + const cs = new CompressionStream("gzip"); + const writer = cs.writable.getWriter(); + const reader = cs.readable.getReader(); + + writer.write(file); + writer.close(); + + const chunks: Uint8Array[] = []; + + while (true) { + const { value, done } = await reader.read(); + if (done) break; + if (value) chunks.push(value); + } + + const compressed = new Uint8Array( + chunks.reduce((acc, chunk) => acc + chunk.length, 0), + ); + let offset = 0; + chunks.forEach((chunk) => { + compressed.set(chunk, offset); + offset += chunk.length; + }); + + return compressed; +} + +export async function zipNode(file: Uint8Array): Promise<Uint8Array> { + const { promisify } = await import("util"); + const { gzip } = await import("zlib"); + const gzipPromise = promisify(gzip); + return await gzipPromise(file); +} + +export async function unzip(file: Uint8Array): Promise<Uint8Array> { + if (typeof window !== "undefined" && "DecompressionStream" in window) { + return await unzipBrowser(file); + } else if ( + typeof process !== "undefined" && + process.versions && + process.versions.node + ) { + return await unzipNode(file); + } else { + throw new Error("Unsupported environment for decompression"); + } +} + +export async function unzipBrowser(file: Uint8Array): Promise<Uint8Array> { + const ds = new DecompressionStream("gzip"); + const writer = ds.writable.getWriter(); + const reader = ds.readable.getReader(); + + writer.write(file); + writer.close(); + + const chunks: Uint8Array[] = []; + + while (true) { + const { value, done } = await reader.read(); + if (done) break; + if (value) chunks.push(value); + } + + const decompressed = new Uint8Array( + chunks.reduce((acc, chunk) => acc + chunk.length, 0), + ); + let offset = 0; + chunks.forEach((chunk) => { + decompressed.set(chunk, offset); + offset += chunk.length; + }); + + return decompressed; +} + +export async function unzipNode(file: Uint8Array): Promise<Uint8Array> { + const { promisify } = await import("util"); + const { gunzip } = await import("zlib"); + const gunzipPromise = promisify(gunzip); + return await gunzipPromise(file); +} + +function dateToUnixTimestamp(date: Date | number | undefined): number { + if (!date) { + return Math.floor(Date.now() / 1000); + } else { + return typeof date === "number" ? 
date : Math.floor(date.getTime() / 1000); + } +} diff --git a/packages/pglite/src/fs/types.ts b/packages/pglite/src/fs/types.ts index 8813cd29..c3b6f0a7 100644 --- a/packages/pglite/src/fs/types.ts +++ b/packages/pglite/src/fs/types.ts @@ -1,5 +1,4 @@ -import type { EmPostgres, FS } from "../../release/postgres.js"; -import type { DebugLevel } from "../index.js"; +import type { PostgresMod, FS } from "../postgres.js"; export type FsType = "nodefs" | "idbfs" | "memoryfs"; @@ -8,25 +7,25 @@ export interface FilesystemFactory { } export interface Filesystem { - /** - * Returns true if the filesystem was initialized and this is the fun run. - */ - init(debug?: DebugLevel): Promise<boolean>; - /** * Returns the options to pass to the emscripten module. */ - emscriptenOpts(opts: Partial<EmPostgres>): Promise<Partial<EmPostgres>>; + emscriptenOpts(opts: Partial<PostgresMod>): Promise<Partial<PostgresMod>>; /** * Sync the filesystem to the emscripten filesystem. */ - syncToFs(mod: FS): Promise<void>; + syncToFs(FS: FS): Promise<void>; /** * Sync the emscripten filesystem to the filesystem. */ - initialSyncFs(mod: FS): Promise<void>; + initialSyncFs(FS: FS): Promise<void>; + + /** + * Dump the PGDATA dir from the filesystem to a gzipped tarball. + */ + dumpTar(FS: FS, dbname: string): Promise<File | Blob>; } export abstract class FilesystemBase implements Filesystem { @@ -34,10 +33,10 @@ export abstract class FilesystemBase implements Filesystem { constructor(dataDir?: string) { this.dataDir = dataDir; } - abstract init(): Promise<boolean>; abstract emscriptenOpts( - opts: Partial<EmPostgres>, - ): Promise<Partial<EmPostgres>>; + opts: Partial<PostgresMod>, + ): Promise<Partial<PostgresMod>>; - async syncToFs(mod: FS) {} + async syncToFs(FS: FS) {} async initialSyncFs(mod: FS) {} + abstract dumpTar(mod: FS, dbname: string): Promise<File | Blob>; } diff --git a/packages/pglite/src/fs/utils.ts b/packages/pglite/src/fs/utils.ts deleted file mode 100644 index 541aa42b..00000000 --- a/packages/pglite/src/fs/utils.ts +++ /dev/null @@ -1,22 +0,0 @@ -import type { FS } from "../../release/postgres.js"; - -export function copyDir(fs: FS, src: string, dest: string) { - const entries = fs.readdir(src); - for (const name of entries) { - if (name === "." 
|| name === "..") continue; - - const srcPath = src + "/" + name; - const destPath = dest + "/" + name; - if (isDir(fs, srcPath)) { - fs.mkdir(destPath); - copyDir(fs, srcPath, destPath); - } else { - const data = fs.readFile(srcPath); - fs.writeFile(destPath, data); - } - } -} - -export function isDir(fs: FS, path: string) { - return fs.isDir(fs.stat(path).mode); -} diff --git a/packages/pglite/src/initdb.ts b/packages/pglite/src/initdb.ts deleted file mode 100644 index 5f17ffe1..00000000 --- a/packages/pglite/src/initdb.ts +++ /dev/null @@ -1,84 +0,0 @@ -import { PGDATA } from "./fs/index.js"; -import EmPostgresFactory, { type EmPostgres } from "../release/postgres.js"; -import loadPgShare from "../release/share.js"; -import { makeLocateFile, nodeValues } from "./utils.js"; -import { DebugLevel } from "./index.js"; - -export const DIRS = [ - "global", - "pg_wal", - "pg_wal/archive_status", - "pg_commit_ts", - "pg_dynshmem", - "pg_notify", - "pg_serial", - "pg_snapshots", - "pg_subtrans", - "pg_twophase", - "pg_multixact", - "pg_multixact/members", - "pg_multixact/offsets", - "base", - "base/1", - "pg_replslot", - "pg_tblspc", - "pg_stat", - "pg_stat_tmp", - "pg_xact", - "pg_logical", - "pg_logical/snapshots", - "pg_logical/mapping", -]; - -export const FILES = [ - "postgresql.conf", - "postgresql.auto.conf", - "pg_ident.conf", - "pg_hba.conf", -]; - -export async function initDb(dataDir?: string, debug?: DebugLevel) { - const debugMode = debug !== undefined && debug > 0; - - const emscriptenOpts: Partial = { - preRun: [ - (mod: any) => { - mod.FS.mkdir(PGDATA, 0o750); - if (dataDir) { - const nodefs = mod.FS.filesystems.NODEFS; - mod.FS.mount(nodefs, { root: dataDir }, PGDATA); - } - for (const dir of DIRS) { - mod.FS.mkdir(PGDATA + "/" + dir, 0o700); - } - for (const filename of FILES) { - mod.FS.writeFile(PGDATA + "/" + filename, ""); - } - mod.FS.writeFile(PGDATA + "/PG_VERSION", "15devel"); - mod.FS.writeFile(PGDATA + "/base/1/PG_VERSION", "15devel"); - }, - ], - locateFile: await makeLocateFile(), - ...(debugMode - ? { print: console.info, printErr: console.error } - : { print: () => {}, printErr: () => {} }), - arguments: [ - "--boot", - "-x1", - "-X", - "16777216", - ...(debug ? 
["-d", debug.toString()] : []), - "-c", - "dynamic_shared_memory_type=mmap", - "-D", - PGDATA, - ], - }; - - const { require } = await nodeValues(); - - loadPgShare(emscriptenOpts, require); - - const mod = await EmPostgresFactory(emscriptenOpts); - return mod; -} diff --git a/packages/pglite/src/interface.ts b/packages/pglite/src/interface.ts index d6ba5800..c2fa7976 100644 --- a/packages/pglite/src/interface.ts +++ b/packages/pglite/src/interface.ts @@ -24,6 +24,7 @@ export interface ExecProtocolOptions { export interface ExtensionSetupResult { emscriptenOpts?: any; namespaceObj?: any; + bundlePath?: URL; init?: () => Promise; close?: () => Promise; } @@ -34,20 +35,27 @@ export type ExtensionSetup = ( ) => Promise; export interface Extension { - name?: string; + name: string; setup: ExtensionSetup; } export type Extensions = { - [namespace: string]: Extension; + [namespace: string]: Extension | URL; }; +export interface DumpDataDirResult { + tarball: Uint8Array; + extension: ".tar" | ".tgz"; + filename: string; +} + export interface PGliteOptions { dataDir?: string; fs?: Filesystem; debug?: DebugLevel; relaxedDurability?: boolean; extensions?: Extensions; + loadDataDir?: Blob | File; } export type PGliteInterface = { @@ -82,16 +90,17 @@ export type PGliteInterface = { callback: (channel: string, payload: string) => void, ): () => void; offNotification(callback: (channel: string, payload: string) => void): void; + dumpDataDir(): Promise; }; export type PGliteInterfaceExtensions = E extends Extensions ? { - [K in keyof E]: Awaited< - ReturnType - >["namespaceObj"] extends infer N - ? N extends undefined | null | void - ? never - : N + [K in keyof E]: E[K] extends Extension + ? Awaited>["namespaceObj"] extends infer N + ? N extends undefined | null | void + ? 
never + : N + : never : never; } : {}; diff --git a/packages/pglite/src/parse.ts b/packages/pglite/src/parse.ts index de988f97..867c2b79 100644 --- a/packages/pglite/src/parse.ts +++ b/packages/pglite/src/parse.ts @@ -14,7 +14,7 @@ import { parseType } from "./types.js"; export function parseResults( messages: Array<BackendMessage>, options?: QueryOptions, - blob?: Blob + blob?: Blob, ): Array<Results> { const resultSets: Results[] = []; let currentResultSet: Results = { rows: [], fields: [] }; @@ -24,7 +24,7 @@ (msg) => msg instanceof RowDescriptionMessage || msg instanceof DataRowMessage || - msg instanceof CommandCompleteMessage + msg instanceof CommandCompleteMessage, ); filteredMessages.forEach((msg, index) => { @@ -40,9 +40,9 @@ parseType( field, currentResultSet!.fields[i].dataTypeID, - options?.parsers - ) - ) + options?.parsers, + ), + ), ); } else { // rowMode === "object" @@ -53,10 +53,10 @@ parseType( field, currentResultSet!.fields[i].dataTypeID, - options?.parsers + options?.parsers, ), - ]) - ) + ]), + ), ); } } else if (msg instanceof CommandCompleteMessage) { diff --git a/packages/pglite/src/pglite.ts b/packages/pglite/src/pglite.ts index 3fb352fb..679accf2 100644 --- a/packages/pglite/src/pglite.ts +++ b/packages/pglite/src/pglite.ts @@ -1,8 +1,7 @@ import { Mutex } from "async-mutex"; -import EmPostgresFactory, { type EmPostgres } from "../release/postgres.js"; +import PostgresModFactory, { type PostgresMod } from "./postgres.js"; import { type Filesystem, parseDataDir, loadFs } from "./fs/index.js"; import { makeLocateFile } from "./utils.js"; -import { PGEvent } from "./event.js"; import { parseResults } from "./parse.js"; import { serializeType } from "./types.js"; import type { @@ -15,8 +14,11 @@ import type { ExecProtocolOptions, PGliteInterfaceExtensions, Extensions, - Extension, } from "./interface.js"; +import { loadExtensionBundle, loadExtensions } from "./extensionUtils.js"; +import { loadTar } from "./fs/tarUtils.js"; + +import { PGDATA, WASM_PREFIX } from "./fs/index.js"; // Importing the source as the built version is not ESM compatible import { serialize } from "pg-protocol/dist/index.js"; @@ -31,19 +33,15 @@ import { export class PGlite implements PGliteInterface { fs?: Filesystem; - protected emp?: any; + protected mod?: PostgresMod; + + readonly dataDir?: string; - #extensions: Extensions; - #initStarted = false; #ready = false; - #eventTarget: EventTarget; #closing = false; #closed = false; #inTransaction = false; #relaxedDurability = false; - #extensionsClose: Array<() => Promise<void>> = []; - - #resultAccumulator: Uint8Array[] = []; readonly waitReady: Promise<void>; @@ -55,13 +53,16 @@ export class PGlite implements PGliteInterface { readonly debug: DebugLevel = 0; + #extensions: Extensions; + #extensionsClose: Array<() => Promise<void>> = []; + #parser = new Parser(); // These are the current ArrayBuffer that is being read or written to // during a query, such as COPY FROM or COPY TO. 
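 // Illustrative example (editorial note, not part of this patch): a COPY TO // '/dev/blob' query exposes the accumulated chunks as a `blob` property on // the results, while COPY FROM '/dev/blob' reads from the Blob passed via // QueryOptions, e.g. db.query("COPY t FROM '/dev/blob';", [], { blob }).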
#queryReadBuffer?: ArrayBuffer; #queryWriteChunks?: Uint8Array[]; - + #notifyListeners = new Map<string, Set<(payload: string) => void>>(); #globalNotifyListeners = new Set< (channel: string, payload: string) => void >(); @@ -94,6 +95,7 @@ export class PGlite implements PGliteInterface { } else { options = dataDirOrPGliteOptions; } + this.dataDir = options.dataDir; // Enable debug logging if requested if (options?.debug !== undefined) { @@ -105,13 +107,8 @@ this.#relaxedDurability = options.relaxedDurability; } - // Create an event target to handle events from the emscripten module - this.#eventTarget = new EventTarget(); - - // Listen for result events from the emscripten module and accumulate them - this.#eventTarget.addEventListener("result", async (e: any) => { - this.#resultAccumulator.push(e.detail); - }); // Save the extensions for later use this.#extensions = options.extensions ?? {}; @@ -132,104 +129,96 @@ this.fs = await loadFs(dataDir, fsType); } + const extensionBundlePromises: Record<string, Promise<Blob | null>> = {}; const extensionInitFns: Array<() => Promise<void>> = []; - let firstRun = false; - await new Promise<void>(async (resolve, reject) => { - if (this.#initStarted) { - throw new Error("Already initializing"); - } - this.#initStarted = true; - - // Initialize the filesystem - // returns true if this is the first run, we then need to perform - // additional setup steps at the end of the init. - firstRun = await this.fs!.init(this.debug); - - let emscriptenOpts: Partial<EmPostgres> = { - arguments: [ - "--single", // Single user mode - "-F", // Disable fsync (TODO: Only for in-memory mode?) - "-O", // Allow the structure of system tables to be modified. This is used by initdb - "-j", // Single use mode - Use semicolon followed by two newlines, rather than just newline, as the command entry terminator. - "-c", // Set parameter - "search_path=pg_catalog", - "-c", - "dynamic_shared_memory_type=mmap", - "-c", - "max_prepared_transactions=10", - // Debug level - ...(this.debug ? ["-d", this.debug.toString()] : []), - "-D", // Data directory - "/pgdata", - "template1", - ], - locateFile: await makeLocateFile(), - ...(this.debug > 0 - ? { print: console.info, printErr: console.error } - : { print: () => {}, printErr: () => {} }), - preRun: [ - (mod: any) => { - // Register /dev/blob device - // This is used to read and write blobs when used in COPY TO/FROM - // e.g. 
COPY mytable TO '/dev/blob' WITH (FORMAT binary) - // The data is returned by the query as a `blob` property in the results - const devId = mod.FS.makedev(64, 0); - let callCounter = 0; - const devOpt = { - open: (stream: any) => {}, - close: (stream: any) => {}, - read: ( - stream: any, - buffer: Uint8Array, - offset: number, - length: number, - position: number, - ) => { - const buf = this.#queryReadBuffer; - if (!buf) { - throw new Error("No File or Blob provided to read from"); - } - const contents = new Uint8Array(buf); - if (position >= contents.length) return 0; - const size = Math.min(contents.length - position, length); - for (let i = 0; i < size; i++) { - buffer[offset + i] = contents[position + i]; - } - return size; - }, - write: ( - stream: any, - buffer: Uint8Array, - offset: number, - length: number, - position: number, - ) => { - callCounter++; - this.#queryWriteChunks ??= []; - this.#queryWriteChunks.push( - buffer.slice(offset, offset + length), - ); - return length; - }, - llseek: (stream: any, offset: number, whence: number) => { - throw new Error("Cannot seek /dev/blob"); - }, - }; - mod.FS.registerDevice(devId, devOpt); - mod.FS.mkdev("/dev/blob", devId); - }, - ], - onRuntimeInitialized: async (Module: EmPostgres) => { - await this.fs!.initialSyncFs(Module.FS); - this.#ready = true; - resolve(); + + const args = [ + `PGDATA=${PGDATA}`, + `PREFIX=${WASM_PREFIX}`, + "MODE=REACT", + "REPL=N", + // "-F", // Disable fsync (TODO: Only for in-memory mode?) + ...(this.debug ? ["-d", this.debug.toString()] : []), + ]; + + let emscriptenOpts: Partial = { + WASM_PREFIX, + arguments: args, + noExitRuntime: true, + ...(this.debug > 0 + ? { print: console.info, printErr: console.error } + : { print: () => {}, printErr: () => {} }), + locateFile: await makeLocateFile(), + preRun: [ + (mod: any) => { + // Register /dev/blob device + // This is used to read and write blobs when used in COPY TO/FROM + // e.g. 
COPY mytable TO '/dev/blob' WITH (FORMAT binary) + // The data is returned by the query as a `blob` property in the results + const devId = mod.FS.makedev(64, 0); + let callCounter = 0; + const devOpt = { + open: (stream: any) => {}, + close: (stream: any) => {}, + read: ( + stream: any, + buffer: Uint8Array, + offset: number, + length: number, + position: number, + ) => { + const buf = this.#queryReadBuffer; + if (!buf) { + throw new Error("No File or Blob provided to read from"); + } + const contents = new Uint8Array(buf); + if (position >= contents.length) return 0; + const size = Math.min(contents.length - position, length); + for (let i = 0; i < size; i++) { + buffer[offset + i] = contents[position + i]; + } + return size; + }, + write: ( + stream: any, + buffer: Uint8Array, + offset: number, + length: number, + position: number, + ) => { + callCounter++; + this.#queryWriteChunks ??= []; + this.#queryWriteChunks.push( + buffer.slice(offset, offset + length), + ); + return length; + }, + llseek: (stream: any, offset: number, whence: number) => { + throw new Error("Cannot seek /dev/blob"); + }, + }; + mod.FS.registerDevice(devId, devOpt); + mod.FS.mkdev("/dev/blob", devId); }, - eventTarget: this.#eventTarget, - Event: PGEvent, - }; + ], + }; - // Setup extensions - for (const [extName, ext] of Object.entries(this.#extensions)) { + emscriptenOpts = await this.fs!.emscriptenOpts(emscriptenOpts); + + // # Setup extensions + // This is the first step of loading PGlite extensions + // We loop through each extension and call the setup function + // This amends the emscriptenOpts and can return: + // - emscriptenOpts: The updated emscripten options + // - namespaceObj: The namespace object to attach to the PGlite instance + // - init: A function to initialize the extension/plugin after the database is ready + // - close: A function to close/tidy-up the extension/plugin when the database is closed + for (const [extName, ext] of Object.entries(this.#extensions)) { + if (ext instanceof URL) { + // Extension with only a URL to a bundle + extensionBundlePromises[extName] = loadExtensionBundle(ext); + } else { + // Extension with JS setup function const extRet = await ext.setup(this, emscriptenOpts); if (extRet.emscriptenOpts) { emscriptenOpts = extRet.emscriptenOpts; @@ -237,6 +226,11 @@ export class PGlite implements PGliteInterface { if (extRet.namespaceObj) { (this as any)[extName] = extRet.namespaceObj; } + if (extRet.bundlePath) { + extensionBundlePromises[extName] = loadExtensionBundle( + extRet.bundlePath, + ); // Don't await here, this is parallel + } if (extRet.init) { extensionInitFns.push(extRet.init); } @@ -244,18 +238,55 @@ export class PGlite implements PGliteInterface { this.#extensionsClose.push(extRet.close); } } + } + emscriptenOpts["pg_extensions"] = extensionBundlePromises; - emscriptenOpts = await this.fs!.emscriptenOpts(emscriptenOpts); - const emp = await EmPostgresFactory(emscriptenOpts); - this.emp = emp; - }); + // Load the database engine + this.mod = await PostgresModFactory(emscriptenOpts); + + // Sync the filesystem from any previous store + await this.fs!.initialSyncFs(this.mod.FS); + + // If the user has provided a tarball to load the database from, do that now. + // We do this after the initial sync so that we can throw if the database + // already exists. 
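+ // Illustrative usage (editorial, not from this patch): + // const file = await db1.dumpDataDir(); + // const db2 = new PGlite({ loadDataDir: file });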
+ if (options.loadDataDir) { + if (this.mod.FS.analyzePath(PGDATA + "/PG_VERSION").exists) { + throw new Error("Database already exists, cannot load from tarball"); + } + this.#log("pglite: loading data from tarball"); + await loadTar(this.mod.FS, options.loadDataDir); + } + + // Check and log if the database exists + if (this.mod.FS.analyzePath(PGDATA + "/PG_VERSION").exists) { + this.#log("pglite: found DB, resuming"); + } else { + this.#log("pglite: no db"); + } - if (firstRun) { - await this.#firstRun(); + // Start compiling dynamic extensions present in FS. + await loadExtensions(this.mod, (...args) => this.#log(...args)); + + // Initialize the database + const idb = this.mod._pg_initdb(); + + if (!idb) { + // TODO: meaningful initdb return/error code? + } else { + if (idb & 0b0001) console.log(" #1"); + if (idb & 0b0010) console.log(" #2"); + if (idb & 0b0100) console.log(" #3"); } - await this.#runExec(` - SET search_path TO public; - `); + + // Sync any changes back to the persisted store (if there is one) + // TODO: only sync here if initdb did init db. + await this.#syncToFs(); + + // Set the search path to public for this connection + await this.#runExec("SET search_path TO public;"); + + this.#ready = true; // Init extensions for (const initFn of extensionInitFns) { @@ -263,35 +294,6 @@ } } - /** - * Perform the first run initialization of the database - * This is only run when the database is first created - */ - async #firstRun() { - const shareDir = "/usr/local/pgsql/share"; - const sqlFiles = [ - ["information_schema.sql"], - ["system_constraints.sql", "pg_catalog"], - ["system_functions.sql", "pg_catalog"], - ["system_views.sql", "pg_catalog"], - ]; - // Load the sql files into the database - for (const [file, schema] of sqlFiles) { - const sql = await this.emp.FS.readFile(shareDir + "/" + file, { - encoding: "utf8", - }); - if (schema) { - await this.#runExec(`SET search_path TO ${schema};\n ${sql}`); - } else { - await this.#runExec(sql); - } - } - await this.#runExec(` - SET search_path TO public; - CREATE EXTENSION IF NOT EXISTS plpgsql; - `); - } - /** * The ready state of the database */ @@ -569,59 +571,64 @@ { syncToFs = true }: ExecProtocolOptions = {}, ): Promise<Array<[BackendMessage, Uint8Array]>> { return await this.#executeMutex.runExclusive(async () => { - if (this.#resultAccumulator.length > 0) { - this.#resultAccumulator = []; - } + const msg_len = message.length; + const mod = this.mod!; + + // >0 set buffer content type to wire protocol + // set buffer size so answer will be at size+0x2 pointer addr + mod._interactive_write(msg_len); + + // copy whole buffer at addr 0x1 + mod.HEAPU8.set(message, 1); - var bytes = message.length; - var ptr = this.emp._malloc(bytes); - this.emp.HEAPU8.set(message, ptr); - this.emp._ExecProtocolMsg(ptr); + // execute the message + mod._interactive_one(); if (syncToFs) { await this.#syncToFs(); } - const resData = this.#resultAccumulator; - const results: Array<[BackendMessage, Uint8Array]> = []; - resData.forEach((data) => { - this.#parser.parse(Buffer.from(data), (msg) => { - if (msg instanceof DatabaseError) { - this.#parser = new Parser(); // Reset the parser - throw msg; - // TODO: Do we want to wrap the error in a custom error? 
- } else if (msg instanceof NoticeMessage && this.debug > 0) { - // Notice messages are warnings, we should log them - console.warn(msg); - } else if (msg instanceof CommandCompleteMessage) { - // Keep track of the transaction state - switch (msg.text) { - case "BEGIN": - this.#inTransaction = true; - break; - case "COMMIT": - case "ROLLBACK": - this.#inTransaction = false; - break; - } - } else if (msg instanceof NotificationResponseMessage) { - // We've received a notification, call the listeners - const listeners = this.#notifyListeners.get(msg.channel); - if (listeners) { - listeners.forEach((cb) => { - // We use queueMicrotask so that the callback is called after any - // synchronous code has finished running. - queueMicrotask(() => cb(msg.payload)); - }); - } - this.#globalNotifyListeners.forEach((cb) => { - queueMicrotask(() => cb(msg.channel, msg.payload)); + // Read responses from the buffer + const msg_start = msg_len + 2; + const msg_end = msg_start + mod._interactive_read(); + const data = mod.HEAPU8.subarray(msg_start, msg_end); + + const results: Array<[BackendMessage, Uint8Array]> = []; + + this.#parser.parse(Buffer.from(data), (msg) => { + if (msg instanceof DatabaseError) { + this.#parser = new Parser(); // Reset the parser + throw msg; + // TODO: Do we want to wrap the error in a custom error? + } else if (msg instanceof NoticeMessage && this.debug > 0) { + // Notice messages are warnings, we should log them + console.warn(msg); + } else if (msg instanceof CommandCompleteMessage) { + // Keep track of the transaction state + switch (msg.text) { + case "BEGIN": + this.#inTransaction = true; + break; + case "COMMIT": + case "ROLLBACK": + this.#inTransaction = false; + break; + } + } else if (msg instanceof NotificationResponseMessage) { + // We've received a notification, call the listeners + const listeners = this.#notifyListeners.get(msg.channel); + if (listeners) { + listeners.forEach((cb) => { + // We use queueMicrotask so that the callback is called after any + // synchronous code has finished running. + queueMicrotask(() => cb(msg.payload)); }); } + this.#globalNotifyListeners.forEach((cb) => { + queueMicrotask(() => cb(msg.channel, msg.payload)); + }); + } results.push([msg, data]); }); return results; @@ -647,7 +654,7 @@ const doSync = async () => { await this.#fsSyncMutex.runExclusive(async () => { this.#fsSyncScheduled = false; - await this.fs!.syncToFs(this.emp.FS); + await this.fs!.syncToFs(this.mod!.FS); }); }; @@ -737,4 +744,13 @@ ): PGlite & PGliteInterfaceExtensions<O["extensions"]> { return new PGlite(options) as any; } + + /** + * Dump the PGDATA dir from the filesystem to a gzipped tarball. + * @returns The tarball as a File object where available, with a fallback to a Blob + */ + async dumpDataDir() { + let dbname = this.dataDir?.split("/").pop() ?? 
"pgdata"; + return this.fs!.dumpTar(this.mod!.FS, dbname); + } } diff --git a/packages/pglite/src/polyfills/indirectEval.ts b/packages/pglite/src/polyfills/indirectEval.ts new file mode 100644 index 00000000..2b31faf0 --- /dev/null +++ b/packages/pglite/src/polyfills/indirectEval.ts @@ -0,0 +1,2 @@ +const indirectEval = (globalThis || window).eval; +export { indirectEval as eval }; diff --git a/packages/pglite/src/postgres.ts b/packages/pglite/src/postgres.ts new file mode 100644 index 00000000..9587ff69 --- /dev/null +++ b/packages/pglite/src/postgres.ts @@ -0,0 +1,32 @@ +//@ts-ignore +import PostgresModFactory from "../release/postgres.js"; + +// Uses the types from @types/emscripten + +export type FS = typeof FS & { + filesystems: { + MEMFS: Emscripten.FileSystemType; + NODEFS: Emscripten.FileSystemType; + IDBFS: Emscripten.FileSystemType; + }; +}; + +export interface PostgresMod + extends Omit { + preInit: Array<{ (mod: PostgresMod): void }>; + preRun: Array<{ (mod: PostgresMod): void }>; + postRun: Array<{ (mod: PostgresMod): void }>; + FS: FS; + WASM_PREFIX: string; + pg_extensions: Record>; + _pg_initdb: () => number; + _interactive_write: (msgLength: number) => void; + _interactive_one: () => void; + _interactive_read: () => number; +} + +type PostgresFactory = ( + moduleOverrides?: Partial, +) => Promise; + +export default PostgresModFactory as PostgresFactory; diff --git a/packages/pglite/src/utils.ts b/packages/pglite/src/utils.ts index 35c51beb..89c29356 100644 --- a/packages/pglite/src/utils.ts +++ b/packages/pglite/src/utils.ts @@ -3,28 +3,9 @@ export const IN_NODE = typeof process.versions === "object" && typeof process.versions.node === "string"; -export async function nodeValues() { - let dirname: string | undefined = undefined; - let require: ((id: string) => any) | undefined = undefined; - if (IN_NODE) { - const module = await import("module"); - // In some environments importing 'module' doesn't have a 'default' property and - // createRequire is on the top level of the import. - // This is a workaround for that. - // See https://github.com/electric-sql/pglite/issues/71 - const createRequire = - module.default?.createRequire ?? 
- ((module as any) - .createRequire as (typeof module.default)["createRequire"]); - require = createRequire(import.meta.url); - dirname = (await import("path")).dirname(import.meta.url); - } - return { dirname, require }; -} - export async function makeLocateFile() { const PGWASM_URL = new URL("../release/postgres.wasm", import.meta.url); - const PGSHARE_URL = new URL("../release/share.data", import.meta.url); + const PGSHARE_URL = new URL("../release/postgres.data", import.meta.url); let fileURLToPath = (fileUrl: URL) => fileUrl.pathname; if (IN_NODE) { fileURLToPath = (await import("url")).fileURLToPath; @@ -32,13 +13,14 @@ export async function makeLocateFile() { return (base: string) => { let url: URL | null = null; switch (base) { - case "share.data": + case "postgres.data": url = PGSHARE_URL; break; case "postgres.wasm": url = PGWASM_URL; break; default: + console.error("makeLocateFile", base); } if (url?.protocol === "file:") { diff --git a/packages/pglite/src/vector/index.ts b/packages/pglite/src/vector/index.ts new file mode 100644 index 00000000..4edadc24 --- /dev/null +++ b/packages/pglite/src/vector/index.ts @@ -0,0 +1,17 @@ +import type { + Extension, + ExtensionSetupResult, + PGliteInterface, +} from "../interface"; + +const setup = async (pg: PGliteInterface, emscriptenOpts: any) => { + return { + emscriptenOpts, + bundlePath: new URL("../../release/vector.tar.gz", import.meta.url), + } satisfies ExtensionSetupResult; +}; + +export const vector = { + name: "pgvector", + setup, +} satisfies Extension; diff --git a/packages/pglite/src/worker/index.ts b/packages/pglite/src/worker/index.ts index 6cbcf600..b69be732 100644 --- a/packages/pglite/src/worker/index.ts +++ b/packages/pglite/src/worker/index.ts @@ -13,6 +13,7 @@ import type { Worker as WorkerInterface } from "./process.js"; export class PGliteWorker implements PGliteInterface { readonly dataDir?: string; + // @ts-ignore readonly fsType: FilesystemType; readonly waitReady: Promise; readonly debug: DebugLevel = 0; @@ -31,6 +32,7 @@ export class PGliteWorker implements PGliteInterface { constructor(dataDir: string, options?: PGliteOptions) { const { dataDir: dir, fsType } = parseDataDir(dataDir); this.dataDir = dir; + // @ts-ignore this.fsType = fsType; this.#options = options ?? {}; this.debug = options?.debug ?? 
0; @@ -142,4 +144,8 @@ export class PGliteWorker implements PGliteInterface { queueMicrotask(() => listener(channel, payload)); } } + + async dumpDataDir() { + return this.#worker.dumpDataDir(); + } } diff --git a/packages/pglite/src/worker/process.ts b/packages/pglite/src/worker/process.ts index d9695b94..cdaf91e6 100644 --- a/packages/pglite/src/worker/process.ts +++ b/packages/pglite/src/worker/process.ts @@ -34,6 +34,10 @@ const worker = { async execProtocol(message: Uint8Array) { return await db.execProtocol(message); }, + async dumpDataDir() { + const file = await db.dumpDataDir(); + return Comlink.transfer(file, [await file.arrayBuffer()]); + }, }; Comlink.expose(worker); diff --git a/packages/pglite/tests/basic.test.js b/packages/pglite/tests/basic.test.js index af2709f4..e46fe0db 100644 --- a/packages/pglite/tests/basic.test.js +++ b/packages/pglite/tests/basic.test.js @@ -1,4 +1,4 @@ -import test from "ava"; +import test from "./polytest.js"; import { PGlite } from "../dist/index.js"; test("basic exec", async (t) => { @@ -207,7 +207,7 @@ test("basic types", async (t) => { affectedRows: 0, }); - // standardize timestamp comparison to UTC milliseconds to ensure predictable test runs on machines in different timezones. + // standardize timestamp comparison to UTC milliseconds to ensure predictable test runs on machines in different timezones. t.deepEqual(res.rows[0].timestamp.getUTCMilliseconds(), new Date("2021-01-01T12:00:00.000Z").getUTCMilliseconds()) }); @@ -334,7 +334,7 @@ test("basic copy to/from blob", async (t) => { const ret = await db.query("COPY test TO '/dev/blob' WITH (FORMAT csv);"); const csv = await ret.blob.text(); t.is(csv, "1,test\n2,test2\n"); - + // copy from const blob2 = new Blob([csv]); await db.exec(` diff --git a/packages/pglite/tests/dump.test.js b/packages/pglite/tests/dump.test.js new file mode 100644 index 00000000..24eff1f4 --- /dev/null +++ b/packages/pglite/tests/dump.test.js @@ -0,0 +1,27 @@ +import test from "ava"; +import { PGlite } from "../dist/index.js"; + +test("dump data dir and load it", async (t) => { + const pg1 = new PGlite(); + await pg1.exec(` + CREATE TABLE IF NOT EXISTS test ( + id SERIAL PRIMARY KEY, + name TEXT + ); + `); + await pg1.exec("INSERT INTO test (name) VALUES ('test');"); + + const ret1 = await pg1.query("SELECT * FROM test;"); + + const file = await pg1.dumpDataDir(); + + t.is(typeof file, "object"); + + const pg2 = new PGlite({ + loadDataDir: file, + }); + + const ret2 = await pg2.query("SELECT * FROM test;"); + + t.deepEqual(ret1, ret2); +}); diff --git a/packages/pglite/tests/notify.test.js b/packages/pglite/tests/notify.test.js index 5e3c1a74..3fb2ab73 100644 --- a/packages/pglite/tests/notify.test.js +++ b/packages/pglite/tests/notify.test.js @@ -1,4 +1,4 @@ -import test from "ava"; +import test from "./polytest.js"; import { PGlite } from "../dist/index.js"; test("notify", async (t) => { diff --git a/packages/pglite/tests/pgvector.test.js b/packages/pglite/tests/pgvector.test.js new file mode 100644 index 00000000..5441797b --- /dev/null +++ b/packages/pglite/tests/pgvector.test.js @@ -0,0 +1,68 @@ +import test from "./polytest.js"; +import { PGlite } from "../dist/index.js"; +import { vector } from "../dist/vector/index.js"; + +test("pgvector", async (t) => { + const pg = new PGlite({ + extensions: { + vector, + }, + }); + + await pg.exec("CREATE EXTENSION IF NOT EXISTS vector;"); + await pg.exec(` + CREATE TABLE IF NOT EXISTS test ( + id SERIAL PRIMARY KEY, + name TEXT, + vec vector(3) + ); + `); + await 
pg.exec("INSERT INTO test (name, vec) VALUES ('test1', '[1,2,3]');"); + await pg.exec("INSERT INTO test (name, vec) VALUES ('test2', '[4,5,6]');"); + await pg.exec("INSERT INTO test (name, vec) VALUES ('test3', '[7,8,9]');"); + + const res = await pg.exec(` + SELECT + name, + vec, + vec <-> '[3,1,2]' AS distance + FROM test; + `); + + t.deepEqual(res, [ + { + rows: [ + { + name: "test1", + vec: "[1,2,3]", + distance: 2.449489742783178, + }, + { + name: "test2", + vec: "[4,5,6]", + distance: 5.744562646538029, + }, + { + name: "test3", + vec: "[7,8,9]", + distance: 10.677078252031311, + }, + ], + fields: [ + { + name: "name", + dataTypeID: 25, + }, + { + name: "vec", + dataTypeID: 12772, + }, + { + name: "distance", + dataTypeID: 701, + }, + ], + affectedRows: 0, + }, + ]); +}); diff --git a/packages/pglite/tests/polytest.js b/packages/pglite/tests/polytest.js new file mode 100644 index 00000000..1bfb3b5e --- /dev/null +++ b/packages/pglite/tests/polytest.js @@ -0,0 +1,37 @@ +/* This file is a polyfill for AVA tests to run in Bun */ + +let test; + +if (typeof Bun !== "undefined") { + // Minimal implementation of AVA for Bun + const bunTest = await import("bun:test"); + + const t = { + is: (a, b) => bunTest.expect(a).toBe(b), + deepEqual: (a, b) => bunTest.expect(a).toEqual(b), + like: (a, b) => bunTest.expect(a).toMatchObject(b), + pass: () => bunTest.expect(true).toBe(true), + fail: () => bunTest.expect(true).toBe(false), + throwsAsync: async (fn, expected) => { + try { + await fn(); + bunTest.expect(true).toBe(false); + } catch (err) { + bunTest.expect(err).toMatchObject(expected); + } + } + } + + test = (name, fn) => { + return bunTest.test(name, () => fn(t)); + } + test.before = (fn) => bunTest.beforeAll(() => fn(t)); + test.after = (fn) => bunTest.afterAll(() => fn(t)); + test.serial = test; + test.serial.before = (fn) => bunTest.beforeEach(() => fn(t)); +} else { + // Just use AVA + test = (await import("ava")).default; +} + +export default test; diff --git a/packages/pglite/tests/targets/base.js b/packages/pglite/tests/targets/base.js index e5e4db1f..63b05b22 100644 --- a/packages/pglite/tests/targets/base.js +++ b/packages/pglite/tests/targets/base.js @@ -1,4 +1,4 @@ -import test from "ava"; +import test from "../polytest.js"; import playwright from "playwright"; const wsPort = process.env.WS_PORT || 3334; diff --git a/packages/pglite/tests/targets/chromium-idb.test.js b/packages/pglite/tests/targets/chromium-idb.test.js index 56e93473..5f1f0da8 100644 --- a/packages/pglite/tests/targets/chromium-idb.test.js +++ b/packages/pglite/tests/targets/chromium-idb.test.js @@ -1,3 +1,3 @@ import { tests } from "./base.js"; -tests("chromium", "idb://pgdata-test", "chromium.idb"); +tests("chromium", "idb://base", "chromium.idb"); diff --git a/packages/pglite/tests/targets/firefox-idb.test.js b/packages/pglite/tests/targets/firefox-idb.test.js index d285b30b..739b57c0 100644 --- a/packages/pglite/tests/targets/firefox-idb.test.js +++ b/packages/pglite/tests/targets/firefox-idb.test.js @@ -1,3 +1,3 @@ import { tests } from "./base.js"; -tests("firefox", "idb://pgdata-test", "firefox.idb"); +tests("firefox", "idb://base", "firefox.idb"); diff --git a/packages/pglite/tests/targets/webkit-idb.test.js b/packages/pglite/tests/targets/webkit-idb.test.js index ff173540..ccd8953c 100644 --- a/packages/pglite/tests/targets/webkit-idb.test.js +++ b/packages/pglite/tests/targets/webkit-idb.test.js @@ -1,3 +1,3 @@ import { tests } from "./base.js"; -tests("webkit", "idb://pgdata-test", "webkit.idb"); 
+tests("webkit", "idb://base", "webkit.idb"); diff --git a/packages/pglite/tests/types.test.js b/packages/pglite/tests/types.test.js index 5a3f395f..6db6ace3 100644 --- a/packages/pglite/tests/types.test.js +++ b/packages/pglite/tests/types.test.js @@ -1,4 +1,4 @@ -import test from "ava"; +import test from "./polytest.js"; import { types } from "../dist/index.js"; // Parse type tests diff --git a/packages/pglite/tsup.config.ts b/packages/pglite/tsup.config.ts index 7c875471..23ee6f9f 100644 --- a/packages/pglite/tsup.config.ts +++ b/packages/pglite/tsup.config.ts @@ -1,40 +1,42 @@ -import { defineConfig } from 'tsup' -import path from 'path' -import { fileURLToPath } from 'url' +import { defineConfig } from "tsup"; +import path from "path"; +import { fileURLToPath } from "url"; -const thisFile = fileURLToPath(new URL(import.meta.url)) -const root = path.dirname(thisFile) +const thisFile = fileURLToPath(new URL(import.meta.url)); +const root = path.dirname(thisFile); let replaceAssertPlugin = { - name: 'replace-assert', + name: "replace-assert", setup(build: any) { // Resolve `assert` to a blank file build.onResolve({ filter: /^assert$/ }, (args: any) => { - return { path: path.join(root, 'src', 'polyfills', 'blank.ts') } - }) + return { path: path.join(root, "src", "polyfills", "blank.ts") }; + }); }, -} +}; const entryPoints = [ - 'src/index.ts', - 'src/worker/index.ts', - 'src/worker/process.ts', -] + "src/index.ts", + "src/worker/index.ts", + "src/worker/process.ts", + "src/vector/index.ts", +]; export default defineConfig({ entry: entryPoints, sourcemap: true, dts: { entry: entryPoints, - resolve: true + resolve: true, }, clean: true, - format: ['esm'], + format: ["esm"], esbuildOptions(options, context) { - options.inject = ['src/polyfills/buffer.ts'] + options.inject = [ + "src/polyfills/buffer.ts", + "src/polyfills/indirectEval.ts", + ]; }, - esbuildPlugins: [ - replaceAssertPlugin, - ], + esbuildPlugins: [replaceAssertPlugin], minify: true, -}) +}); diff --git a/packages/repl/package.json b/packages/repl/package.json index b67bf242..25984193 100644 --- a/packages/repl/package.json +++ b/packages/repl/package.json @@ -25,7 +25,7 @@ "dev": "vite", "build:react": "tsc && vite build", "build:webcomp": "vite build --config vite.webcomp.config.ts", - "build": "npm run build:react && npm run build:webcomp", + "build": "pnpm run build:react && pnpm run build:webcomp", "lint": "eslint . 
--ext ts,tsx --report-unused-disable-directives --max-warnings 0", "preview": "vite preview", "format": "prettier --write ./src && prettier --write ./src-webcomponent" diff --git a/patches/exports b/patches/exports new file mode 100644 index 00000000..c30d92f6 --- /dev/null +++ b/patches/exports @@ -0,0 +1,429 @@ +___cxa_throw +_main +_main_repl +_pg_repl_raf +_getenv +_setenv +_interactive_one +_interactive_write +_interactive_read +_pg_initdb +_pg_shutdown +_lowerstr +_AllocSetContextCreateInternal +_ArrayGetIntegerTypmods +_BeginInternalSubTransaction +_BlessTupleDesc +_BlockSampler_HasMore +_BlockSampler_Init +_BlockSampler_Next +_BufferBlocks +_BufferGetBlockNumber +_BuildIndexInfo +_CachedPlanAllowsSimpleValidityCheck +_CachedPlanIsSimplyValid +_CheckFunctionValidatorAccess +_CommandCounterIncrement +_ConditionVariableCancelSleep +_ConditionVariableInit +_ConditionVariableSignal +_ConditionVariableSleep +_CopyErrorData +_CreateDestReceiver +_CreateExecutorState +_CreateExprContext +_CreateParallelContext +_CreateTemplateTupleDesc +_CreateTupleDescCopy +_CurrentMemoryContext +_CurrentResourceOwner +_DatumGetEOHP +_DecrTupleDescRefCount +_DefineCustomBoolVariable +_DefineCustomEnumVariable +_DefineCustomIntVariable +_DefineCustomStringVariable +_DeleteExpandedObject +_DestroyParallelContext +_DirectFunctionCall1Coll +_EOH_flatten_into +_EOH_get_flat_size +_EnsurePortalSnapshotExists +_EnterParallelMode +_ExecInitExpr +_ExecInitExprWithParams +_ExecStoreVirtualTuple +_ExitParallelMode +_ExprEvalPushStep +_Float8GetDatum +_FlushErrorState +_FreeAccessStrategy +_FreeCachedExpression +_FreeExecutorState +_FreeExprContext +_FunctionCall0Coll +_FunctionCall1Coll +_FunctionCall2Coll +_GUC_check_errdetail_string +_GenerationContextCreate +_GenericXLogAbort +_GenericXLogFinish +_GenericXLogRegisterBuffer +_GenericXLogStart +_GetAccessStrategy +_GetCachedExpression +_GetCommandTagName +_GetCurrentSubTransactionId +_GetErrorContextStack +_GetTransactionSnapshot +_HeapTupleHeaderGetDatum +_InitializeParallelDSM +_Int64GetDatum +_InterruptPending +_ItemPointerEquals +_LWLockAcquire +_LWLockInitialize +_LWLockNewTrancheId +_LWLockRegisterTranche +_LWLockRelease +_LaunchParallelWorkers +_LocalBufferBlockPointers +_LockBuffer +_LockBufferForCleanup +_LockPage +_LockRelationForExtension +_LookupTypeName +_MainLWLockArray +_MakeExpandedObjectReadOnlyInternal +_MakeSingleTupleTableSlot +_MarkBufferDirty +_MarkGUCPrefixReserved +_MemoryContextAlloc +_MemoryContextAllocExtended +_MemoryContextAllocZero +_MemoryContextAllocZeroAligned +_MemoryContextDelete +_MemoryContextDeleteChildren +_MemoryContextGetParent +_MemoryContextMemAllocated +_MemoryContextReset +_MemoryContextSetIdentifier +_MemoryContextSetParent +_MemoryContextStrdup +_MyProc +_NameListToString +_OidOutputFunctionCall +_PG_exception_stack +_PageAddItemExtended +_PageGetFreeSpace +_PageIndexMultiDelete +_PageIndexTupleOverwrite +_PageInit +_PinPortal +_PopActiveSnapshot +_ProcessInterrupts +_PushActiveSnapshot +_RangeVarGetRelidExtended +_ReThrowError +_ReadBuffer +_ReadBufferExtended +_RegisterSnapshot +_RegisterSubXactCallback +_RegisterXactCallback +_RelationGetIndexScan +_RelationGetNumberOfBlocksInFork +_ReleaseCachedPlan +_ReleaseCurrentSubTransaction +_ReleaseSysCache +_RelnameGetRelid +_ResourceOwnerCreate +_ResourceOwnerDelete +_ResourceOwnerReleaseAllPlanCacheRefs +_RollbackAndReleaseCurrentSubTransaction +_SPI_commit +_SPI_commit_and_chain +_SPI_connect +_SPI_connect_ext +_SPI_copytuple +_SPI_cursor_close +_SPI_cursor_fetch 
+_SPI_cursor_find +_SPI_cursor_open_with_paramlist +_SPI_cursor_parse_open +_SPI_datumTransfer +_SPI_execute_extended +_SPI_execute_plan_extended +_SPI_execute_plan_with_paramlist +_SPI_finish +_SPI_freeplan +_SPI_freetuptable +_SPI_getbinval +_SPI_keepplan +_SPI_palloc +_SPI_plan_get_cached_plan +_SPI_plan_get_plan_sources +_SPI_prepare_extended +_SPI_processed +_SPI_register_trigger_data +_SPI_result +_SPI_result_code_string +_SPI_returntuple +_SPI_rollback +_SPI_rollback_and_chain +_SPI_scroll_cursor_fetch +_SPI_scroll_cursor_move +_SPI_tuptable +_ScanKeywordLookup +_SearchSysCache1 +_SearchSysCacheAttName +_SetTuplestoreDestReceiverParams +_ShmemInitStruct +_SnapshotAnyData +_SplitIdentifierString +_SysCacheGetAttrNotNull +_TTSOpsMinimalTuple +_TTSOpsVirtual +_TopMemoryContext +_TopTransactionContext +_TopTransactionResourceOwner +_TransferExpandedObject +_TupleDescInitEntry +_TupleDescInitEntryCollation +_UnlockPage +_UnlockRelationForExtension +_UnlockReleaseBuffer +_UnpinPortal +_UnregisterSnapshot +_WaitForParallelWorkersToAttach +_WaitForParallelWorkersToFinish +___THREW__ +___errno_location +___memory_base +___stack_pointer +___table_base +___threwValue +___wasm_setjmp +___wasm_setjmp_test +_acos +_add_int_reloption +_add_reloption_kind +_add_size +_appendBinaryStringInfo +_appendStringInfo +_appendStringInfoChar +_appendStringInfoSpaces +_appendStringInfoString +_appendStringInfoStringQuoted +_array_contains_nulls +_array_create_iterator +_array_iterate +_bms_add_member +_bms_is_member +_bms_next_member +_build_reloptions +_check_function_bodies +_coerce_to_target_type +_construct_array +_construct_md_array +_contain_mutable_functions +_convert_tuples_by_position +_core_yylex +_cstring_to_text +_datumCopy +_datumIsEqual +_datumTransfer +_debug_query_string +_deconstruct_array +_deconstruct_expanded_record +_detoast_external_attr +_domain_check +_emscripten_longjmp +_enlargeStringInfo +_err_generic_string +_errcode +_errcontext_msg +_errdetail +_errdetail_internal +_errfinish +_errhint +_errmsg +_errmsg_internal +_errmsg_plural +_error_context_stack +_errposition +_errstart +_errstart_cold +_execute_attr_map_tuple +_expand_array +_expanded_record_fetch_field +_expanded_record_fetch_tupdesc +_expanded_record_get_tuple +_expanded_record_lookup_field +_expanded_record_set_field_internal +_expanded_record_set_fields +_expanded_record_set_tuple +_exprType +_exprTypmod +_fflush +_find_rendezvous_variable +_float_overflow_error +_float_to_shortest_decimal_buf +_float_to_shortest_decimal_bufn +_float_underflow_error +_format_elog_string +_format_procedure +_format_type_be +_function_parse_error_transpose +_genericcostestimate +_getTempRet0 +_getTypeOutputInfo +_get_base_element_type +_get_call_expr_argtype +_get_call_result_type +_get_collation_oid +_get_element_type +_get_fn_expr_rettype +_get_func_arg_info +_get_namespace_name +_get_rel_type_id +_get_tablespace_page_costs +_get_typcollation +_get_typlenbyval +_get_typlenbyvalalign +_get_typsubscript +_get_typtype +_geterrposition +_getinternalerrposition +_guc_malloc +_hash_create +_hash_search +_heap_deform_tuple +_heap_form_tuple +_index_close +_index_form_tuple +_index_getprocid +_index_getprocinfo +_index_open +_initStringInfo +_internalerrposition +_internalerrquery +_lappend +_list_copy +_list_delete_last +_list_free +_list_make1_impl +_list_make2_impl +_list_make3_impl +_list_sort +_log +_log_newpage_range +_lookup_rowtype_tupdesc +_lookup_type_cache +_maintenance_work_mem +_makeParamList +_makeRangeVar +_makeString 
+_makeTypeName
+_makeTypeNameFromNameList
+_make_expanded_record_from_exprecord
+_make_expanded_record_from_tupdesc
+_make_expanded_record_from_typeid
+_max_parallel_maintenance_workers
+_memcpy
+_memmove
+_memset
+_namein
+_nocache_index_getattr
+_numeric_float4
+_pairingheap_add
+_pairingheap_allocate
+_pairingheap_first
+_pairingheap_free
+_pairingheap_remove_first
+_palloc
+_palloc0
+_palloc_extended
+_parser_errposition
+_pfree
+_pg_bindtextdomain
+_pg_detoast_datum
+_pg_detoast_datum_copy
+_pg_global_prng_state
+_pg_ltoa
+_pg_mbstrlen_with_len
+_pg_number_of_ones
+_pg_printf
+_pg_prng_double
+_pg_prng_uint32
+_pg_qsort
+_pg_re_throw
+_pg_snprintf
+_pg_strcasecmp
+_pgstat_assoc_relation
+_pgstat_progress_update_param
+_pgstat_report_activity
+_plan_create_index_workers
+_pnstrdup
+_pq_begintypsend
+_pq_endtypsend
+_pq_getmsgfloat4
+_pq_getmsgint
+_pq_sendfloat4
+_pre_format_elog_string
+_process_shared_preload_libraries_in_progress
+_pstrdup
+_puts
+_quote_identifier
+_raw_parser
+_repalloc
+_reservoir_get_next_S
+_reservoir_init_selection_state
+_resolve_polymorphic_argtypes
+_s_init_lock_sema
+_s_lock
+_s_unlock_sema
+_sampler_random_fract
+_scanner_finish
+_scanner_init
+_scanner_isspace
+_setTempRet0
+_set_errcontext_domain
+_shm_toc_allocate
+_shm_toc_insert
+_shm_toc_lookup
+_slot_getsomeattrs_int
+_stdout
+_strchr
+_strcmp
+_strcpy
+_strlen
+_strspn
+_strtof
+_strtol
+_table_beginscan_parallel
+_table_close
+_table_open
+_table_parallelscan_estimate
+_table_parallelscan_initialize
+_tas_sema
+_text_to_cstring
+_tuplesort_attach_shared
+_tuplesort_begin_heap
+_tuplesort_end
+_tuplesort_estimate_shared
+_tuplesort_gettupleslot
+_tuplesort_initialize_shared
+_tuplesort_performsort
+_tuplesort_puttupleslot
+_tuplesort_reset
+_tuplestore_begin_heap
+_tuplestore_puttuple
+_tuplestore_putvalues
+_tuplestore_tuple_count
+_typeStringToTypeName
+_type_is_rowtype
+_typenameTypeIdAndMod
+_unpack_sql_state
+_vacuum_delay_point
+_wal_level
+_work_mem
diff --git a/patches/interactive_one.c b/patches/interactive_one.c
new file mode 100644
index 00000000..3dd69ef0
--- /dev/null
+++ b/patches/interactive_one.c
@@ -0,0 +1,524 @@
+#define PDEBUG(...)
+#include <unistd.h> // access, unlink
+
+static void pg_prompt() {
+	fprintf(stdout, "pg> %c\n", 4);
+}
+
+extern void AbortTransaction(void);
+extern void CleanupTransaction(void);
+extern void ClientAuthentication(Port *port);
+extern FILE* SOCKET_FILE;
+extern int SOCKET_DATA;
+
+/*
+init sequence
+___________________________________
+SubPostmasterMain / (forkexec)
+    InitPostmasterChild
+    shm attach
+    preload
+
+    BackendInitialize(Port *port) -> collect initial packet
+
+        pq_init();
+        whereToSendOutput = DestRemote;
+        status = ProcessStartupPacket(port, false, false);
+            pq_startmsgread
+            pq_getbytes from pq_recvbuf
+            TODO: place PqRecvBuffer (8K) in lower mem for zero copy
+
+    PerformAuthentication
+        ClientAuthentication(port)
+            CheckPasswordAuth SYNC!!!!
( sendAuthRequest flush -> recv_password_packet ) + InitShmemAccess/InitProcess/CreateSharedMemoryAndSemaphores + + BackendRun(port) + PostgresMain + + +-> pq_flush() is synchronous + + +buffer sizes: + + https://github.com/postgres/postgres/blob/master/src/backend/libpq/pqcomm.c#L118 + + https://github.com/postgres/postgres/blob/master/src/common/stringinfo.c#L28 + + + +*/ +extern int ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done); +extern void pq_recvbuf_fill(FILE* fp, int packetlen); + +#define PG_MAX_AUTH_TOKEN_LENGTH 65535 +static char * +recv_password_packet(Port *port) { + StringInfoData buf; + int mtype; + + pq_startmsgread(); + + /* Expect 'p' message type */ + mtype = pq_getbyte(); + if (mtype != 'p') + { + /* + * If the client just disconnects without offering a password, don't + * make a log entry. This is legal per protocol spec and in fact + * commonly done by psql, so complaining just clutters the log. + */ + if (mtype != EOF) + ereport(ERROR, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + errmsg("expected password response, got message type %d", + mtype))); + return NULL; /* EOF or bad message type */ + } + + initStringInfo(&buf); + if (pq_getmessage(&buf, PG_MAX_AUTH_TOKEN_LENGTH)) /* receive password */ + { + /* EOF - pq_getmessage already logged a suitable message */ + pfree(buf.data); + return NULL; + } + + /* + * Apply sanity check: password packet length should agree with length of + * contained string. Note it is safe to use strlen here because + * StringInfo is guaranteed to have an appended '\0'. + */ + if (strlen(buf.data) + 1 != buf.len) + ereport(ERROR, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + errmsg("invalid password packet size"))); + + /* + * Don't allow an empty password. Libpq treats an empty password the same + * as no password at all, and won't even try to authenticate. But other + * clients might, so allowing it would be confusing. + * + * Note that this only catches an empty password sent by the client in + * plaintext. There's also a check in CREATE/ALTER USER that prevents an + * empty string from being stored as a user's password in the first place. + * We rely on that for MD5 and SCRAM authentication, but we still need + * this check here, to prevent an empty password from being used with + * authentication methods that check the password against an external + * system, like PAM, LDAP and RADIUS. + */ + if (buf.len == 1) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PASSWORD), + errmsg("empty password returned by client"))); + + /* Do not echo password to logs, for security. 
*/ + elog(DEBUG5, "received password packet"); + return buf.data; +} + + +int md5Salt_len = 4; +char md5Salt[4]; + +static void io_init() { + ClientAuthInProgress = false; + pq_init(); /* initialize libpq to talk to client */ + whereToSendOutput = DestRemote; /* now safe to ereport to client */ + MyProcPort = (Port *) calloc(1, sizeof(Port)); + if (!MyProcPort) { + PDEBUG(" --------- NO CLIENT (oom) ---------"); + abort(); + } + MyProcPort->canAcceptConnections = CAC_OK; + + SOCKET_FILE = NULL; + SOCKET_DATA = 0; + PDEBUG(" --------- CLIENT (ready) ---------"); +} + +static void wait_unlock() { + int busy = 0; + while (access(PGS_OLOCK, F_OK) == 0) { + if (!(busy++ % 1110222)) + printf("FIXME: busy wait lock removed %d\n", busy); + } +} + +EMSCRIPTEN_KEEPALIVE int +cma_wsize = 0; + +EMSCRIPTEN_KEEPALIVE int +cma_rsize = 0; + + +EMSCRIPTEN_KEEPALIVE void +interactive_write(int size) { + cma_rsize = size; +} + +EMSCRIPTEN_KEEPALIVE int +interactive_read() { + return cma_wsize; +} + + +EMSCRIPTEN_KEEPALIVE void +interactive_one() { + int firstchar; + int c; /* character read from getc() */ + StringInfoData input_message; + StringInfoData *inBuf; + FILE *stream ; + int packetlen; + bool is_socket = false; + bool is_wire = true; + + if (is_node && is_repl) { + wait_unlock(); + + if (!MyProcPort) { + io_init(); + } + + + // this could be pg_flush in sync mode. + if (SOCKET_DATA>0) { + + PDEBUG("end packet"); + ReadyForQuery(DestRemote); + + PDEBUG("flushing data"); + if (SOCKET_FILE) + fclose(SOCKET_FILE); + + PDEBUG("setting lock"); + FILE *c_lock; + c_lock = fopen(PGS_OLOCK, "w"); + fclose(c_lock); + SOCKET_FILE = NULL; + SOCKET_DATA = 0; + return; + } + + if (!SOCKET_FILE) { + SOCKET_FILE = fopen(PGS_OUT,"w") ; + MyProcPort->sock = fileno(SOCKET_FILE); + } + } // is_node + + + doing_extended_query_message = false; + MemoryContextSwitchTo(MessageContext); + MemoryContextResetAndDeleteChildren(MessageContext); + + initStringInfo(&input_message); + inBuf = &input_message; + + DoingCommandRead = true; + + + #define IO ((char *)(1)) + + if (is_node && is_repl) { + if (access(PGS_ILOCK, F_OK) != 0) { + packetlen = 0; + FILE *fp; + // TODO: lock file + fp = fopen(PGS_IN, "r"); + if (fp) { + fseek(fp, 0L, SEEK_END); + packetlen = ftell(fp); + if (packetlen) { + whereToSendOutput = DestRemote; + resetStringInfo(inBuf); + rewind(fp); + firstchar = getc(fp); + + // first packet + if (!firstchar || (firstchar==112)) { + rewind(fp); + + if (!firstchar) { + pq_recvbuf_fill(fp, packetlen); + if (ProcessStartupPacket(MyProcPort, true, true) != STATUS_OK) { + PDEBUG("ProcessStartupPacket !OK"); + } else { + PDEBUG("auth request"); + //ClientAuthentication(MyProcPort); + ClientAuthInProgress = true; + md5Salt[0]=0x01; + md5Salt[1]=0x23; + md5Salt[2]=0x45; + md5Salt[3]=0x56; + { + StringInfoData buf; + pq_beginmessage(&buf, 'R'); + pq_sendint32(&buf, (int32) AUTH_REQ_MD5); + if (md5Salt_len > 0) + pq_sendbytes(&buf, md5Salt, md5Salt_len); + pq_endmessage(&buf); + pq_flush(); + } + } + } + if (firstchar==112) { + pq_recvbuf_fill(fp, packetlen); + char *passwd = recv_password_packet(MyProcPort); + printf("auth recv password: %s\n", "md5***" ); + ClientAuthInProgress = false; + /* + // TODO: CheckMD5Auth + if (passwd == NULL) + return STATUS_EOF; + if (shadow_pass) + result = md5_crypt_verify(port->user_name, shadow_pass, passwd, md5Salt, md5Salt_len, logdetail); + else + result = STATUS_ERROR; + */ + pfree(passwd); + { + StringInfoData buf; + pq_beginmessage(&buf, 'R'); + pq_sendint32(&buf, (int32) AUTH_REQ_OK); 
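+								/*
+								 * Editorial note (not part of the original patch): together
+								 * with the pq_endmessage() that follows, this emits
+								 * AuthenticationOk -- an 'R' message whose int32 payload is
+								 * AUTH_REQ_OK (0). Because the md5_crypt_verify() call above
+								 * is commented out, any password is accepted at this point.
+								 */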
+								pq_endmessage(&buf);
+							}
+							BeginReportingGUCOptions();
+							pgstat_report_connect(MyDatabaseId);
+							{
+								StringInfoData buf;
+								pq_beginmessage(&buf, 'K');
+								pq_sendint32(&buf, (int32) MyProcPid);
+								pq_sendint32(&buf, (int32) MyCancelKey);
+								pq_endmessage(&buf);
+							}
+							PDEBUG("TODO: pg_main start flag");
+
+
+
+
+						}
+					} else {
+						fprintf(stderr, "incoming=%d [%d, ", packetlen, firstchar);
+						for (int i=1; i<packetlen; i++) {
+							int b = getc(fp);
+							if (b>4) {
+								appendStringInfoChar(inBuf, (char)b);
+								fprintf(stderr, "%d, ", b);
+							}
+						}
+						fprintf(stderr, "]\n");
+					}
+					// when using lock
+					//ftruncate(filenum(fp), 0);
+				}
+				fclose(fp);
+				unlink(PGS_IN);
+				if (packetlen) {
+					if (!firstchar || (firstchar==112)) {
+						PDEBUG("auth/nego skip");
+						return;
+					}
+
+					is_socket = true;
+					whereToSendOutput = DestRemote;
+					goto incoming;
+				}
+			}
+			//no use on node. usleep(10);
+		}
+
+	} // is_node
+
+	if (cma_rsize) {
+		// PDEBUG("wire message !");
+		is_wire = true;
+		is_socket = false;
+		whereToSendOutput = DestRemote;
+
+		if (!MyProcPort) {
+			ClientAuthInProgress = true;
+			pq_init();
+			MyProcPort = (Port *) calloc(1, sizeof(Port));
+			if (!MyProcPort) {
+				PDEBUG(" --------- NO CLIENT (oom) ---------");
+				abort();
+			}
+			MyProcPort->canAcceptConnections = CAC_OK;
+			ClientAuthInProgress = false;
+		}
+
+		if (!SOCKET_FILE) {
+			SOCKET_FILE = fopen(PGS_OUT, "w");
+			MyProcPort->sock = fileno(SOCKET_FILE);
+		}
+		printf("# fd %s: %s fd=%d\n", PGS_OUT, IO, MyProcPort->sock);
+
+		goto incoming;
+
+	}
+
+	c = IO[0];
+
+	// TODO: use a msg queue length
+	if (!c)
+		return;
+
+	if (is_repl) {
+		whereToSendOutput = DestNone;
+		is_wire = false;
+		is_socket = false;
+	} else {
+		is_wire = false;
+		is_socket = false;
+		whereToSendOutput = DestRemote;
+
+		if (!MyProcPort) {
+			ClientAuthInProgress = true;
+			pq_init();
+			MyProcPort = (Port *) calloc(1, sizeof(Port));
+			if (!MyProcPort) {
+				PDEBUG(" --------- NO CLIENT (oom) ---------");
+				abort();
+			}
+			MyProcPort->canAcceptConnections = CAC_OK;
+			ClientAuthInProgress = false;
+		}
+
+		if (!SOCKET_FILE) {
+			SOCKET_FILE = fopen(PGS_OUT, "w");
+			MyProcPort->sock = fileno(SOCKET_FILE);
+		}
+		printf("# fd %s: %s fd=%d\n", PGS_OUT, IO, MyProcPort->sock);
+
+	}
+
+	// zero copy buffer ( lower wasm memory segment )
+	packetlen = strlen(IO);
+	if (packetlen<2) {
+		pg_prompt();
+		// always free kernel buffer !!!
+		IO[0] = 0;
+		return;
+	}
+
+	// buffer query TODO: direct access ?
+	resetStringInfo(inBuf);
+
+	for (int i=0; i<packetlen; i++)
+		appendStringInfoChar(inBuf, IO[i]);
+
+	{
+		if (inBuf->len == 0) {
+			firstchar = EOF;
+
+		} else {
+			appendStringInfoChar(inBuf, (char) '\0');
+			firstchar = 'Q';
+		}
+
+		if (is_repl) {
+			whereToSendOutput = DestDebug;
+			if (force_echo && inBuf->len >2)
+				printf("# wire=%d socket=%d repl=%c: %s", is_wire, is_socket, firstchar, inBuf->data);
+		}
+	}
+
+	#include "pg_proto.c"
+
+	/* process notifications */
+	ProcessClientReadInterrupt(true);
+
+	if (is_wire) {
+wire_flush:
+		cma_wsize = SOCKET_DATA;
+		if (SOCKET_DATA>0) {
+			ReadyForQuery(DestRemote);
+			cma_wsize = SOCKET_DATA;
+			if (SOCKET_FILE) {
+				fclose(SOCKET_FILE);
+				SOCKET_FILE = NULL;
+				SOCKET_DATA = 0;
+			}
+		}
+	}
+
+	// always free kernel buffer !!!
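+	/*
+	 * Editorial sketch of the single-buffer "CMA" exchange implemented above
+	 * (a summary, not part of the original patch):
+	 *   1. the JS side copies a query into the buffer at address 1 (IO) and
+	 *      calls interactive_write(size) to publish cma_rsize;
+	 *   2. it then calls interactive_one(), which executes the query and
+	 *      streams the wire response through SOCKET_FILE, publishing the
+	 *      response size via cma_wsize;
+	 *   3. finally it calls interactive_read() to learn how many bytes to
+	 *      copy back out.
+	 * The buffer is cleared below so the next call starts clean.
+	 */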
+ cma_rsize = 0; + IO[0] = 0; + + + #undef IO +} + + + diff --git a/patches/library_fs.js b/patches/library_fs.js new file mode 100644 index 00000000..6cf1a88a --- /dev/null +++ b/patches/library_fs.js @@ -0,0 +1,1879 @@ +/** + * @license + * Copyright 2013 The Emscripten Authors + * SPDX-License-Identifier: MIT + */ + +addToLibrary({ + $FS__deps: ['$randomFill', '$PATH', '$PATH_FS', '$TTY', '$MEMFS', + '$FS_createPreloadedFile', + '$FS_modeStringToFlags', + '$FS_getMode', + '$intArrayFromString', + '$stringToUTF8Array', + '$lengthBytesUTF8', +#if LibraryManager.has('library_pgfs.js') + '$PGFS', +#endif +#if LibraryManager.has('library_idbfs.js') + '$IDBFS', +#endif +#if LibraryManager.has('library_nodefs.js') + '$NODEFS', +#endif +#if LibraryManager.has('library_workerfs.js') + '$WORKERFS', +#endif +#if LibraryManager.has('library_noderawfs.js') + '$NODERAWFS', +#endif +#if LibraryManager.has('library_proxyfs.js') + '$PROXYFS', +#endif +#if ASSERTIONS + '$strError', '$ERRNO_CODES', +#endif + ], + $FS__postset: function() { + // TODO: do we need noFSInit? + addAtInit(` +if (!Module['noFSInit'] && !FS.init.initialized) + FS.init(); +FS.ignorePermissions = false; +`) + addAtExit('FS.quit();'); + return ` +FS.createPreloadedFile = FS_createPreloadedFile; +FS.staticInit();` + + // Get module methods from settings + '{{{ EXPORTED_RUNTIME_METHODS.filter(function(func) { return func.substr(0, 3) === 'FS_' }).map(function(func){return 'Module["' + func + '"] = FS.' + func.substr(3) + ";"}).reduce(function(str, func){return str + func;}, '') }}}'; + }, + $FS: { + root: null, + mounts: [], + devices: {}, + streams: [], + nextInode: 1, + nameTable: null, + currentPath: '/', + initialized: false, + // Whether we are currently ignoring permissions. Useful when preparing the + // filesystem and creating files inside read-only folders. + // This is set to false during `preInit`, allowing you to modify the + // filesystem freely up until that point (e.g. during `preRun`). + ignorePermissions: true, +#if FS_DEBUG + trackingDelegate: {}, +#endif + ErrnoError: null, // set during init + genericErrors: {}, + filesystems: null, + syncFSRequests: 0, // we warn if there are multiple in flight at once + +#if ASSERTIONS + ErrnoError: class extends Error { +#else + ErrnoError: class { +#endif + // We set the `name` property to be able to identify `FS.ErrnoError` + // - the `name` is a standard ECMA-262 property of error objects. Kind of good to have it anyway. + // - when using PROXYFS, an error can come from an underlying FS + // as different FS objects have their own FS.ErrnoError each, + // the test `err instanceof FS.ErrnoError` won't detect an error coming from another filesystem, causing bugs. + // we'll use the reliable test `err.name == "ErrnoError"` instead + constructor(errno) { +#if ASSERTIONS + super(runtimeInitialized ? strError(errno) : ''); +#endif + // TODO(sbc): Use the inline member declaration syntax once we + // support it in acorn and closure. + this.name = 'ErrnoError'; + this.errno = errno; +#if ASSERTIONS + for (var key in ERRNO_CODES) { + if (ERRNO_CODES[key] === errno) { + this.code = key; + break; + } + } +#endif + } + }, + + FSStream: class { + constructor() { + // TODO(https://github.com/emscripten-core/emscripten/issues/21414): + // Use inline field declarations. + this.shared = {}; +#if USE_CLOSURE_COMPILER + // Closure compiler requires us to declare all properties in the + // constructor. 
+ this.node = null; +#endif + } + get object() { + return this.node; + } + set object(val) { + this.node = val; + } + get isRead() { + return (this.flags & {{{ cDefs.O_ACCMODE }}}) !== {{{ cDefs.O_WRONLY }}}; + } + get isWrite() { + return (this.flags & {{{ cDefs.O_ACCMODE }}}) !== {{{ cDefs.O_RDONLY }}}; + } + get isAppend() { + return (this.flags & {{{ cDefs.O_APPEND }}}); + } + get flags() { + return this.shared.flags; + } + set flags(val) { + this.shared.flags = val; + } + get position() { + return this.shared.position; + } + set position(val) { + this.shared.position = val; + } + }, + FSNode: class { + constructor(parent, name, mode, rdev) { + if (!parent) { + parent = this; // root node sets parent to itself + } + this.parent = parent; + this.mount = parent.mount; + this.mounted = null; + this.id = FS.nextInode++; + this.name = name; + this.mode = mode; + this.node_ops = {}; + this.stream_ops = {}; + this.rdev = rdev; + this.readMode = 292/*{{{ cDefs.S_IRUGO }}}*/ | 73/*{{{ cDefs.S_IXUGO }}}*/; + this.writeMode = 146/*{{{ cDefs.S_IWUGO }}}*/; + } + get read() { + return (this.mode & this.readMode) === this.readMode; + } + set read(val) { + val ? this.mode |= this.readMode : this.mode &= ~this.readMode; + } + get write() { + return (this.mode & this.writeMode) === this.writeMode; + } + set write(val) { + val ? this.mode |= this.writeMode : this.mode &= ~this.writeMode; + } + get isFolder() { + return FS.isDir(this.mode); + } + get isDevice() { + return FS.isChrdev(this.mode); + } + }, + + // + // paths + // + lookupPath(path, opts = {}) { + path = PATH_FS.resolve(path); + + if (!path) return { path: '', node: null }; + + var defaults = { + follow_mount: true, + recurse_count: 0 + }; + opts = Object.assign(defaults, opts) + + if (opts.recurse_count > 8) { // max recursive lookup of 8 + throw new FS.ErrnoError({{{ cDefs.ELOOP }}}); + } + + // split the absolute path + var parts = path.split('/').filter((p) => !!p); + + // start at the root + var current = FS.root; + var current_path = '/'; + + for (var i = 0; i < parts.length; i++) { + var islast = (i === parts.length-1); + if (islast && opts.parent) { + // stop resolving + break; + } + + current = FS.lookupNode(current, parts[i]); + current_path = PATH.join2(current_path, parts[i]); + + // jump to the mount's root node if this is a mountpoint + if (FS.isMountpoint(current)) { + if (!islast || (islast && opts.follow_mount)) { + current = current.mounted.root; + } + } + + // by default, lookupPath will not follow a symlink if it is the final path component. + // setting opts.follow = true will override this behavior. + if (!islast || opts.follow) { + var count = 0; + while (FS.isLink(current.mode)) { + var link = FS.readlink(current_path); + current_path = PATH_FS.resolve(PATH.dirname(current_path), link); + + var lookup = FS.lookupPath(current_path, { recurse_count: opts.recurse_count + 1 }); + current = lookup.node; + + if (count++ > 40) { // limit max consecutive symlinks to 40 (SYMLOOP_MAX). + throw new FS.ErrnoError({{{ cDefs.ELOOP }}}); + } + } + } + } + + return { path: current_path, node: current }; + }, + getPath(node) { + var path; + while (true) { + if (FS.isRoot(node)) { + var mount = node.mount.mountpoint; + if (!path) return mount; + return mount[mount.length-1] !== '/' ? `${mount}/${path}` : mount + path; + } + path = path ? 
`${node.name}/${path}` : node.name; + node = node.parent; + } + }, + + // + // nodes + // + hashName(parentid, name) { + var hash = 0; + +#if CASE_INSENSITIVE_FS + name = name.toLowerCase(); +#endif + + for (var i = 0; i < name.length; i++) { + hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0; + } + return ((parentid + hash) >>> 0) % FS.nameTable.length; + }, + hashAddNode(node) { + var hash = FS.hashName(node.parent.id, node.name); + node.name_next = FS.nameTable[hash]; + FS.nameTable[hash] = node; + }, + hashRemoveNode(node) { + var hash = FS.hashName(node.parent.id, node.name); + if (FS.nameTable[hash] === node) { + FS.nameTable[hash] = node.name_next; + } else { + var current = FS.nameTable[hash]; + while (current) { + if (current.name_next === node) { + current.name_next = node.name_next; + break; + } + current = current.name_next; + } + } + }, + lookupNode(parent, name) { + var errCode = FS.mayLookup(parent); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + var hash = FS.hashName(parent.id, name); +#if CASE_INSENSITIVE_FS + name = name.toLowerCase(); +#endif + for (var node = FS.nameTable[hash]; node; node = node.name_next) { + var nodeName = node.name; +#if CASE_INSENSITIVE_FS + nodeName = nodeName.toLowerCase(); +#endif + if (node.parent.id === parent.id && nodeName === name) { + return node; + } + } + // if we failed to find it in the cache, call into the VFS + return FS.lookup(parent, name); + }, + createNode(parent, name, mode, rdev) { +#if ASSERTIONS + assert(typeof parent == 'object') +#endif + var node = new FS.FSNode(parent, name, mode, rdev); + + FS.hashAddNode(node); + + return node; + }, + destroyNode(node) { + FS.hashRemoveNode(node); + }, + isRoot(node) { + return node === node.parent; + }, + isMountpoint(node) { + return !!node.mounted; + }, + isFile(mode) { + return (mode & {{{ cDefs.S_IFMT }}}) === {{{ cDefs.S_IFREG }}}; + }, + isDir(mode) { + return (mode & {{{ cDefs.S_IFMT }}}) === {{{ cDefs.S_IFDIR }}}; + }, + isLink(mode) { + return (mode & {{{ cDefs.S_IFMT }}}) === {{{ cDefs.S_IFLNK }}}; + }, + isChrdev(mode) { + return (mode & {{{ cDefs.S_IFMT }}}) === {{{ cDefs.S_IFCHR }}}; + }, + isBlkdev(mode) { + return (mode & {{{ cDefs.S_IFMT }}}) === {{{ cDefs.S_IFBLK }}}; + }, + isFIFO(mode) { + return (mode & {{{ cDefs.S_IFMT }}}) === {{{ cDefs.S_IFIFO }}}; + }, + isSocket(mode) { + return (mode & {{{ cDefs.S_IFSOCK }}}) === {{{ cDefs.S_IFSOCK }}}; + }, + + // + // permissions + // + // convert O_* bitmask to a string for nodePermissions + flagsToPermissionString(flag) { + var perms = ['r', 'w', 'rw'][flag & 3]; + if ((flag & {{{ cDefs.O_TRUNC }}})) { + perms += 'w'; + } + return perms; + }, + nodePermissions(node, perms) { + if (FS.ignorePermissions) { + return 0; + } + // return 0 if any user, group or owner bits are set. 
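+      // (Editorial clarification: concretely, EACCES is returned as soon as a
+      // requested 'r'/'w'/'x' permission has none of its user/group/other bits
+      // set in node.mode; otherwise the check passes with 0.)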
+ if (perms.includes('r') && !(node.mode & {{{ cDefs.S_IRUGO }}})) { + return {{{ cDefs.EACCES }}}; + } else if (perms.includes('w') && !(node.mode & {{{ cDefs.S_IWUGO }}})) { + return {{{ cDefs.EACCES }}}; + } else if (perms.includes('x') && !(node.mode & {{{ cDefs.S_IXUGO }}})) { + return {{{ cDefs.EACCES }}}; + } + return 0; + }, + mayLookup(dir) { + if (!FS.isDir(dir.mode)) return {{{ cDefs.ENOTDIR }}}; + var errCode = FS.nodePermissions(dir, 'x'); + if (errCode) return errCode; + if (!dir.node_ops.lookup) return {{{ cDefs.EACCES }}}; + return 0; + }, + mayCreate(dir, name) { + try { + var node = FS.lookupNode(dir, name); + return {{{ cDefs.EEXIST }}}; + } catch (e) { + } + return FS.nodePermissions(dir, 'wx'); + }, + mayDelete(dir, name, isdir) { + var node; + try { + node = FS.lookupNode(dir, name); + } catch (e) { + return e.errno; + } + var errCode = FS.nodePermissions(dir, 'wx'); + if (errCode) { + return errCode; + } + if (isdir) { + if (!FS.isDir(node.mode)) { + return {{{ cDefs.ENOTDIR }}}; + } + if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) { + return {{{ cDefs.EBUSY }}}; + } + } else { + if (FS.isDir(node.mode)) { + return {{{ cDefs.EISDIR }}}; + } + } + return 0; + }, + mayOpen(node, flags) { + if (!node) { + return {{{ cDefs.ENOENT }}}; + } + if (FS.isLink(node.mode)) { + return {{{ cDefs.ELOOP }}}; + } else if (FS.isDir(node.mode)) { + if (FS.flagsToPermissionString(flags) !== 'r' || // opening for write + (flags & {{{ cDefs.O_TRUNC }}})) { // TODO: check for O_SEARCH? (== search for dir only) + return {{{ cDefs.EISDIR }}}; + } + } + return FS.nodePermissions(node, FS.flagsToPermissionString(flags)); + }, + + // + // streams + // + MAX_OPEN_FDS: 4096, + nextfd() { + for (var fd = 0; fd <= FS.MAX_OPEN_FDS; fd++) { + if (!FS.streams[fd]) { + return fd; + } + } + throw new FS.ErrnoError({{{ cDefs.EMFILE }}}); + }, + getStreamChecked(fd) { + var stream = FS.getStream(fd); + if (!stream) { + throw new FS.ErrnoError({{{ cDefs.EBADF }}}); + } + return stream; + }, + getStream: (fd) => FS.streams[fd], + // TODO parameterize this function such that a stream + // object isn't directly passed in. not possible until + // SOCKFS is completed. + createStream(stream, fd = -1) { +#if ASSERTIONS + assert(fd >= -1); +#endif + + // clone it, so we can return an instance of FSStream + stream = Object.assign(new FS.FSStream(), stream); + if (fd == -1) { + fd = FS.nextfd(); + } + stream.fd = fd; + FS.streams[fd] = stream; + return stream; + }, + closeStream(fd) { + FS.streams[fd] = null; + }, + dupStream(origStream, fd = -1) { + var stream = FS.createStream(origStream, fd); + stream.stream_ops?.dup?.(stream); + return stream; + }, + + // + // devices + // + // each character device consists of a device id + stream operations. + // when a character device node is created (e.g. /dev/stdin) it is + // assigned a device id that lets us map back to the actual device. + // by default, each character device stream (e.g. _stdin) uses chrdev_stream_ops. + // however, once opened, the stream's operations are overridden with + // the operations of the device its underlying node maps back to. 
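+    // Editorial example: /dev/null is wired up exactly this way in
+    // createDefaultDevices() further down in this file --
+    //   FS.registerDevice(FS.makedev(1, 3), {
+    //     read: () => 0,
+    //     write: (stream, buffer, offset, length, pos) => length,
+    //   });
+    //   FS.mkdev('/dev/null', FS.makedev(1, 3));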
+ chrdev_stream_ops: { + open(stream) { + var device = FS.getDevice(stream.node.rdev); + // override node's stream ops with the device's + stream.stream_ops = device.stream_ops; + // forward the open call + stream.stream_ops.open?.(stream); + }, + llseek() { + throw new FS.ErrnoError({{{ cDefs.ESPIPE }}}); + } + }, + major: (dev) => ((dev) >> 8), + minor: (dev) => ((dev) & 0xff), + makedev: (ma, mi) => ((ma) << 8 | (mi)), + registerDevice(dev, ops) { + FS.devices[dev] = { stream_ops: ops }; + }, + getDevice: (dev) => FS.devices[dev], + + // + // core + // + getMounts(mount) { + var mounts = []; + var check = [mount]; + + while (check.length) { + var m = check.pop(); + + mounts.push(m); + + check.push(...m.mounts); + } + + return mounts; + }, + syncfs(populate, callback) { + if (typeof populate == 'function') { + callback = populate; + populate = false; + } + + FS.syncFSRequests++; + + if (FS.syncFSRequests > 1) { + err(`warning: ${FS.syncFSRequests} FS.syncfs operations in flight at once, probably just doing extra work`); + } + + var mounts = FS.getMounts(FS.root.mount); + var completed = 0; + + function doCallback(errCode) { +#if ASSERTIONS + assert(FS.syncFSRequests > 0); +#endif + FS.syncFSRequests--; + return callback(errCode); + } + + function done(errCode) { + if (errCode) { + if (!done.errored) { + done.errored = true; + return doCallback(errCode); + } + return; + } + if (++completed >= mounts.length) { + doCallback(null); + } + }; + + // sync all mounts + mounts.forEach((mount) => { + if (!mount.type.syncfs) { + return done(null); + } + mount.type.syncfs(mount, populate, done); + }); + }, + mount(type, opts, mountpoint) { +#if ASSERTIONS + if (typeof type == 'string') { + // The filesystem was not included, and instead we have an error + // message stored in the variable. 
+ throw type; + } +#endif + var root = mountpoint === '/'; + var pseudo = !mountpoint; + var node; + + if (root && FS.root) { + throw new FS.ErrnoError({{{ cDefs.EBUSY }}}); + } else if (!root && !pseudo) { + var lookup = FS.lookupPath(mountpoint, { follow_mount: false }); + + mountpoint = lookup.path; // use the absolute path + node = lookup.node; + + if (FS.isMountpoint(node)) { + throw new FS.ErrnoError({{{ cDefs.EBUSY }}}); + } + + if (!FS.isDir(node.mode)) { + throw new FS.ErrnoError({{{ cDefs.ENOTDIR }}}); + } + } + + var mount = { + type, + opts, + mountpoint, + mounts: [] + }; + + // create a root node for the fs + var mountRoot = type.mount(mount); + mountRoot.mount = mount; + mount.root = mountRoot; + + if (root) { + FS.root = mountRoot; + } else if (node) { + // set as a mountpoint + node.mounted = mount; + + // add the new mount to the current mount's children + if (node.mount) { + node.mount.mounts.push(mount); + } + } + + return mountRoot; + }, + unmount(mountpoint) { + var lookup = FS.lookupPath(mountpoint, { follow_mount: false }); + + if (!FS.isMountpoint(lookup.node)) { + throw new FS.ErrnoError({{{ cDefs.EINVAL }}}); + } + + // destroy the nodes for this mount, and all its child mounts + var node = lookup.node; + var mount = node.mounted; + var mounts = FS.getMounts(mount); + + Object.keys(FS.nameTable).forEach((hash) => { + var current = FS.nameTable[hash]; + + while (current) { + var next = current.name_next; + + if (mounts.includes(current.mount)) { + FS.destroyNode(current); + } + + current = next; + } + }); + + // no longer a mountpoint + node.mounted = null; + + // remove this mount from the child mounts + var idx = node.mount.mounts.indexOf(mount); +#if ASSERTIONS + assert(idx !== -1); +#endif + node.mount.mounts.splice(idx, 1); + }, + lookup(parent, name) { + return parent.node_ops.lookup(parent, name); + }, + // generic function for all node creation + mknod(path, mode, dev) { + var lookup = FS.lookupPath(path, { parent: true }); + var parent = lookup.node; + var name = PATH.basename(path); + if (!name || name === '.' || name === '..') { + throw new FS.ErrnoError({{{ cDefs.EINVAL }}}); + } + var errCode = FS.mayCreate(parent, name); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + if (!parent.node_ops.mknod) { + throw new FS.ErrnoError({{{ cDefs.EPERM }}}); + } + return parent.node_ops.mknod(parent, name, mode, dev); + }, + // helpers to create specific types of nodes + create(path, mode) { + mode = mode !== undefined ? mode : 438 /* 0666 */; + mode &= {{{ cDefs.S_IALLUGO }}}; + mode |= {{{ cDefs.S_IFREG }}}; + return FS.mknod(path, mode, 0); + }, + mkdir(path, mode) { + mode = mode !== undefined ? 
mode : 511 /* 0777 */; + mode &= {{{ cDefs.S_IRWXUGO }}} | {{{ cDefs.S_ISVTX }}}; + mode |= {{{ cDefs.S_IFDIR }}}; +#if FS_DEBUG + if (FS.trackingDelegate['onMakeDirectory']) { + FS.trackingDelegate['onMakeDirectory'](path, mode); + } +#endif + return FS.mknod(path, mode, 0); + }, + // Creates a whole directory tree chain if it doesn't yet exist + mkdirTree(path, mode) { + var dirs = path.split('/'); + var d = ''; + for (var i = 0; i < dirs.length; ++i) { + if (!dirs[i]) continue; + d += '/' + dirs[i]; + try { + FS.mkdir(d, mode); + } catch(e) { + if (e.errno != {{{ cDefs.EEXIST }}}) throw e; + } + } + }, + mkdev(path, mode, dev) { + if (typeof dev == 'undefined') { + dev = mode; + mode = 438 /* 0666 */; + } + mode |= {{{ cDefs.S_IFCHR }}}; + return FS.mknod(path, mode, dev); + }, + symlink(oldpath, newpath) { + if (!PATH_FS.resolve(oldpath)) { + throw new FS.ErrnoError({{{ cDefs.ENOENT }}}); + } + var lookup = FS.lookupPath(newpath, { parent: true }); + var parent = lookup.node; + if (!parent) { + throw new FS.ErrnoError({{{ cDefs.ENOENT }}}); + } + var newname = PATH.basename(newpath); + var errCode = FS.mayCreate(parent, newname); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + if (!parent.node_ops.symlink) { + throw new FS.ErrnoError({{{ cDefs.EPERM }}}); + } +#if FS_DEBUG + if (FS.trackingDelegate['onMakeSymlink']) { + FS.trackingDelegate['onMakeSymlink'](oldpath, newpath); + } +#endif + return parent.node_ops.symlink(parent, newname, oldpath); + }, + rename(old_path, new_path) { + var old_dirname = PATH.dirname(old_path); + var new_dirname = PATH.dirname(new_path); + var old_name = PATH.basename(old_path); + var new_name = PATH.basename(new_path); + // parents must exist + var lookup, old_dir, new_dir; + + // let the errors from non existent directories percolate up + lookup = FS.lookupPath(old_path, { parent: true }); + old_dir = lookup.node; + lookup = FS.lookupPath(new_path, { parent: true }); + new_dir = lookup.node; + + if (!old_dir || !new_dir) throw new FS.ErrnoError({{{ cDefs.ENOENT }}}); + // need to be part of the same mount + if (old_dir.mount !== new_dir.mount) { + throw new FS.ErrnoError({{{ cDefs.EXDEV }}}); + } + // source must exist + var old_node = FS.lookupNode(old_dir, old_name); + // old path should not be an ancestor of the new path + var relative = PATH_FS.relative(old_path, new_dirname); + if (relative.charAt(0) !== '.') { + throw new FS.ErrnoError({{{ cDefs.EINVAL }}}); + } + // new path should not be an ancestor of the old path + relative = PATH_FS.relative(new_path, old_dirname); + if (relative.charAt(0) !== '.') { + throw new FS.ErrnoError({{{ cDefs.ENOTEMPTY }}}); + } + // see if the new path already exists + var new_node; + try { + new_node = FS.lookupNode(new_dir, new_name); + } catch (e) { + // not fatal + } + // early out if nothing needs to change + if (old_node === new_node) { + return; + } + // we'll need to delete the old entry + var isdir = FS.isDir(old_node.mode); + var errCode = FS.mayDelete(old_dir, old_name, isdir); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + // need delete permissions if we'll be overwriting. + // need create permissions if new doesn't already exist. + errCode = new_node ? 
+ FS.mayDelete(new_dir, new_name, isdir) : + FS.mayCreate(new_dir, new_name); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + if (!old_dir.node_ops.rename) { + throw new FS.ErrnoError({{{ cDefs.EPERM }}}); + } + if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) { + throw new FS.ErrnoError({{{ cDefs.EBUSY }}}); + } + // if we are going to change the parent, check write permissions + if (new_dir !== old_dir) { + errCode = FS.nodePermissions(old_dir, 'w'); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + } +#if FS_DEBUG + if (FS.trackingDelegate['willMovePath']) { + FS.trackingDelegate['willMovePath'](old_path, new_path); + } +#endif + // remove the node from the lookup hash + FS.hashRemoveNode(old_node); + // do the underlying fs rename + try { + old_dir.node_ops.rename(old_node, new_dir, new_name); + // update old node (we do this here to avoid each backend + // needing to) + old_node.parent = new_dir; + } catch (e) { + throw e; + } finally { + // add the node back to the hash (in case node_ops.rename + // changed its name) + FS.hashAddNode(old_node); + } +#if FS_DEBUG + if (FS.trackingDelegate['onMovePath']) { + FS.trackingDelegate['onMovePath'](old_path, new_path); + } +#endif + }, + rmdir(path) { + var lookup = FS.lookupPath(path, { parent: true }); + var parent = lookup.node; + var name = PATH.basename(path); + var node = FS.lookupNode(parent, name); + var errCode = FS.mayDelete(parent, name, true); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + if (!parent.node_ops.rmdir) { + throw new FS.ErrnoError({{{ cDefs.EPERM }}}); + } + if (FS.isMountpoint(node)) { + throw new FS.ErrnoError({{{ cDefs.EBUSY }}}); + } +#if FS_DEBUG + if (FS.trackingDelegate['willDeletePath']) { + FS.trackingDelegate['willDeletePath'](path); + } +#endif + parent.node_ops.rmdir(parent, name); + FS.destroyNode(node); +#if FS_DEBUG + if (FS.trackingDelegate['onDeletePath']) { + FS.trackingDelegate['onDeletePath'](path); + } +#endif + }, + readdir(path) { + var lookup = FS.lookupPath(path, { follow: true }); + var node = lookup.node; + if (!node.node_ops.readdir) { + throw new FS.ErrnoError({{{ cDefs.ENOTDIR }}}); + } + return node.node_ops.readdir(node); + }, + unlink(path) { + var lookup = FS.lookupPath(path, { parent: true }); + var parent = lookup.node; + if (!parent) { + throw new FS.ErrnoError({{{ cDefs.ENOENT }}}); + } + var name = PATH.basename(path); + var node = FS.lookupNode(parent, name); + var errCode = FS.mayDelete(parent, name, false); + if (errCode) { + // According to POSIX, we should map EISDIR to EPERM, but + // we instead do what Linux does (and we must, as we use + // the musl linux libc). 
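+      // (Editorial note: with isdir=false, mayDelete() reports EISDIR when the
+      // target is a directory, which is the Linux unlink() behaviour described
+      // above.)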
+ throw new FS.ErrnoError(errCode); + } + if (!parent.node_ops.unlink) { + throw new FS.ErrnoError({{{ cDefs.EPERM }}}); + } + if (FS.isMountpoint(node)) { + throw new FS.ErrnoError({{{ cDefs.EBUSY }}}); + } +#if FS_DEBUG + if (FS.trackingDelegate['willDeletePath']) { + FS.trackingDelegate['willDeletePath'](path); + } +#endif + parent.node_ops.unlink(parent, name); + FS.destroyNode(node); +#if FS_DEBUG + if (FS.trackingDelegate['onDeletePath']) { + FS.trackingDelegate['onDeletePath'](path); + } +#endif + }, + readlink(path) { + var lookup = FS.lookupPath(path); + var link = lookup.node; + if (!link) { + throw new FS.ErrnoError({{{ cDefs.ENOENT }}}); + } + if (!link.node_ops.readlink) { + throw new FS.ErrnoError({{{ cDefs.EINVAL }}}); + } + return PATH_FS.resolve(FS.getPath(link.parent), link.node_ops.readlink(link)); + }, + stat(path, dontFollow) { + var lookup = FS.lookupPath(path, { follow: !dontFollow }); + var node = lookup.node; + if (!node) { + throw new FS.ErrnoError({{{ cDefs.ENOENT }}}); + } + if (!node.node_ops.getattr) { + throw new FS.ErrnoError({{{ cDefs.EPERM }}}); + } + return node.node_ops.getattr(node); + }, + lstat(path) { + return FS.stat(path, true); + }, + chmod(path, mode, dontFollow) { + var node; + if (typeof path == 'string') { + var lookup = FS.lookupPath(path, { follow: !dontFollow }); + node = lookup.node; + } else { + node = path; + } + if (!node.node_ops.setattr) { + throw new FS.ErrnoError({{{ cDefs.EPERM }}}); + } + node.node_ops.setattr(node, { + mode: (mode & {{{ cDefs.S_IALLUGO }}}) | (node.mode & ~{{{ cDefs.S_IALLUGO }}}), + timestamp: Date.now() + }); + }, + lchmod(path, mode) { + FS.chmod(path, mode, true); + }, + fchmod(fd, mode) { + var stream = FS.getStreamChecked(fd); + FS.chmod(stream.node, mode); + }, + chown(path, uid, gid, dontFollow) { + var node; + if (typeof path == 'string') { + var lookup = FS.lookupPath(path, { follow: !dontFollow }); + node = lookup.node; + } else { + node = path; + } + if (!node.node_ops.setattr) { + throw new FS.ErrnoError({{{ cDefs.EPERM }}}); + } + node.node_ops.setattr(node, { + timestamp: Date.now() + // we ignore the uid / gid for now + }); + }, + lchown(path, uid, gid) { + FS.chown(path, uid, gid, true); + }, + fchown(fd, uid, gid) { + var stream = FS.getStreamChecked(fd); + FS.chown(stream.node, uid, gid); + }, + truncate(path, len) { + if (len < 0) { + throw new FS.ErrnoError({{{ cDefs.EINVAL }}}); + } + var node; + if (typeof path == 'string') { + var lookup = FS.lookupPath(path, { follow: true }); + node = lookup.node; + } else { + node = path; + } + if (!node.node_ops.setattr) { + throw new FS.ErrnoError({{{ cDefs.EPERM }}}); + } + if (FS.isDir(node.mode)) { + throw new FS.ErrnoError({{{ cDefs.EISDIR }}}); + } + if (!FS.isFile(node.mode)) { + throw new FS.ErrnoError({{{ cDefs.EINVAL }}}); + } + var errCode = FS.nodePermissions(node, 'w'); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + node.node_ops.setattr(node, { + size: len, + timestamp: Date.now() + }); + }, + ftruncate(fd, len) { + var stream = FS.getStreamChecked(fd); + if ((stream.flags & {{{ cDefs.O_ACCMODE }}}) === {{{ cDefs.O_RDONLY}}}) { + throw new FS.ErrnoError({{{ cDefs.EINVAL }}}); + } + FS.truncate(stream.node, len); + }, + utime(path, atime, mtime) { + var lookup = FS.lookupPath(path, { follow: true }); + var node = lookup.node; + node.node_ops.setattr(node, { + timestamp: Math.max(atime, mtime) + }); + }, + open(path, flags, mode) { + if (path === "") { + throw new FS.ErrnoError({{{ cDefs.ENOENT }}}); + } + flags = typeof flags 
== 'string' ? FS_modeStringToFlags(flags) : flags; + if ((flags & {{{ cDefs.O_CREAT }}})) { + mode = typeof mode == 'undefined' ? 438 /* 0666 */ : mode; + mode = (mode & {{{ cDefs.S_IALLUGO }}}) | {{{ cDefs.S_IFREG }}}; + } else { + mode = 0; + } + var node; + if (typeof path == 'object') { + node = path; + } else { + path = PATH.normalize(path); + try { + var lookup = FS.lookupPath(path, { + follow: !(flags & {{{ cDefs.O_NOFOLLOW }}}) + }); + node = lookup.node; + } catch (e) { + // ignore + } + } + // perhaps we need to create the node + var created = false; + if ((flags & {{{ cDefs.O_CREAT }}})) { + if (node) { + // if O_CREAT and O_EXCL are set, error out if the node already exists + if ((flags & {{{ cDefs.O_EXCL }}})) { + throw new FS.ErrnoError({{{ cDefs.EEXIST }}}); + } + } else { + // node doesn't exist, try to create it + node = FS.mknod(path, mode, 0); + created = true; + } + } + if (!node) { + throw new FS.ErrnoError({{{ cDefs.ENOENT }}}); + } + // can't truncate a device + if (FS.isChrdev(node.mode)) { + flags &= ~{{{ cDefs.O_TRUNC }}}; + } + // if asked only for a directory, then this must be one + if ((flags & {{{ cDefs.O_DIRECTORY }}}) && !FS.isDir(node.mode)) { + throw new FS.ErrnoError({{{ cDefs.ENOTDIR }}}); + } + // check permissions, if this is not a file we just created now (it is ok to + // create and write to a file with read-only permissions; it is read-only + // for later use) + if (!created) { + var errCode = FS.mayOpen(node, flags); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + } + // do truncation if necessary + if ((flags & {{{ cDefs.O_TRUNC}}}) && !created) { + FS.truncate(node, 0); + } +#if FS_DEBUG + var trackingFlags = flags +#endif + // we've already handled these, don't pass down to the underlying vfs + flags &= ~({{{ cDefs.O_EXCL }}} | {{{ cDefs.O_TRUNC }}} | {{{ cDefs.O_NOFOLLOW }}}); + + // register the stream with the filesystem + var stream = FS.createStream({ + node, + path: FS.getPath(node), // we want the absolute path to the node + flags, + seekable: true, + position: 0, + stream_ops: node.stream_ops, + // used by the file family libc calls (fopen, fwrite, ferror, etc.) 
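+      // (Editorial note: 'ungotten' presumably buffers bytes pushed back in
+      // the style of ungetc(), and 'error' backs ferror()-like state.)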
+ ungotten: [], + error: false + }); + // call the new stream's open function + if (stream.stream_ops.open) { + stream.stream_ops.open(stream); + } + if (Module['logReadFiles'] && !(flags & {{{ cDefs.O_WRONLY}}})) { + if (!FS.readFiles) FS.readFiles = {}; + if (!(path in FS.readFiles)) { + FS.readFiles[path] = 1; +#if FS_DEBUG + dbg(`FS.trackingDelegate error on read file: ${path}`); +#endif + } + } +#if FS_DEBUG + if (FS.trackingDelegate['onOpenFile']) { + FS.trackingDelegate['onOpenFile'](path, trackingFlags); + } +#endif + return stream; + }, + close(stream) { + if (FS.isClosed(stream)) { + throw new FS.ErrnoError({{{ cDefs.EBADF }}}); + } + if (stream.getdents) stream.getdents = null; // free readdir state + try { + if (stream.stream_ops.close) { + stream.stream_ops.close(stream); + } + } catch (e) { + throw e; + } finally { + FS.closeStream(stream.fd); + } + stream.fd = null; +#if FS_DEBUG + if (stream.path && FS.trackingDelegate['onCloseFile']) { + FS.trackingDelegate['onCloseFile'](stream.path); + } +#endif + }, + isClosed(stream) { + return stream.fd === null; + }, + llseek(stream, offset, whence) { + if (FS.isClosed(stream)) { + throw new FS.ErrnoError({{{ cDefs.EBADF }}}); + } + if (!stream.seekable || !stream.stream_ops.llseek) { + throw new FS.ErrnoError({{{ cDefs.ESPIPE }}}); + } + if (whence != {{{ cDefs.SEEK_SET }}} && whence != {{{ cDefs.SEEK_CUR }}} && whence != {{{ cDefs.SEEK_END }}}) { + throw new FS.ErrnoError({{{ cDefs.EINVAL }}}); + } + stream.position = stream.stream_ops.llseek(stream, offset, whence); + stream.ungotten = []; +#if FS_DEBUG + if (stream.path && FS.trackingDelegate['onSeekFile']) { + FS.trackingDelegate['onSeekFile'](stream.path, stream.position, whence); + } +#endif + return stream.position; + }, + read(stream, buffer, offset, length, position) { +#if ASSERTIONS + assert(offset >= 0); +#endif + if (length < 0 || position < 0) { + throw new FS.ErrnoError({{{ cDefs.EINVAL }}}); + } + if (FS.isClosed(stream)) { + throw new FS.ErrnoError({{{ cDefs.EBADF }}}); + } + if ((stream.flags & {{{ cDefs.O_ACCMODE }}}) === {{{ cDefs.O_WRONLY}}}) { + throw new FS.ErrnoError({{{ cDefs.EBADF }}}); + } + if (FS.isDir(stream.node.mode)) { + throw new FS.ErrnoError({{{ cDefs.EISDIR }}}); + } + if (!stream.stream_ops.read) { + throw new FS.ErrnoError({{{ cDefs.EINVAL }}}); + } + var seeking = typeof position != 'undefined'; + if (!seeking) { + position = stream.position; + } else if (!stream.seekable) { + throw new FS.ErrnoError({{{ cDefs.ESPIPE }}}); + } + var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position); + if (!seeking) stream.position += bytesRead; +#if FS_DEBUG + if (stream.path && FS.trackingDelegate['onReadFile']) { + FS.trackingDelegate['onReadFile'](stream.path, bytesRead); + } +#endif + return bytesRead; + }, + write(stream, buffer, offset, length, position, canOwn) { +#if ASSERTIONS + assert(offset >= 0); +#endif + if (length < 0 || position < 0) { + throw new FS.ErrnoError({{{ cDefs.EINVAL }}}); + } + if (FS.isClosed(stream)) { + throw new FS.ErrnoError({{{ cDefs.EBADF }}}); + } + if ((stream.flags & {{{ cDefs.O_ACCMODE }}}) === {{{ cDefs.O_RDONLY}}}) { + throw new FS.ErrnoError({{{ cDefs.EBADF }}}); + } + if (FS.isDir(stream.node.mode)) { + throw new FS.ErrnoError({{{ cDefs.EISDIR }}}); + } + if (!stream.stream_ops.write) { + throw new FS.ErrnoError({{{ cDefs.EINVAL }}}); + } + if (stream.seekable && stream.flags & {{{ cDefs.O_APPEND }}}) { + // seek to the end before writing in append mode + FS.llseek(stream, 0, {{{ 
cDefs.SEEK_END }}}); + } + var seeking = typeof position != 'undefined'; + if (!seeking) { + position = stream.position; + } else if (!stream.seekable) { + throw new FS.ErrnoError({{{ cDefs.ESPIPE }}}); + } + var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn); + if (!seeking) stream.position += bytesWritten; +#if FS_DEBUG + if (stream.path && FS.trackingDelegate['onWriteToFile']) { + FS.trackingDelegate['onWriteToFile'](stream.path, bytesWritten); + } +#endif + return bytesWritten; + }, + allocate(stream, offset, length) { + if (FS.isClosed(stream)) { + throw new FS.ErrnoError({{{ cDefs.EBADF }}}); + } + if (offset < 0 || length <= 0) { + throw new FS.ErrnoError({{{ cDefs.EINVAL }}}); + } + if ((stream.flags & {{{ cDefs.O_ACCMODE }}}) === {{{ cDefs.O_RDONLY}}}) { + throw new FS.ErrnoError({{{ cDefs.EBADF }}}); + } + if (!FS.isFile(stream.node.mode) && !FS.isDir(stream.node.mode)) { + throw new FS.ErrnoError({{{ cDefs.ENODEV }}}); + } + if (!stream.stream_ops.allocate) { + throw new FS.ErrnoError({{{ cDefs.EOPNOTSUPP }}}); + } + stream.stream_ops.allocate(stream, offset, length); + }, + mmap(stream, length, position, prot, flags) { + // User requests writing to file (prot & PROT_WRITE != 0). + // Checking if we have permissions to write to the file unless + // MAP_PRIVATE flag is set. According to POSIX spec it is possible + // to write to file opened in read-only mode with MAP_PRIVATE flag, + // as all modifications will be visible only in the memory of + // the current process. + if ((prot & {{{ cDefs.PROT_WRITE }}}) !== 0 + && (flags & {{{ cDefs.MAP_PRIVATE}}}) === 0 + && (stream.flags & {{{ cDefs.O_ACCMODE }}}) !== {{{ cDefs.O_RDWR}}}) { + throw new FS.ErrnoError({{{ cDefs.EACCES }}}); + } + if ((stream.flags & {{{ cDefs.O_ACCMODE }}}) === {{{ cDefs.O_WRONLY}}}) { + throw new FS.ErrnoError({{{ cDefs.EACCES }}}); + } + if (!stream.stream_ops.mmap) { + throw new FS.ErrnoError({{{ cDefs.ENODEV }}}); + } + return stream.stream_ops.mmap(stream, length, position, prot, flags); + }, + msync(stream, buffer, offset, length, mmapFlags) { +#if ASSERTIONS + assert(offset >= 0); +#endif + if (!stream.stream_ops.msync) { + return 0; + } + return stream.stream_ops.msync(stream, buffer, offset, length, mmapFlags); + }, + ioctl(stream, cmd, arg) { + if (!stream.stream_ops.ioctl) { + throw new FS.ErrnoError({{{ cDefs.ENOTTY }}}); + } + return stream.stream_ops.ioctl(stream, cmd, arg); + }, + readFile(path, opts = {}) { + opts.flags = opts.flags || {{{ cDefs.O_RDONLY }}}; + opts.encoding = opts.encoding || 'binary'; + if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') { + throw new Error(`Invalid encoding type "${opts.encoding}"`); + } + var ret; + var stream = FS.open(path, opts.flags); + var stat = FS.stat(path); + var length = stat.size; + var buf = new Uint8Array(length); + FS.read(stream, buf, 0, length, 0); + if (opts.encoding === 'utf8') { + ret = UTF8ArrayToString(buf, 0); + } else if (opts.encoding === 'binary') { + ret = buf; + } + FS.close(stream); + return ret; + }, + writeFile(path, data, opts = {}) { + opts.flags = opts.flags || {{{ cDefs.O_TRUNC | cDefs.O_CREAT | cDefs.O_WRONLY }}}; + var stream = FS.open(path, opts.flags, opts.mode); + if (typeof data == 'string') { + var buf = new Uint8Array(lengthBytesUTF8(data)+1); + var actualNumBytes = stringToUTF8Array(data, buf, 0, buf.length); + FS.write(stream, buf, 0, actualNumBytes, undefined, opts.canOwn); + } else if (ArrayBuffer.isView(data)) { + FS.write(stream, data, 0, data.byteLength, 
undefined, opts.canOwn); + } else { + throw new Error('Unsupported data type'); + } + FS.close(stream); + }, + + // + // module-level FS code + // + cwd: () => FS.currentPath, + chdir(path) { + var lookup = FS.lookupPath(path, { follow: true }); + if (lookup.node === null) { + throw new FS.ErrnoError({{{ cDefs.ENOENT }}}); + } + if (!FS.isDir(lookup.node.mode)) { + throw new FS.ErrnoError({{{ cDefs.ENOTDIR }}}); + } + var errCode = FS.nodePermissions(lookup.node, 'x'); + if (errCode) { + throw new FS.ErrnoError(errCode); + } + FS.currentPath = lookup.path; + }, + createDefaultDirectories() { + FS.mkdir('/tmp'); + FS.mkdir('/home'); + FS.mkdir('/home/web_user'); + }, + createDefaultDevices() { + // create /dev + FS.mkdir('/dev'); + // setup /dev/null + FS.registerDevice(FS.makedev(1, 3), { + read: () => 0, + write: (stream, buffer, offset, length, pos) => length, + }); + FS.mkdev('/dev/null', FS.makedev(1, 3)); + // setup /dev/tty and /dev/tty1 + // stderr needs to print output using err() rather than out() + // so we register a second tty just for it. + TTY.register(FS.makedev(5, 0), TTY.default_tty_ops); + TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops); + FS.mkdev('/dev/tty', FS.makedev(5, 0)); + FS.mkdev('/dev/tty1', FS.makedev(6, 0)); + // setup /dev/[u]random + // use a buffer to avoid overhead of individual crypto calls per byte + var randomBuffer = new Uint8Array(1024), randomLeft = 0; + var randomByte = () => { + if (randomLeft === 0) { + randomLeft = randomFill(randomBuffer).byteLength; + } + return randomBuffer[--randomLeft]; + }; + FS.createDevice('/dev', 'random', randomByte); + FS.createDevice('/dev', 'urandom', randomByte); + // we're not going to emulate the actual shm device, + // just create the tmp dirs that reside in it commonly + FS.mkdir('/dev/shm'); + FS.mkdir('/dev/shm/tmp'); + }, + createSpecialDirectories() { + // create /proc/self/fd which allows /proc/self/fd/6 => readlink gives the + // name of the stream for fd 6 (see test_unistd_ttyname) + FS.mkdir('/proc'); + var proc_self = FS.mkdir('/proc/self'); + FS.mkdir('/proc/self/fd'); + FS.mount({ + mount() { + var node = FS.createNode(proc_self, 'fd', {{{ cDefs.S_IFDIR }}} | 511 /* 0777 */, {{{ cDefs.S_IXUGO }}}); + node.node_ops = { + lookup(parent, name) { + var fd = +name; + var stream = FS.getStreamChecked(fd); + var ret = { + parent: null, + mount: { mountpoint: 'fake' }, + node_ops: { readlink: () => stream.path }, + }; + ret.parent = ret; // make it look like a simple root node + return ret; + } + }; + return node; + } + }, {}, '/proc/self/fd'); + }, + createStandardStreams() { + // TODO deprecate the old functionality of a single + // input / output callback and that utilizes FS.createDevice + // and instead require a unique set of stream ops + + // by default, we symlink the standard streams to the + // default tty devices. however, if the standard streams + // have been overwritten we create a unique device for + // them instead. 
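+    // Editorial example (hypothetical host page; callbacks are byte-at-a-time,
+    // and returning null from stdin signals EOF -- see createDevice() below):
+    //   Module['stdin']  = () => null;  // empty stdin, immediate EOF
+    //   Module['stdout'] = (c) => myTerminal.write(String.fromCharCode(c));
+    //   ('myTerminal' is a hypothetical terminal object, not part of this file.)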
+ if (Module['stdin']) { + FS.createDevice('/dev', 'stdin', Module['stdin']); + } else { + FS.symlink('/dev/tty', '/dev/stdin'); + } + if (Module['stdout']) { + FS.createDevice('/dev', 'stdout', null, Module['stdout']); + } else { + FS.symlink('/dev/tty', '/dev/stdout'); + } + if (Module['stderr']) { + FS.createDevice('/dev', 'stderr', null, Module['stderr']); + } else { + FS.symlink('/dev/tty1', '/dev/stderr'); + } + + // open default streams for the stdin, stdout and stderr devices + var stdin = FS.open('/dev/stdin', {{{ cDefs.O_RDONLY }}}); + var stdout = FS.open('/dev/stdout', {{{ cDefs.O_WRONLY }}}); + var stderr = FS.open('/dev/stderr', {{{ cDefs.O_WRONLY }}}); +#if ASSERTIONS + assert(stdin.fd === 0, `invalid handle for stdin (${stdin.fd})`); + assert(stdout.fd === 1, `invalid handle for stdout (${stdout.fd})`); + assert(stderr.fd === 2, `invalid handle for stderr (${stderr.fd})`); +#endif + }, + staticInit() { + // Some errors may happen quite a bit, to avoid overhead we reuse them (and suffer a lack of stack info) + [{{{ cDefs.ENOENT }}}].forEach((code) => { + FS.genericErrors[code] = new FS.ErrnoError(code); + FS.genericErrors[code].stack = ''; + }); + + FS.nameTable = new Array(4096); + + FS.mount(MEMFS, {}, '/'); + + FS.createDefaultDirectories(); + FS.createDefaultDevices(); + FS.createSpecialDirectories(); + + FS.filesystems = { + 'MEMFS': MEMFS, +#if LibraryManager.has('library_pgfs.js') + 'PGFS': PGFS, +#endif +#if LibraryManager.has('library_idbfs.js') + 'IDBFS': IDBFS, +#endif +#if LibraryManager.has('library_nodefs.js') + 'NODEFS': NODEFS, +#endif +#if LibraryManager.has('library_workerfs.js') + 'WORKERFS': WORKERFS, +#endif +#if LibraryManager.has('library_proxyfs.js') + 'PROXYFS': PROXYFS, +#endif + }; + }, + init(input, output, error) { +#if ASSERTIONS + assert(!FS.init.initialized, 'FS.init was previously called. If you want to initialize later with custom parameters, remove any earlier calls (note that one is automatically added to the generated code)'); +#endif + FS.init.initialized = true; + + // Allow Module.stdin etc. 
to provide defaults, if none explicitly passed to us here + Module['stdin'] = input || Module['stdin']; + Module['stdout'] = output || Module['stdout']; + Module['stderr'] = error || Module['stderr']; + + FS.createStandardStreams(); + }, + quit() { + FS.init.initialized = false; + // force-flush all streams, so we get musl std streams printed out +#if hasExportedSymbol('fflush') + _fflush(0); +#endif + // close all of our streams + for (var i = 0; i < FS.streams.length; i++) { + var stream = FS.streams[i]; + if (!stream) { + continue; + } + FS.close(stream); + } + }, + + // + // old v1 compatibility functions + // + findObject(path, dontResolveLastLink) { + var ret = FS.analyzePath(path, dontResolveLastLink); + if (!ret.exists) { + return null; + } + return ret.object; + }, + analyzePath(path, dontResolveLastLink) { + // operate from within the context of the symlink's target + try { + var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink }); + path = lookup.path; + } catch (e) { + } + var ret = { + isRoot: false, exists: false, error: 0, name: null, path: null, object: null, + parentExists: false, parentPath: null, parentObject: null + }; + try { + var lookup = FS.lookupPath(path, { parent: true }); + ret.parentExists = true; + ret.parentPath = lookup.path; + ret.parentObject = lookup.node; + ret.name = PATH.basename(path); + lookup = FS.lookupPath(path, { follow: !dontResolveLastLink }); + ret.exists = true; + ret.path = lookup.path; + ret.object = lookup.node; + ret.name = lookup.node.name; + ret.isRoot = lookup.path === '/'; + } catch (e) { + ret.error = e.errno; + }; + return ret; + }, + createPath(parent, path, canRead, canWrite) { + parent = typeof parent == 'string' ? parent : FS.getPath(parent); + var parts = path.split('/').reverse(); + while (parts.length) { + var part = parts.pop(); + if (!part) continue; + var current = PATH.join2(parent, part); + try { + FS.mkdir(current); + } catch (e) { + // ignore EEXIST + } + parent = current; + } + return current; + }, + createFile(parent, name, properties, canRead, canWrite) { + var path = PATH.join2(typeof parent == 'string' ? parent : FS.getPath(parent), name); + var mode = FS_getMode(canRead, canWrite); + return FS.create(path, mode); + }, + createDataFile(parent, name, data, canRead, canWrite, canOwn) { + var path = name; + if (parent) { + parent = typeof parent == 'string' ? parent : FS.getPath(parent); + path = name ? PATH.join2(parent, name) : parent; + } + var mode = FS_getMode(canRead, canWrite); + var node = FS.create(path, mode); + if (data) { + if (typeof data == 'string') { + var arr = new Array(data.length); + for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i); + data = arr; + } + // make sure we can write to the file + FS.chmod(node, mode | {{{ cDefs.S_IWUGO }}}); + var stream = FS.open(node, {{{ cDefs.O_TRUNC | cDefs.O_CREAT | cDefs.O_WRONLY }}}); + FS.write(stream, data, 0, data.length, 0, canOwn); + FS.close(stream); + FS.chmod(node, mode); + } + }, + createDevice(parent, name, input, output) { + var path = PATH.join2(typeof parent == 'string' ? parent : FS.getPath(parent), name); + var mode = FS_getMode(!!input, !!output); + if (!FS.createDevice.major) FS.createDevice.major = 64; + var dev = FS.makedev(FS.createDevice.major++, 0); + // Create a fake device that a set of stream ops to emulate + // the old behavior. 
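+    // Editorial usage sketch:
+    //   FS.createDevice('/dev', 'logger', null, (byte) => console.log(byte));
+    // creates /dev/logger, whose write path below hands the output callback
+    // one byte per call.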
+ FS.registerDevice(dev, { + open(stream) { + stream.seekable = false; + }, + close(stream) { + // flush any pending line data + if (output?.buffer?.length) { + output({{{ charCode('\n') }}}); + } + }, + read(stream, buffer, offset, length, pos /* ignored */) { + var bytesRead = 0; + for (var i = 0; i < length; i++) { + var result; + try { + result = input(); + } catch (e) { + throw new FS.ErrnoError({{{ cDefs.EIO }}}); + } + if (result === undefined && bytesRead === 0) { + throw new FS.ErrnoError({{{ cDefs.EAGAIN }}}); + } + if (result === null || result === undefined) break; + bytesRead++; + buffer[offset+i] = result; + } + if (bytesRead) { + stream.node.timestamp = Date.now(); + } + return bytesRead; + }, + write(stream, buffer, offset, length, pos) { + for (var i = 0; i < length; i++) { + try { + output(buffer[offset+i]); + } catch (e) { + throw new FS.ErrnoError({{{ cDefs.EIO }}}); + } + } + if (length) { + stream.node.timestamp = Date.now(); + } + return i; + } + }); + return FS.mkdev(path, mode, dev); + }, + // Makes sure a file's contents are loaded. Returns whether the file has + // been loaded successfully. No-op for files that have been loaded already. + forceLoadFile(obj) { + if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true; + #if FS_DEBUG + dbg(`forceLoadFile: ${obj.url}`) + #endif + if (typeof XMLHttpRequest != 'undefined') { + throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread."); + } else { // Command-line. + try { + obj.contents = readBinary(obj.url); + obj.usedBytes = obj.contents.length; + } catch (e) { + throw new FS.ErrnoError({{{ cDefs.EIO }}}); + } + } + }, + // Creates a file record for lazy-loading from a URL. XXX This requires a synchronous + // XHR, which is not possible in browsers except in a web worker! Use preloading, + // either --preload-file in emcc or FS.createPreloadedFile + createLazyFile(parent, name, url, canRead, canWrite) { + // Lazy chunked Uint8Array (implements get and length from Uint8Array). + // Actual getting is abstracted away for eventual reuse. + class LazyUint8Array { + constructor() { + this.lengthKnown = false; + this.chunks = []; // Loaded chunks. Index is the chunk number +#if USE_CLOSURE_COMPILER + // Closure compiler requires us to declare all properties in the + // constructor. + this.getter = undefined; + this._length = 0; + this._chunkSize = 0; +#endif + } + get(idx) { + if (idx > this.length-1 || idx < 0) { + return undefined; + } + var chunkOffset = idx % this.chunkSize; + var chunkNum = (idx / this.chunkSize)|0; + return this.getter(chunkNum)[chunkOffset]; + } + setDataGetter(getter) { + this.getter = getter; + } + cacheLength() { + // Find length + var xhr = new XMLHttpRequest(); + xhr.open('HEAD', url, false); + xhr.send(null); + if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". 
Status: " + xhr.status); + var datalength = Number(xhr.getResponseHeader("Content-length")); + var header; + var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes"; + var usesGzip = (header = xhr.getResponseHeader("Content-Encoding")) && header === "gzip"; + + #if SMALL_XHR_CHUNKS + var chunkSize = 1024; // Chunk size in bytes + #else + var chunkSize = 1024*1024; // Chunk size in bytes + #endif + + if (!hasByteServing) chunkSize = datalength; + + // Function to get a range from the remote URL. + var doXHR = (from, to) => { + if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!"); + if (to > datalength-1) throw new Error("only " + datalength + " bytes available! programmer error!"); + + // TODO: Use mozResponseArrayBuffer, responseStream, etc. if available. + var xhr = new XMLHttpRequest(); + xhr.open('GET', url, false); + if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to); + + // Some hints to the browser that we want binary data. + xhr.responseType = 'arraybuffer'; + if (xhr.overrideMimeType) { + xhr.overrideMimeType('text/plain; charset=x-user-defined'); + } + + xhr.send(null); + if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status); + if (xhr.response !== undefined) { + return new Uint8Array(/** @type{Array} */(xhr.response || [])); + } + return intArrayFromString(xhr.responseText || '', true); + }; + var lazyArray = this; + lazyArray.setDataGetter((chunkNum) => { + var start = chunkNum * chunkSize; + var end = (chunkNum+1) * chunkSize - 1; // including this byte + end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block + if (typeof lazyArray.chunks[chunkNum] == 'undefined') { + lazyArray.chunks[chunkNum] = doXHR(start, end); + } + if (typeof lazyArray.chunks[chunkNum] == 'undefined') throw new Error('doXHR failed!'); + return lazyArray.chunks[chunkNum]; + }); + + if (usesGzip || !datalength) { + // if the server uses gzip or doesn't supply the length, we have to download the whole file to get the (uncompressed) length + chunkSize = datalength = 1; // this will force getter(0)/doXHR do download the whole file + datalength = this.getter(0).length; + chunkSize = datalength; + out("LazyFiles on gzip forces download of the whole file when length is accessed"); + } + + this._length = datalength; + this._chunkSize = chunkSize; + this.lengthKnown = true; + } + get length() { + if (!this.lengthKnown) { + this.cacheLength(); + } + return this._length; + } + get chunkSize() { + if (!this.lengthKnown) { + this.cacheLength(); + } + return this._chunkSize; + } + } + + if (typeof XMLHttpRequest != 'undefined') { + if (!ENVIRONMENT_IS_WORKER) throw 'Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc'; + var lazyArray = new LazyUint8Array(); + var properties = { isDevice: false, contents: lazyArray }; + } else { + var properties = { isDevice: false, url: url }; + } + + var node = FS.createFile(parent, name, properties, canRead, canWrite); + // This is a total hack, but I want to get this lazy file code out of the + // core of MEMFS. If we want to keep this lazy file concept I feel it should + // be its own thin LAZYFS proxying calls to MEMFS. 
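+    // Illustrative call (not part of this patch); the synchronous XHR
+    // fallback means this only works from a worker on the web:
+    //   FS.createLazyFile('/', 'pgdata.img',
+    //                     'https://example.com/pgdata.img', true, false);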
+ if (properties.contents) { + node.contents = properties.contents; + } else if (properties.url) { + node.contents = null; + node.url = properties.url; + } + // Add a function that defers querying the file size until it is asked the first time. + Object.defineProperties(node, { + usedBytes: { + get: function() { return this.contents.length; } + } + }); + // override each stream op with one that tries to force load the lazy file first + var stream_ops = {}; + var keys = Object.keys(node.stream_ops); + keys.forEach((key) => { + var fn = node.stream_ops[key]; + stream_ops[key] = (...args) => { + FS.forceLoadFile(node); + return fn(...args); + }; + }); + function writeChunks(stream, buffer, offset, length, position) { + var contents = stream.node.contents; + if (position >= contents.length) + return 0; + var size = Math.min(contents.length - position, length); +#if ASSERTIONS + assert(size >= 0); +#endif + if (contents.slice) { // normal array + for (var i = 0; i < size; i++) { + buffer[offset + i] = contents[position + i]; + } + } else { + for (var i = 0; i < size; i++) { // LazyUint8Array from sync binary XHR + buffer[offset + i] = contents.get(position + i); + } + } + return size; + } + // use a custom read function + stream_ops.read = (stream, buffer, offset, length, position) => { + FS.forceLoadFile(node); + return writeChunks(stream, buffer, offset, length, position) + }; + // use a custom mmap function + stream_ops.mmap = (stream, length, position, prot, flags) => { + FS.forceLoadFile(node); + var ptr = mmapAlloc(length); + if (!ptr) { + throw new FS.ErrnoError({{{ cDefs.ENOMEM }}}); + } + writeChunks(stream, HEAP8, ptr, length, position); + return { ptr, allocated: true }; + }; + node.stream_ops = stream_ops; + return node; + }, + + // Removed v1 functions +#if ASSERTIONS + absolutePath() { + abort('FS.absolutePath has been removed; use PATH_FS.resolve instead'); + }, + createFolder() { + abort('FS.createFolder has been removed; use FS.mkdir instead'); + }, + createLink() { + abort('FS.createLink has been removed; use FS.symlink instead'); + }, + joinPath() { + abort('FS.joinPath has been removed; use PATH.join instead'); + }, + mmapAlloc() { + abort('FS.mmapAlloc has been replaced by the top level function mmapAlloc'); + }, + standardizePath() { + abort('FS.standardizePath has been removed; use PATH.normalize instead'); + }, +#endif + }, + + $FS_createDataFile__deps: ['$FS'], + $FS_createDataFile: (parent, name, fileData, canRead, canWrite, canOwn) => { + FS.createDataFile(parent, name, fileData, canRead, canWrite, canOwn); + }, + + $FS_unlink__deps: ['$FS'], + $FS_unlink: (path) => FS.unlink(path), + + $FS_mkdirTree__docs: ` + /** + * @param {number=} mode Optionally, the mode to create in. Uses mkdir's + * default if not set. 
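+   * For example, FS_mkdirTree('/tmp/a/b') creates '/tmp', '/tmp/a' and
+   * '/tmp/a/b' in turn, ignoring path components that already exist.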
+ */`, + $FS_mkdirTree__deps: ['$FS'], + $FS_mkdirTree: (path, mode) => FS.mkdirTree(path, mode), + + $FS_createLazyFile__deps: ['$FS'], + $FS_createLazyFile: 'FS.createLazyFile', +}); diff --git a/patches/library_pgfs.js b/patches/library_pgfs.js new file mode 100644 index 00000000..21b71b39 --- /dev/null +++ b/patches/library_pgfs.js @@ -0,0 +1,487 @@ +/** + * @license + * Copyright 2013 The Emscripten Authors + * SPDX-License-Identifier: MIT + */ + +addToLibrary({ + $PGFS__deps: ['$FS', '$MEMFS', '$PATH'], + $PGFS__postset: () => { + addAtExit('PGFS.quit();'); + return ''; + }, + $PGFS: { + dbs: {}, + indexedDB: () => { + if (typeof indexedDB != 'undefined') return indexedDB; + var ret = null; + if (typeof window == 'object') ret = window.indexedDB || window.mozIndexedDB || window.webkitIndexedDB || window.msIndexedDB; +#if ASSERTIONS + assert(ret, 'PGFS used, but indexedDB not supported'); +#endif + return ret; + }, + DB_VERSION: 163, + DB_STORE_NAME: 'PG', + + on_mount: () => { + console.warn("pgfs", "mounted") + }, + + // Queues a new VFS -> PGFS synchronization operation + queuePersist: (mount) => { + function onPersistComplete() { + if (mount.idbPersistState === 'again') startPersist(); // If a new sync request has appeared in between, kick off a new sync + else mount.idbPersistState = 0; // Otherwise reset sync state back to idle to wait for a new sync later + } + function startPersist() { + mount.idbPersistState = 'idb'; // Mark that we are currently running a sync operation + PGFS.syncfs(mount, /*populate:*/false, onPersistComplete); + } + + if (!mount.idbPersistState) { + // Programs typically write/copy/move multiple files in the in-memory + // filesystem within a single app frame, so when a filesystem sync + // command is triggered, do not start it immediately, but only after + // the current frame is finished. This way all the modified files + // inside the main loop tick will be batched up to the same sync. + mount.idbPersistState = setTimeout(startPersist, 0); + } else if (mount.idbPersistState === 'idb') { + // There is an active IndexedDB sync operation in-flight, but we now + // have accumulated more files to sync. We should therefore queue up + // a new sync after the current one finishes so that all writes + // will be properly persisted. + mount.idbPersistState = 'again'; + } + }, + + mount: (mount) => { + // reuse core MEMFS functionality + var mnt = MEMFS.mount(mount); + // If the automatic PGFS persistence option has been selected, then automatically persist + // all modifications to the filesystem as they occur. + if (mount?.opts?.autoPersist) { + mnt.idbPersistState = 0; // IndexedDB sync starts in idle state + var memfs_node_ops = mnt.node_ops; + mnt.node_ops = Object.assign({}, mnt.node_ops); // Clone node_ops to inject write tracking + mnt.node_ops.mknod = (parent, name, mode, dev) => { + var node = memfs_node_ops.mknod(parent, name, mode, dev); + // Propagate injected node_ops to the newly created child node + node.node_ops = mnt.node_ops; + // Remember for each PGFS node which PGFS mount point they came from so we know which mount to persist on modification. 
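+        // autoPersist is opted into at mount time; an illustrative mount
+        // (path shown here is only an example):
+        //   FS.mount(PGFS, { autoPersist: true }, '/tmp/pglite/base');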
+ node.PGFS_mount = mnt.mount; + // Remember original MEMFS stream_ops for this node + node.memfs_stream_ops = node.stream_ops; + // Clone stream_ops to inject write tracking + node.stream_ops = Object.assign({}, node.stream_ops); + + // Track all file writes + node.stream_ops.write = (stream, buffer, offset, length, position, canOwn) => { + // This file has been modified, we must persist IndexedDB when this file closes + stream.node.isModified = true; + return node.memfs_stream_ops.write(stream, buffer, offset, length, position, canOwn); + }; + + // Persist IndexedDB on file close + node.stream_ops.close = (stream) => { + var n = stream.node; + if (n.isModified) { + PGFS.queuePersist(n.PGFS_mount); + n.isModified = false; + } + if (n.memfs_stream_ops.close) return n.memfs_stream_ops.close(stream); + }; + + return node; + }; + // Also kick off persisting the filesystem on other operations that modify the filesystem. + mnt.node_ops.mkdir = (...args) => (PGFS.queuePersist(mnt.mount), memfs_node_ops.mkdir(...args)); + mnt.node_ops.rmdir = (...args) => (PGFS.queuePersist(mnt.mount), memfs_node_ops.rmdir(...args)); + mnt.node_ops.symlink = (...args) => (PGFS.queuePersist(mnt.mount), memfs_node_ops.symlink(...args)); + mnt.node_ops.unlink = (...args) => (PGFS.queuePersist(mnt.mount), memfs_node_ops.unlink(...args)); + mnt.node_ops.rename = (...args) => (PGFS.queuePersist(mnt.mount), memfs_node_ops.rename(...args)); + } + return mnt; + }, + + ext_ok : (...args) => { + console.log("pgfs:ext OK", args); + }, + + ext_fail : (...args) => { + console.log("pgfs:ext FAIL", args); + }, + + + load_pg_extension: (ext, bytes) => { + var data = tinyTar.untar(bytes); + data.forEach(function(file) { + if (!file.name.startsWith(".")) { + const _file = "/tmp/pglite/" + file.name; + console.log(" + ", _file); + if (file.name.endsWith(".so")) { + console.warn(_file, "scheduled for wasm streaming compilation"); + + const ext_ok = (...args) => { + console.log("pgfs:ext OK", _file, args); + }; + + const ext_fail = (...args) => { + console.log("pgfs:ext FAIL", _file, args); + }; + + FS.createPreloadedFile(PATH.dirname(_file), PATH.basename(_file), file.data, true, true, ext_ok, ext_fail, false); + console.log("createPreloadedFile called for :", _file); + } else { + FS.writeFile(_file, file.data); + } + } + }); + console.warn("pgfs ext:end", ext); + }, + + + load_package: async (ext, url) => { + var bytes; + var response; + if (FS.analyzePath(url).exists) { + console.error("PGFS TODO: handle local archives", url) + } else { + console.error("PGFS Fetching:", url) + response = await fetch(url); + } + + if (url.endsWith(".tar")) { + const buffer = await response.arrayBuffer(); + bytes = new Uint8Array(buffer); + } else { + const ds = new DecompressionStream("gzip"); + const gzbytes = await response.blob(); + console.log("gzdata", gzbytes.size); + const stream_in = gzbytes.stream().pipeThrough(ds); + bytes = new Uint8Array(await new Response(stream_in).arrayBuffer()); + } + PGFS.load_pg_extension(ext, bytes); + }, + + + syncfs: (mount, populate, callback) => { + if (populate) { + const save_cb = callback; + console.log("ext ?", Module.pg_extensions ) + + callback = async function load_pg_extensions(arg) { + for (const ext in Module.pg_extensions) { + var blob; + try { + blob = await Module.pg_extensions[ext] + } catch (x) { + console.error("failed to fetch extension :", ext) + continue + } + if (blob) { + const bytes = new Uint8Array(await blob.arrayBuffer()) + console.log(" +", ext,"tardata:", bytes.length ) + if 
(ext=="quack") + console.warn(ext,"skipped !") + else + PGFS.load_pg_extension(ext, bytes) + } else { + console.error("could not get binary data for extension :", ext); + } + } + return save_cb(arg); + } + } + + PGFS.getLocalSet(mount, (err, local) => { + if (err) return callback(err); + + PGFS.getRemoteSet(mount, (err, remote) => { + if (err) return callback(err); + + var src = populate ? remote : local; + var dst = populate ? local : remote; + + PGFS.reconcile(src, dst, callback); + }); + }); + }, + quit: () => { + Object.values(PGFS.dbs).forEach((value) => value.close()); + PGFS.dbs = {}; + }, + getDB: (name, callback) => { + // check the cache first + name = name.split("/").pop() + "@⌁PGLite v16.3⌁"; + var db = PGFS.dbs[name]; + if (db) { + return callback(null, db); + } + + var req; + try { + req = PGFS.indexedDB().open(name, PGFS.DB_VERSION); + } catch (e) { + return callback(e); + } + if (!req) { + return callback("Unable to connect to IndexedDB"); + } + req.onupgradeneeded = (e) => { + var db = /** @type {IDBDatabase} */ (e.target.result); + var transaction = e.target.transaction; + + var fileStore; + + if (db.objectStoreNames.contains(PGFS.DB_STORE_NAME)) { + fileStore = transaction.objectStore(PGFS.DB_STORE_NAME); + } else { + fileStore = db.createObjectStore(PGFS.DB_STORE_NAME); + } + + if (!fileStore.indexNames.contains('timestamp')) { + fileStore.createIndex('timestamp', 'timestamp', { unique: false }); + } + }; + req.onsuccess = () => { + db = /** @type {IDBDatabase} */ (req.result); + + // add to the cache + PGFS.dbs[name] = db; + callback(null, db); + }; + req.onerror = (e) => { + callback(e.target.error); + e.preventDefault(); + }; + }, + getLocalSet: (mount, callback) => { + var entries = {}; + + function isRealDir(p) { + return p !== '.' && p !== '..'; + }; + function toAbsolute(root) { + return (p) => PATH.join2(root, p); + }; + + var check = FS.readdir(mount.mountpoint).filter(isRealDir).map(toAbsolute(mount.mountpoint)); + + while (check.length) { + var path = check.pop(); + var stat; + + try { + stat = FS.stat(path); + } catch (e) { + return callback(e); + } + + if (FS.isDir(stat.mode)) { + check.push(...FS.readdir(path).filter(isRealDir).map(toAbsolute(path))); + } + + entries[path] = { 'timestamp': stat.mtime }; + } + + return callback(null, { type: 'local', entries: entries }); + }, + getRemoteSet: (mount, callback) => { + var entries = {}; + + PGFS.getDB(mount.mountpoint, (err, db) => { + if (err) return callback(err); + + try { + var transaction = db.transaction([PGFS.DB_STORE_NAME], 'readonly'); + transaction.onerror = (e) => { + callback(e.target.error); + e.preventDefault(); + }; + + var store = transaction.objectStore(PGFS.DB_STORE_NAME); + var index = store.index('timestamp'); + + index.openKeyCursor().onsuccess = (event) => { + var cursor = event.target.result; + + if (!cursor) { + return callback(null, { type: 'remote', db, entries }); + } + + entries[cursor.primaryKey] = { 'timestamp': cursor.key }; + + cursor.continue(); + }; + } catch (e) { + return callback(e); + } + }); + }, + loadLocalEntry: (path, callback) => { + var stat, node; + + try { + var lookup = FS.lookupPath(path); + node = lookup.node; + stat = FS.stat(path); + } catch (e) { + return callback(e); + } + + if (FS.isDir(stat.mode)) { + return callback(null, { 'timestamp': stat.mtime, 'mode': stat.mode }); + } else if (FS.isFile(stat.mode)) { + // Performance consideration: storing a normal JavaScript array to a IndexedDB is much slower than storing a typed array. 
+ // Therefore always convert the file contents to a typed array first before writing the data to IndexedDB. + node.contents = MEMFS.getFileDataAsTypedArray(node); + return callback(null, { 'timestamp': stat.mtime, 'mode': stat.mode, 'contents': node.contents }); + } else { + return callback(new Error('node type not supported')); + } + }, + storeLocalEntry: (path, entry, callback) => { + try { + if (FS.isDir(entry['mode'])) { + FS.mkdirTree(path, entry['mode']); + } else if (FS.isFile(entry['mode'])) { + FS.writeFile(path, entry['contents'], { canOwn: true }); + } else { + return callback(new Error('node type not supported')); + } + + FS.chmod(path, entry['mode']); + FS.utime(path, entry['timestamp'], entry['timestamp']); + } catch (e) { + return callback(e); + } + + callback(null); + }, + removeLocalEntry: (path, callback) => { + try { + var stat = FS.stat(path); + + if (FS.isDir(stat.mode)) { + FS.rmdir(path); + } else if (FS.isFile(stat.mode)) { + FS.unlink(path); + } + } catch (e) { + return callback(e); + } + + callback(null); + }, + loadRemoteEntry: (store, path, callback) => { + var req = store.get(path); + req.onsuccess = (event) => callback(null, event.target.result); + req.onerror = (e) => { + callback(e.target.error); + e.preventDefault(); + }; + }, + storeRemoteEntry: (store, path, entry, callback) => { + try { + var req = store.put(entry, path); + } catch (e) { + callback(e); + return; + } + req.onsuccess = (event) => callback(); + req.onerror = (e) => { + callback(e.target.error); + e.preventDefault(); + }; + }, + removeRemoteEntry: (store, path, callback) => { + var req = store.delete(path); + req.onsuccess = (event) => callback(); + req.onerror = (e) => { + callback(e.target.error); + e.preventDefault(); + }; + }, + reconcile: (src, dst, callback) => { + var total = 0; + + var create = []; + Object.keys(src.entries).forEach(function (key) { + var e = src.entries[key]; + var e2 = dst.entries[key]; + if (!e2 || e['timestamp'].getTime() != e2['timestamp'].getTime()) { + create.push(key); + total++; + } + }); + + var remove = []; + Object.keys(dst.entries).forEach(function (key) { + if (!src.entries[key]) { + remove.push(key); + total++; + } + }); + + if (!total) { + return callback(null); + } + + var errored = false; + var db = src.type === 'remote' ? 
src.db : dst.db; + var transaction = db.transaction([PGFS.DB_STORE_NAME], 'readwrite'); + var store = transaction.objectStore(PGFS.DB_STORE_NAME); + + function done(err) { + if (err && !errored) { + errored = true; + return callback(err); + } + }; + + // transaction may abort if (for example) there is a QuotaExceededError + transaction.onerror = transaction.onabort = (e) => { + done(e.target.error); + e.preventDefault(); + }; + + transaction.oncomplete = (e) => { + if (!errored) { + callback(null); + } + }; + + // sort paths in ascending order so directory entries are created + // before the files inside them + create.sort().forEach((path) => { + if (dst.type === 'local') { + PGFS.loadRemoteEntry(store, path, (err, entry) => { + if (err) return done(err); + PGFS.storeLocalEntry(path, entry, done); + }); + } else { + PGFS.loadLocalEntry(path, (err, entry) => { + if (err) return done(err); + PGFS.storeRemoteEntry(store, path, entry, done); + }); + } + }); + + // sort paths in descending order so files are deleted before their + // parent directories + remove.sort().reverse().forEach((path) => { + if (dst.type === 'local') { + PGFS.removeLocalEntry(path, done); + } else { + PGFS.removeRemoteEntry(store, path, done); + } + }); + } + } +}); + +if (WASMFS) { + error("using -lpgfs is not currently supported in WasmFS."); +} + diff --git a/patches/pg_main.c b/patches/pg_main.c new file mode 100644 index 00000000..cf259ac0 --- /dev/null +++ b/patches/pg_main.c @@ -0,0 +1,1499 @@ +#define PDEBUG(...) +#if defined(PG_MAIN) + +#if defined(PG_EC_STATIC) +#warning "PG_EC_STATIC" + +EMSCRIPTEN_KEEPALIVE void +fsync_pgdata(const char *pg_data, int serverVersion) { + // stub +} + +EMSCRIPTEN_KEEPALIVE void +get_restricted_token(void) { + // stub +} + +EMSCRIPTEN_KEEPALIVE void * +pg_malloc(size_t size) +{ + return malloc(size); +} +EMSCRIPTEN_KEEPALIVE void * +pg_malloc_extended(size_t size, int flags) { + return malloc(size); +} + +EMSCRIPTEN_KEEPALIVE void * +pg_realloc(void *ptr, size_t size) { + return realloc(ptr, size); +} + +EMSCRIPTEN_KEEPALIVE char * +pg_strdup(const char *in) { + char *tmp; + + if (!in) + { + fprintf(stderr, + _("cannot duplicate null pointer (internal error)\n")); + exit(EXIT_FAILURE); + } + tmp = strdup(in); + if (!tmp) + { + fprintf(stderr, _("out of memory\n")); + exit(EXIT_FAILURE); + } + return tmp; +} + +EMSCRIPTEN_KEEPALIVE char * +simple_prompt(const char *prompt, bool echo) { + return pg_strdup(""); +} + + + +#endif + + +bool is_node = false; +bool is_repl = true; + +EMSCRIPTEN_KEEPALIVE bool +quote_all_identifiers = false; + + +EMSCRIPTEN_KEEPALIVE void interactive_one(void); +EMSCRIPTEN_KEEPALIVE void interactive_file(void); + +/* exported from postmaster.h */ +EMSCRIPTEN_KEEPALIVE const char* +progname; + +void +PostgresMain(const char *dbname, const char *username) +{ + PDEBUG("# 22: ERROR: PostgresMain should not be called anymore" __FILE__ ); + while (1){}; +} + + + +volatile bool send_ready_for_query = true; +volatile bool idle_in_transaction_timeout_enabled = false; +volatile bool idle_session_timeout_enabled = false; +volatile sigjmp_buf local_sigjmp_buf; + +volatile bool repl = true ; +volatile int pg_idb_status = 0; +volatile bool inloop = false; + +/* ================================================================================ */ +/* ================================================================================ */ +/* ================================================================================ */ +/* 
================================================================================ */ + +EMSCRIPTEN_KEEPALIVE +FILE * single_mode_feed = NULL; + +bool force_echo = false; + +extern void ReInitPostgres(const char *in_dbname, Oid dboid, + const char *username, Oid useroid, + bool load_session_libraries, + bool override_allow_connections, + char *out_dbname); + + +void +AsyncPostgresSingleUserMain(int argc, char *argv[], + const char *username, int async_restart) +{ + const char *dbname = NULL; + + /* Initialize startup process environment. */ + InitStandaloneProcess(argv[0]); + + /* Set default values for command-line options. */ + InitializeGUCOptions(); +PDEBUG("520"); + /* Parse command-line options. */ + process_postgres_switches(argc, argv, PGC_POSTMASTER, &dbname); +PDEBUG("523"); + /* Must have gotten a database name, or have a default (the username) */ + if (dbname == NULL) + { + dbname = username; + if (dbname == NULL) + ereport(FATAL, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("%s: no database nor user name specified", + progname))); + } +if (async_restart) goto async_db_change; + /* Acquire configuration parameters */ + if (!SelectConfigFiles(userDoption, progname)) + proc_exit(1); + + checkDataDir(); + ChangeToDataDir(); + + /* + * Create lockfile for data directory. + */ + CreateDataDirLockFile(false); + + /* read control file (error checking and contains config ) */ + LocalProcessControlFile(false); + + /* + * process any libraries that should be preloaded at postmaster start + */ + process_shared_preload_libraries(); + + /* Initialize MaxBackends */ + InitializeMaxBackends(); +PDEBUG("557"); + /* + * Give preloaded libraries a chance to request additional shared memory. + */ + process_shmem_requests(); + + /* + * Now that loadable modules have had their chance to request additional + * shared memory, determine the value of any runtime-computed GUCs that + * depend on the amount of shared memory required. + */ + InitializeShmemGUCs(); + + /* + * Now that modules have been loaded, we can process any custom resource + * managers specified in the wal_consistency_checking GUC. + */ + InitializeWalConsistencyChecking(); + + CreateSharedMemoryAndSemaphores(); + + /* + * Remember stand-alone backend startup time,roughly at the same point + * during startup that postmaster does so. + */ + PgStartTime = GetCurrentTimestamp(); + + /* + * Create a per-backend PGPROC struct in shared memory. We must do this + * before we can use LWLocks. + */ + InitProcess(); + +// main + SetProcessingMode(InitProcessing); + + /* Early initialization */ + BaseInit(); +async_db_change:; + /* + * General initialization. + * + * NOTE: if you are tempted to add code in this vicinity, consider putting + * it inside InitPostgres() instead. In particular, anything that + * involves database access should be there, not here. + */ + InitPostgres(dbname, InvalidOid, /* database to connect to */ + username, InvalidOid, /* role to connect as */ + !am_walsender, /* honor session_preload_libraries? */ + false, /* don't ignore datallowconn */ + NULL); /* no out_dbname */ + + /* + * If the PostmasterContext is still around, recycle the space; we don't + * need it anymore after InitPostgres completes. Note this does not trash + * *MyProcPort, because ConnCreate() allocated that space with malloc() + * ... else we'd need to copy the Port data first. Also, subsidiary data + * such as the username isn't lost either; see ProcessStartupPacket(). 
+ */ + if (PostmasterContext) + { + MemoryContextDelete(PostmasterContext); + PostmasterContext = NULL; + } + + SetProcessingMode(NormalProcessing); + + /* + * Now all GUC states are fully set up. Report them to client if + * appropriate. + */ + BeginReportingGUCOptions(); + + /* + * Also set up handler to log session end; we have to wait till now to be + * sure Log_disconnections has its final value. + */ + if (IsUnderPostmaster && Log_disconnections) + on_proc_exit(log_disconnections, 0); + + pgstat_report_connect(MyDatabaseId); + + /* Perform initialization specific to a WAL sender process. */ + if (am_walsender) + InitWalSender(); + + /* + * Send this backend's cancellation info to the frontend. + */ + if (whereToSendOutput == DestRemote) + { + StringInfoData buf; + + pq_beginmessage(&buf, 'K'); + pq_sendint32(&buf, (int32) MyProcPid); + pq_sendint32(&buf, (int32) MyCancelKey); + pq_endmessage(&buf); + /* Need not flush since ReadyForQuery will do it. */ + } + + /* Welcome banner for standalone case */ + if (whereToSendOutput == DestDebug) + printf("\nPostgreSQL stand-alone backend %s\n", PG_VERSION); + + /* + * Create the memory context we will use in the main loop. + * + * MessageContext is reset once per iteration of the main loop, ie, upon + * completion of processing of each command message from the client. + */ + MessageContext = AllocSetContextCreate(TopMemoryContext, "MessageContext", ALLOCSET_DEFAULT_SIZES); + + /* + * Create memory context and buffer used for RowDescription messages. As + * SendRowDescriptionMessage(), via exec_describe_statement_message(), is + * frequently executed for ever single statement, we don't want to + * allocate a separate buffer every time. + */ + row_description_context = AllocSetContextCreate(TopMemoryContext, "RowDescriptionContext", ALLOCSET_DEFAULT_SIZES); + MemoryContextSwitchTo(row_description_context); + initStringInfo(&row_description_buf); + MemoryContextSwitchTo(TopMemoryContext); +} + +void +RePostgresSingleUserMain(int single_argc, char *single_argv[], const char *username) +{ + +printf("# 54: RePostgresSingleUserMain progname=%s for %s\n", progname, single_argv[0]); + single_mode_feed = fopen(IDB_PIPE_SINGLE, "r"); + + // should be template1. + const char *dbname = NULL; + + + /* Parse command-line options. */ + process_postgres_switches(single_argc, single_argv, PGC_POSTMASTER, &dbname); + +printf("# 67: dbname=%s\n", dbname); + + LocalProcessControlFile(false); + + process_shared_preload_libraries(); + +// InitializeMaxBackends(); +PDEBUG("# 76 ?"); +// ? IgnoreSystemIndexes = true; +IgnoreSystemIndexes = false; + process_shmem_requests(); + + InitializeShmemGUCs(); + + InitializeWalConsistencyChecking(); +//PDEBUG("# NO CreateSharedMemoryAndSemaphores"); +// CreateSharedMemoryAndSemaphores(); + + PgStartTime = GetCurrentTimestamp(); + +//PDEBUG("# NO InitProcess 'FATAL: you already exist'"); +// InitProcess(); + + SetProcessingMode(InitProcessing); +PDEBUG("# 91: Re-InitPostgres"); +// BaseInit(); + + InitPostgres(dbname, InvalidOid, /* database to connect to */ + username, InvalidOid, /* role to connect as */ + !am_walsender, /* honor session_preload_libraries? 
*/ + false, /* don't ignore datallowconn */ + NULL); /* no out_dbname */ +/* +PDEBUG("# 100"); + if (PostmasterContext) + { + PDEBUG("# 103"); + MemoryContextDelete(PostmasterContext); + PostmasterContext = NULL; + } +*/ + SetProcessingMode(NormalProcessing); + + BeginReportingGUCOptions(); + + if (IsUnderPostmaster && Log_disconnections) + on_proc_exit(log_disconnections, 0); + + pgstat_report_connect(MyDatabaseId); + + /* Perform initialization specific to a WAL sender process. */ + if (am_walsender) + InitWalSender(); + + /* + * Send this backend's cancellation info to the frontend. + */ + if (whereToSendOutput == DestRemote) + { + StringInfoData buf; + + pq_beginmessage(&buf, 'K'); + pq_sendint32(&buf, (int32) MyProcPid); + pq_sendint32(&buf, (int32) MyCancelKey); + pq_endmessage(&buf); + /* Need not flush since ReadyForQuery will do it. */ + } + + /* Welcome banner for standalone case */ + if (whereToSendOutput == DestDebug) + printf("\nPostgreSQL stand-alone backend %s\n", PG_VERSION); + + /* + * Create the memory context we will use in the main loop. + * + * MessageContext is reset once per iteration of the main loop, ie, upon + * completion of processing of each command message from the client. + */ + MessageContext = AllocSetContextCreate(TopMemoryContext, + "MessageContext", + ALLOCSET_DEFAULT_SIZES); + + /* + * Create memory context and buffer used for RowDescription messages. As + * SendRowDescriptionMessage(), via exec_describe_statement_message(), is + * frequently executed for ever single statement, we don't want to + * allocate a separate buffer every time. + */ + row_description_context = AllocSetContextCreate(TopMemoryContext, + "RowDescriptionContext", + ALLOCSET_DEFAULT_SIZES); + MemoryContextSwitchTo(row_description_context); + initStringInfo(&row_description_buf); + MemoryContextSwitchTo(TopMemoryContext); + + /* + * POSTGRES main processing loop begins here + * + * If an exception is encountered, processing resumes here so we abort the + * current transaction and start a new one. + * + * You might wonder why this isn't coded as an infinite loop around a + * PG_TRY construct. The reason is that this is the bottom of the + * exception stack, and so with PG_TRY there would be no exception handler + * in force at all during the CATCH part. By leaving the outermost setjmp + * always active, we have at least some chance of recovering from an error + * during error recovery. (If we get into an infinite loop thereby, it + * will soon be stopped by overflow of elog.c's internal state stack.) + * + * Note that we use sigsetjmp(..., 1), so that this function's signal mask + * (to wit, UnBlockSig) will be restored when longjmp'ing to here. This + * is essential in case we longjmp'd out of a signal handler on a platform + * where that leaves the signal blocked. It's not redundant with the + * unblock in AbortTransaction() because the latter is only called if we + * were inside a transaction. + */ + +#if 1 +#if 1 + if (sigsetjmp(local_sigjmp_buf, 1) != 0) +#endif + { + /* + * NOTE: if you are tempted to add more code in this if-block, + * consider the high probability that it should be in + * AbortTransaction() instead. The only stuff done directly here + * should be stuff that is guaranteed to apply *only* for outer-level + * error recovery, such as adjusting the FE/BE protocol status. 
+ */ + + /* Since not using PG_TRY, must reset error stack by hand */ + error_context_stack = NULL; + + /* Prevent interrupts while cleaning up */ + HOLD_INTERRUPTS(); + + /* + * Forget any pending QueryCancel request, since we're returning to + * the idle loop anyway, and cancel any active timeout requests. (In + * future we might want to allow some timeout requests to survive, but + * at minimum it'd be necessary to do reschedule_timeouts(), in case + * we got here because of a query cancel interrupting the SIGALRM + * interrupt handler.) Note in particular that we must clear the + * statement and lock timeout indicators, to prevent any future plain + * query cancels from being misreported as timeouts in case we're + * forgetting a timeout cancel. + */ + disable_all_timeouts(false); /* do first to avoid race condition */ + QueryCancelPending = false; + idle_in_transaction_timeout_enabled = false; + idle_session_timeout_enabled = false; + + /* Not reading from the client anymore. */ + DoingCommandRead = false; + + /* Make sure libpq is in a good state */ + pq_comm_reset(); + + /* Report the error to the client and/or server log */ + EmitErrorReport(); + + /* + * If Valgrind noticed something during the erroneous query, print the + * query string, assuming we have one. + */ + valgrind_report_error_query(debug_query_string); + + /* + * Make sure debug_query_string gets reset before we possibly clobber + * the storage it points at. + */ + debug_query_string = NULL; + + /* + * Abort the current transaction in order to recover. + */ + AbortCurrentTransaction(); + + if (am_walsender) + WalSndErrorCleanup(); + + PortalErrorCleanup(); + + /* + * We can't release replication slots inside AbortTransaction() as we + * need to be able to start and abort transactions while having a slot + * acquired. But we never need to hold them across top level errors, + * so releasing here is fine. There also is a before_shmem_exit() + * callback ensuring correct cleanup on FATAL errors. + */ + if (MyReplicationSlot != NULL) + ReplicationSlotRelease(); + + /* We also want to cleanup temporary slots on error. */ + ReplicationSlotCleanup(); + + jit_reset_after_error(); + + /* + * Now return to normal top-level context and clear ErrorContext for + * next time. + */ + MemoryContextSwitchTo(TopMemoryContext); + FlushErrorState(); + + /* + * If we were handling an extended-query-protocol message, initiate + * skip till next Sync. This also causes us not to issue + * ReadyForQuery (until we get Sync). + */ + if (doing_extended_query_message) + ignore_till_sync = true; + + /* We don't have a transaction command open anymore */ + xact_started = false; + + /* + * If an error occurred while we were reading a message from the + * client, we have potentially lost track of where the previous + * message ends and the next one begins. Even though we have + * otherwise recovered from the error, we cannot safely read any more + * messages from the client, so there isn't much we can do with the + * connection anymore. 
+ */ + if (pq_is_reading_msg()) + ereport(FATAL, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + errmsg("terminating connection because protocol synchronization was lost"))); + + /* Now we can allow interrupts again */ + RESUME_INTERRUPTS(); + } + + /* We can now handle ereport(ERROR) */ + PG_exception_stack = &local_sigjmp_buf; + + if (!ignore_till_sync) + send_ready_for_query = true; /* initially, or after error */ + +#endif + + if (!inloop) { + inloop = true; + PDEBUG("# 311: REPL(initdb-single):Begin " __FILE__ ); + + while (repl) { interactive_file(); } + } else { + // signal error + optind = -1; + } + + fclose(single_mode_feed); + + if (strlen(getenv("REPL")) && getenv("REPL")[0]=='Y') { + PDEBUG("# 321: REPL(initdb-single):End " __FILE__ ); + + /* now use stdin as source */ + repl = true; + single_mode_feed = NULL; + + force_echo = true; + if (!is_node) { + fprintf(stdout,"# now in webloop(RAF)\npg> %c\n", 4); + emscripten_set_main_loop( (em_callback_func)interactive_one, 0, 1); + } else { + PDEBUG("# 331: REPL(single after initdb):Begin(NORETURN)"); + while (repl) { interactive_file(); } + exit(0); + } + + PDEBUG("# 338: REPL:End Raising a 'RuntimeError Exception' to halt program NOW"); + { + void (*npe)() = NULL; + npe(); + } + // unreachable. + } + + PDEBUG("# 346: no line-repl requested, exiting and keeping runtime alive"); +} + + + +/* ================================================================================ */ +/* ================================================================================ */ +/* ================================================================================ */ +/* ================================================================================ */ + +EMSCRIPTEN_KEEPALIVE void +pg_repl_raf(){ // const char* std_in, const char* std_out, const char* std_err, const char* js_handler) { + //printf("# 531: in=%s out=%s err=%s js=%s\n", std_in, std_out, std_err, js_handler); + + is_repl = strlen(getenv("REPL")) && getenv("REPL")[0]=='Y'; + + if (is_repl) { + PDEBUG("# 536: switching to REPL mode (raf)"); + repl = true; + single_mode_feed = NULL; + force_echo = true; + whereToSendOutput = DestNone; + emscripten_set_main_loop( (em_callback_func)interactive_one, 0, 0); + } else { + PDEBUG("# 543: wire mode"); + } +} + +EMSCRIPTEN_KEEPALIVE void +pg_initdb_start() { + pg_idb_status++; +} + + +EMSCRIPTEN_KEEPALIVE void +pg_shutdown() { + PDEBUG("pg_shutdown"); + proc_exit(66); +} + +EMSCRIPTEN_KEEPALIVE int +pg_isready() { + return pg_idb_status; + +} + +int loops = 0; + + + +EM_JS(int, peek_fd, (int fd), { + return test_data.length; +}); + +EM_JS(int, fnc_getfd, (int fd), { + return fnc_stdin() +}); + + +EMSCRIPTEN_KEEPALIVE void +interactive_file() { + int firstchar; + int c; /* character read from getc() */ + StringInfoData input_message; + StringInfoData *inBuf; + FILE *stream ; + + /* + * At top of loop, reset extended-query-message flag, so that any + * errors encountered in "idle" state don't provoke skip. + */ + doing_extended_query_message = false; + + /* + * Release storage left over from prior query cycle, and create a new + * query input buffer in the cleared MessageContext. 
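+	 *
+	 * Each call consumes at most one command: either a wire-protocol
+	 * message via SocketBackend(), or one line-mode query assembled
+	 * below, which is then dispatched by the included pg_proto.c.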
+ */ + MemoryContextSwitchTo(MessageContext); + MemoryContextResetAndDeleteChildren(MessageContext); + + initStringInfo(&input_message); + inBuf = &input_message; + DoingCommandRead = true; + + //firstchar = ReadCommand(&input_message); + if (whereToSendOutput == DestRemote) + firstchar = SocketBackend(&input_message); + else { + + /* + * display a prompt and obtain input from the user + */ + if (!single_mode_feed) { + printf("pg> %c\n", 4); + fflush(stdout); + stream = stdin; + } else { + stream = single_mode_feed; + } + + resetStringInfo(inBuf); + while ((c = getc(stream)) != EOF) + { + if (c == '\n') + { + if (UseSemiNewlineNewline) + { + /* + * In -j mode, semicolon followed by two newlines ends the + * command; otherwise treat newline as regular character. + */ + if (inBuf->len > 1 && + inBuf->data[inBuf->len - 1] == '\n' && + inBuf->data[inBuf->len - 2] == ';') + { + /* might as well drop the second newline */ + break; + } + } + else + { + /* + * In plain mode, newline ends the command unless preceded by + * backslash. + */ + if (inBuf->len > 0 && + inBuf->data[inBuf->len - 1] == '\\') + { + /* discard backslash from inBuf */ + inBuf->data[--inBuf->len] = '\0'; + /* discard newline too */ + continue; + } + else + { + /* keep the newline character, but end the command */ + appendStringInfoChar(inBuf, '\n'); + break; + } + } + } + + /* Not newline, or newline treated as regular character */ + appendStringInfoChar(inBuf, (char) c); + } + + if (c == EOF && inBuf->len == 0) { + firstchar = EOF; + } else { + /* Add '\0' to make it look the same as message case. */ + appendStringInfoChar(inBuf, (char) '\0'); + firstchar = 'Q'; + } + } + + if (ignore_till_sync && firstchar != EOF) + return; + + #include "pg_proto.c" +} + +#include "./interactive_one.c" + + + +void +PostgresSingleUserMain(int argc, char *argv[], + const char *username) +{ + const char *dbname = NULL; + + Assert(!IsUnderPostmaster); + + progname = get_progname(argv[0]); + + /* Initialize startup process environment. */ + InitStandaloneProcess(argv[0]); + + /* Set default values for command-line options. */ + InitializeGUCOptions(); + + /* Parse command-line options. */ + process_postgres_switches(argc, argv, PGC_POSTMASTER, &dbname); + + /* Must have gotten a database name, or have a default (the username) */ + if (dbname == NULL) + { + dbname = username; + if (dbname == NULL) + ereport(FATAL, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("%s: no database nor user name specified", + progname))); + } + + /* Acquire configuration parameters */ + if (!SelectConfigFiles(userDoption, progname)) + proc_exit(1); + + checkDataDir(); + ChangeToDataDir(); + + /* + * Create lockfile for data directory. + */ + CreateDataDirLockFile(false); + + /* read control file (error checking and contains config ) */ + LocalProcessControlFile(false); + + /* + * process any libraries that should be preloaded at postmaster start + */ + process_shared_preload_libraries(); + + /* Initialize MaxBackends */ + InitializeMaxBackends(); +PDEBUG("560"); + /* + * Give preloaded libraries a chance to request additional shared memory. + */ + process_shmem_requests(); + + /* + * Now that loadable modules have had their chance to request additional + * shared memory, determine the value of any runtime-computed GUCs that + * depend on the amount of shared memory required. + */ + InitializeShmemGUCs(); + + /* + * Now that modules have been loaded, we can process any custom resource + * managers specified in the wal_consistency_checking GUC. 
+ */ + InitializeWalConsistencyChecking(); + + CreateSharedMemoryAndSemaphores(); + + /* + * Remember stand-alone backend startup time,roughly at the same point + * during startup that postmaster does so. + */ + PgStartTime = GetCurrentTimestamp(); + + /* + * Create a per-backend PGPROC struct in shared memory. We must do this + * before we can use LWLocks. + */ + InitProcess(); + +// main + SetProcessingMode(InitProcessing); + + /* Early initialization */ + BaseInit(); + + /* + * General initialization. + * + * NOTE: if you are tempted to add code in this vicinity, consider putting + * it inside InitPostgres() instead. In particular, anything that + * involves database access should be there, not here. + */ + InitPostgres(dbname, InvalidOid, /* database to connect to */ + username, InvalidOid, /* role to connect as */ + !am_walsender, /* honor session_preload_libraries? */ + false, /* don't ignore datallowconn */ + NULL); /* no out_dbname */ + + /* + * If the PostmasterContext is still around, recycle the space; we don't + * need it anymore after InitPostgres completes. Note this does not trash + * *MyProcPort, because ConnCreate() allocated that space with malloc() + * ... else we'd need to copy the Port data first. Also, subsidiary data + * such as the username isn't lost either; see ProcessStartupPacket(). + */ + if (PostmasterContext) + { + MemoryContextDelete(PostmasterContext); + PostmasterContext = NULL; + } + + SetProcessingMode(NormalProcessing); + + /* + * Now all GUC states are fully set up. Report them to client if + * appropriate. + */ + BeginReportingGUCOptions(); + + /* + * Also set up handler to log session end; we have to wait till now to be + * sure Log_disconnections has its final value. + */ + if (IsUnderPostmaster && Log_disconnections) + on_proc_exit(log_disconnections, 0); + + pgstat_report_connect(MyDatabaseId); + + /* Perform initialization specific to a WAL sender process. */ + if (am_walsender) + InitWalSender(); + + /* + * Send this backend's cancellation info to the frontend. + */ + if (whereToSendOutput == DestRemote) + { + StringInfoData buf; + + pq_beginmessage(&buf, 'K'); + pq_sendint32(&buf, (int32) MyProcPid); + pq_sendint32(&buf, (int32) MyCancelKey); + pq_endmessage(&buf); + /* Need not flush since ReadyForQuery will do it. */ + } + + /* Welcome banner for standalone case */ + if (whereToSendOutput == DestDebug) + printf("\nPostgreSQL stand-alone backend %s\n", PG_VERSION); + + /* + * Create the memory context we will use in the main loop. + * + * MessageContext is reset once per iteration of the main loop, ie, upon + * completion of processing of each command message from the client. + */ + MessageContext = AllocSetContextCreate(TopMemoryContext, + "MessageContext", + ALLOCSET_DEFAULT_SIZES); + + /* + * Create memory context and buffer used for RowDescription messages. As + * SendRowDescriptionMessage(), via exec_describe_statement_message(), is + * frequently executed for ever single statement, we don't want to + * allocate a separate buffer every time. + */ + row_description_context = AllocSetContextCreate(TopMemoryContext, + "RowDescriptionContext", + ALLOCSET_DEFAULT_SIZES); + MemoryContextSwitchTo(row_description_context); + initStringInfo(&row_description_buf); + MemoryContextSwitchTo(TopMemoryContext); + + /* + * POSTGRES main processing loop begins here + * + * If an exception is encountered, processing resumes here so we abort the + * current transaction and start a new one. 
+ * + * You might wonder why this isn't coded as an infinite loop around a + * PG_TRY construct. The reason is that this is the bottom of the + * exception stack, and so with PG_TRY there would be no exception handler + * in force at all during the CATCH part. By leaving the outermost setjmp + * always active, we have at least some chance of recovering from an error + * during error recovery. (If we get into an infinite loop thereby, it + * will soon be stopped by overflow of elog.c's internal state stack.) + * + * Note that we use sigsetjmp(..., 1), so that this function's signal mask + * (to wit, UnBlockSig) will be restored when longjmp'ing to here. This + * is essential in case we longjmp'd out of a signal handler on a platform + * where that leaves the signal blocked. It's not redundant with the + * unblock in AbortTransaction() because the latter is only called if we + * were inside a transaction. + */ + +exception_handler: + +#if 1 + if (sigsetjmp(local_sigjmp_buf, 1) != 0) + { + error_context_stack = NULL; + HOLD_INTERRUPTS(); + disable_all_timeouts(false); /* do first to avoid race condition */ + QueryCancelPending = false; + idle_in_transaction_timeout_enabled = false; + idle_session_timeout_enabled = false; + DoingCommandRead = false; + pq_comm_reset(); + EmitErrorReport(); + valgrind_report_error_query(debug_query_string); + debug_query_string = NULL; + AbortCurrentTransaction(); + if (am_walsender) + WalSndErrorCleanup(); + PortalErrorCleanup(); + if (MyReplicationSlot != NULL) + ReplicationSlotRelease(); + ReplicationSlotCleanup(); + jit_reset_after_error(); + MemoryContextSwitchTo(TopMemoryContext); + FlushErrorState(); + if (doing_extended_query_message) + ignore_till_sync = true; + xact_started = false; + if (pq_is_reading_msg()) { + ereport(FATAL, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + errmsg("terminating connection because protocol synchronization was lost"))); + } + RESUME_INTERRUPTS(); + } + PG_exception_stack = &local_sigjmp_buf; + if (!ignore_till_sync) + send_ready_for_query = true; /* initially, or after error */ +#endif + + /* + * Non-error queries loop here. 
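+	 * (On the web/REPL path, pg_repl_raf() above schedules the same
+	 * per-command step, interactive_one(), from the browser event loop
+	 * via emscripten_set_main_loop() instead of looping here.)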
+	 */
+
+printf("# 943: hybrid loop:Begin CI=%s\n", getenv("CI") );
+    fprintf(stdout,"pg> %c\n", 4);
+    while (repl && !proc_exit_inprogress) {
+        interactive_one();
+    }
+    PDEBUG("\n\n# 948: REPL:End " __FILE__);
+    abort();
+#if !defined(PG_INITDB_MAIN)
+    proc_exit(0);
+#endif
+}
+
+#else
+
+extern bool is_node;
+extern bool is_repl;
+
+extern bool quote_all_identifiers;
+
+#if defined(__EMSCRIPTEN__) || defined(__wasi__)
+#include <unistd.h>   /* chdir */
+#include <sys/stat.h> /* mkdir */
+static
+void mkdirp(const char *p) {
+    if (!mkdir(p, 0700)) {
+        fprintf(stderr, "# no '%s' directory, creating one ...\n", p);
+    }
+}
+#endif /* wasm */
+
+
+#if defined(PG_INITDB_MAIN)
+extern int pg_initdb_main();
+
+extern void RePostgresSingleUserMain(int single_argc, char *single_argv[], const char *username);
+extern void AsyncPostgresSingleUserMain(int single_argc, char *single_argv[], const char *username, int async_restart);
+extern void main_post();
+extern void proc_exit(int code);
+
+
+EMSCRIPTEN_KEEPALIVE int
+pg_initdb() {
+    PDEBUG("# 985: pg_initdb()");
+    optind = 1;
+    int async_restart = 1;
+
+    if (!chdir(getenv("PGDATA"))){
+        if (access("PG_VERSION", F_OK) == 0) {
+            chdir("/");
+            printf("pg_initdb: db exists at : %s\n", getenv("PGDATA") );
+            main_post();
+            async_restart = 0;
+            {
+                char *single_argv[] = {
+                    WASM_PREFIX "/bin/postgres",
+                    "--single",
+                    "-d", "1", "-B", "16", "-S", "512", "-f", "siobtnmh",
+                    "-D", getenv("PGDATA"),
+                    "-F", "-O", "-j",
+                    WASM_PGOPTS,
+                    "template1",
+                    NULL
+                };
+                int single_argc = sizeof(single_argv) / sizeof(char*) - 1;
+                optind = 1;
+                AsyncPostgresSingleUserMain(single_argc, single_argv, strdup(getenv("PGUSER")), async_restart);
+            }
+
+            goto initdb_done;
+        }
+        chdir("/");
+        printf("pg_initdb: no db found at : %s\n", getenv("PGDATA") );
+    }
+    PDEBUG("# 1019");
+    printf("# pg_initdb_main result = %d\n", pg_initdb_main() );
+
+    /* save stdin and use previous initdb output to feed boot mode */
+    int saved_stdin = dup(STDIN_FILENO);
+    {
+        PDEBUG("# restarting in boot mode for initdb");
+        freopen(IDB_PIPE_BOOT, "r", stdin);
+
+        char *boot_argv[] = {
+            WASM_PREFIX "/bin/postgres",
+            "--boot",
+            "-D", getenv("PGDATA"),
+            "-d","3",
+            WASM_PGOPTS,
+            "-X", "1048576",
+            NULL
+        };
+        int boot_argc = sizeof(boot_argv) / sizeof(char*) - 1;
+
+        set_pglocale_pgservice(boot_argv[0], PG_TEXTDOMAIN("initdb"));
+
+        optind = 1;
+        BootstrapModeMain(boot_argc, boot_argv, false);
+        fclose(stdin);
+        remove(IDB_PIPE_BOOT);
+        stdin = fdopen(saved_stdin, "r");
+        /* fake a shutdown to complete WAL/OID states */
+        proc_exit(66);
+    }
+
+    PDEBUG("# 1051");
+    /* use previous initdb output to feed single mode */
+
+    /* or resume a previous db */
+
+    {
+        PDEBUG("# restarting in single mode for initdb");
+
+        char *single_argv[] = {
+            WASM_PREFIX "/bin/postgres",
+            "--single",
+            "-d", "1", "-B", "16", "-S", "512", "-f", "siobtnmh",
+            "-D", getenv("PGDATA"),
+            "-F", "-O", "-j",
+            WASM_PGOPTS,
+            "template1",
+            NULL
+        };
+        int single_argc = sizeof(single_argv) / sizeof(char*) - 1;
+        optind = 1;
+        RePostgresSingleUserMain(single_argc, single_argv, strdup( getenv("PGUSER")));
+    }
+
+initdb_done:;
+    {
+        if (async_restart)
+            PDEBUG("# FIXME: restart in server mode on 'postgres' db");
+        else
+            PDEBUG("# FIXME: start server on 'postgres' db");
+    }
+
+    if (optind>0) {
+        /* RESET getopt */
+        optind = 1;
+        return false;
+    }
+    PDEBUG("# exiting on initdb-single error");
+    return true;
+}
+
+
+#endif
+
+#define PGDB WASM_PREFIX "/base"
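+/*
+ * Host-side usage sketch (illustrative; assumes the default Emscripten
+ * Module export, not an API defined by this patch):
+ *
+ *   Module._pg_initdb();     // 0 when the single-user pass succeeded
+ *   Module._pg_isready();    // > 0 once pg_initdb_start() has been called
+ *   Module._pg_shutdown();   // triggers proc_exit(66)
+ */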
+EM_JS(int, is_web_env, (), {
+    try {
+        if (window) return 1;
+    } catch(x) { return 0 }
+});
+
+static void
+main_pre(int argc, char *argv[]) {
+
+    char key[256];
+    int i=0;
+// extra env is always after normal args
+    PDEBUG("# ============= extra argv dump ==================");
+    {
+        for (;i<argc;i++) {
+            if (strchr(argv[i], '='))
+                break;
+            PDEBUG(argv[i]);
+        }
+    }
+    PDEBUG("# ============= extra env dump ==================");
+    {
+        for (;i<argc;i++) {
+            char *kv = argv[i];
+            for (int sk=0; sk<(int)strlen(kv); sk++) {
+                if (sk>255) {
+                    PDEBUG("buffer overrun on extra env at:");
+                    PDEBUG(kv);
+                    continue;
+                }
+                if (kv[sk]=='=') {
+                    memcpy(key, kv, sk);
+                    key[sk] = 0;
+                    printf("%s='%s'\n", &(key[0]), &(kv[sk+1]));
+                    setenv(key, &kv[sk+1], 1);
+                }
+            }
+        }
+    }
+    PDEBUG("\n# =========================================");
+
+    argv[0] = strdup(WASM_PREFIX "/bin/postgres");
+
+#if defined(__EMSCRIPTEN__)
+    EM_ASM({
+        Module.is_worker = (typeof WorkerGlobalScope !== 'undefined') && self instanceof WorkerGlobalScope;
+        Module.FD_BUFFER_MAX = $0;
+        Module.emscripten_copy_to = console.warn;
+    }, FD_BUFFER_MAX); /* ( global mem start / num fd max ) */
+
+    if (is_node) {
+        setenv("ENVIRONMENT", "node" , 1);
+        EM_ASM({
+            console.warn("prerun(C-node) worker=", Module.is_worker);
+            Module['postMessage'] = function custom_postMessage(event) {
+                console.log("onCustomMessage:", event);
+            };
+        });
+
+    } else {
+        setenv("ENVIRONMENT", "web" , 1);
+        EM_ASM({
+            console.warn("prerun(C-web) worker=", Module.is_worker);
+        });
+        is_repl = true;
+    }
+
+    EM_ASM({
+        if (Module.is_worker) {
+            //console.log("Main: running in a worker, setting onCustomMessage");
+            function onCustomMessage(event) {
+                console.log("onCustomMessage:", event);
+                // PUT SHM HERE
+                //stringToUTF8( utf8encode(data), shm_rcon, FD_BUFFER_MAX);
+            };
+            Module['onCustomMessage'] = onCustomMessage;
+        } else {
+            //console.log("Running in main thread, faking onCustomMessage");
+            Module['postMessage'] = function custom_postMessage(event) {
+                switch (event.type) {
+                    case "raw" : {
+                        stringToUTF8( event.data, shm_rawinput, Module.FD_BUFFER_MAX);
+                        break;
+                    }
+                    case "stdin" : {
+                        stringToUTF8( event.data, 1, Module.FD_BUFFER_MAX);
+                        break;
+                    }
+                    case "rcon" : {
+                        stringToUTF8( event.data, shm_rcon, Module.FD_BUFFER_MAX);
+                        break;
+                    }
+                    default : console.warn("custom_postMessage?", event);
+                }
+            };
+            //if (!window.vm)
+            //    window.vm = Module;
+        };
+    });
+
+#endif
+    chdir("/");
+    if (access("/etc/fstab", F_OK) == 0) {
+        PDEBUG("WARNING: Node with real filesystem access");
+    } else {
+        mkdirp("/tmp");
+        mkdirp("/tmp/pgdata");
+        mkdirp("/tmp/pglite");
+        mkdirp(WASM_PREFIX);
+    }
+
+    // postgres does not know where to find the server configuration file.
+    // also we store the fake locale file there.
+    // postgres.js:1605 You must specify the --config-file or -D invocation option or set the PGDATA environment variable.
+    // ?? setenv("PGDATABASE", WASM_PREFIX "/db" , 1 );
+    setenv("PGSYSCONFDIR", WASM_PREFIX, 1);
+    setenv("PGCLIENTENCODING", "UTF8", 1);
+
+    // default is to run a repl loop
+    setenv("REPL", "Y", 0);
+/*
+ * we cannot run "locale -a" either from web or node.
the file getenv("PGSYSCONFDIR") / "locale" + * serves as popen output + */ + + setenv("LC_CTYPE", "C" , 1); + + /* default username */ + setenv("PGUSER", WASM_USERNAME , 0); + + /* default path */ + setenv("PGDATA", PGDB , 0); + + setenv("PG_COLOR", "always", 0); + +PDEBUG("# ============= env dump =================="); + for (char **env = environ; *env != 0; env++) + { + char *drefp = *env; + printf("# %s\n", drefp); + } +PDEBUG("# ========================================="); + + mkdirp(WASM_PREFIX); +} + +int g_argc; +char **g_argv; + +void main_post() { + /* + * Fire up essential subsystems: error and memory management + * + * Code after this point is allowed to use elog/ereport, though + * localization of messages may not work right away, and messages won't go + * anywhere but stderr until GUC settings get loaded. + */ + MemoryContextInit(); + + /* + * Set up locale information + */ + set_pglocale_pgservice(g_argv[0], PG_TEXTDOMAIN("postgres")); + + /* + * In the postmaster, absorb the environment values for LC_COLLATE and + * LC_CTYPE. Individual backends will change these later to settings + * taken from pg_database, but the postmaster cannot do that. If we leave + * these set to "C" then message localization might not work well in the + * postmaster. + */ + init_locale("LC_COLLATE", LC_COLLATE, ""); + init_locale("LC_CTYPE", LC_CTYPE, ""); + + /* + * LC_MESSAGES will get set later during GUC option processing, but we set + * it here to allow startup error messages to be localized. + */ + #ifdef LC_MESSAGES + init_locale("LC_MESSAGES", LC_MESSAGES, ""); + #endif + + /* + * We keep these set to "C" always, except transiently in pg_locale.c; see + * that file for explanations. + */ + init_locale("LC_MONETARY", LC_MONETARY, "C"); + init_locale("LC_NUMERIC", LC_NUMERIC, "C"); + init_locale("LC_TIME", LC_TIME, "C"); + + /* + * Now that we have absorbed as much as we wish to from the locale + * environment, remove any LC_ALL setting, so that the environment + * variables installed by pg_perm_setlocale have force. + */ + unsetenv("LC_ALL"); +} + +EMSCRIPTEN_KEEPALIVE void +__cxa_throw(void *thrown_exception, void *tinfo, void *dest) {} + +extern void AsyncPostgresSingleUserMain(int single_argc, char *single_argv[], const char *username, int async_restart); + +EMSCRIPTEN_KEEPALIVE int +main_repl(int async) { + bool hadloop_error = false; + + whereToSendOutput = DestNone; + + if (!mkdir(PGDB, 0700)) { + /* no db : run initdb now. */ + fprintf(stderr, "db %s not found, running initdb with defaults\n", PGDB ); + #if defined(PG_INITDB_MAIN) + #warning "web build" + hadloop_error = pg_initdb(); + + #else + #warning "node build" + #endif + + } else { + // download a db case ? + mkdirp(PGDB); + + // db fixup because empty dirs are not packaged + /* + mkdirp(WASM_PREFIX "/lib"); + mkdirp(WASM_PREFIX "/lib/postgresql"); + */ + mkdirp(PGDB "/pg_wal"); + mkdirp(PGDB "/pg_wal/archive_status"); + mkdirp(PGDB "/pg_wal/summaries"); + + mkdirp(PGDB "/pg_tblspc"); + mkdirp(PGDB "/pg_snapshots"); + mkdirp(PGDB "/pg_commit_ts"); + mkdirp(PGDB "/pg_notify"); + mkdirp(PGDB "/pg_replslot"); + mkdirp(PGDB "/pg_twophase"); + + + mkdirp(PGDB "/pg_logical"); + mkdirp(PGDB "/pg_logical/snapshots"); + mkdirp(PGDB "/pg_logical/mappings"); + + } + + if (!hadloop_error) { + main_post(); + + /* + * Catch standard options before doing much else, in particular before we + * insist on not being root. 
+ */ + if (g_argc > 1) { + if (strcmp(g_argv[1], "--help") == 0 || strcmp(g_argv[1], "-?") == 0) + { + help(progname); + exit(0); + } + if (strcmp(g_argv[1], "--version") == 0 || strcmp(g_argv[1], "-V") == 0) + { + fputs(PG_BACKEND_VERSIONSTR, stdout); + exit(0); + } + + } + + if (g_argc > 1 && strcmp(g_argv[1], "--check") == 0) { + BootstrapModeMain(g_argc, g_argv, true); + return 0; + } + + if (g_argc > 1 && strcmp(g_argv[1], "--boot") == 0) { + PDEBUG("# 1356: boot: " __FILE__ ); + BootstrapModeMain(g_argc, g_argv, false); + return 0; + } + + PDEBUG("# 1362: single: " __FILE__ ); + if (async) + AsyncPostgresSingleUserMain(g_argc, g_argv, strdup(getenv("PGUSER")), 0); + else + PostgresSingleUserMain(g_argc, g_argv, strdup( getenv("PGUSER"))); + } + return 0; +} + + + +int +main(int argc, char **argv) // []) +{ +/* +TODO: + postgres.js:6382 warning: unsupported syscall: __syscall_prlimit64 +*/ + int ret=0; + is_node = !is_web_env(); + + main_pre(argc, argv); + + printf("# argv0 (%s) PGUSER=%s PGDATA=%s\n", argv[0], getenv("PGUSER"), getenv("PGDATA")); + + progname = get_progname(argv[0]); + + /* + PGDATESTYLE + TZ + PG_SHMEM_ADDR + + PGCTLTIMEOUT + PG_TEST_USE_UNIX_SOCKETS + INITDB_TEMPLATE + PSQL_HISTORY + TMPDIR + PGOPTIONS + */ + + /* + * Platform-specific startup hacks + */ + startup_hacks(progname); + + /* + * Remember the physical location of the initially given argv[] array for + * possible use by ps display. On some platforms, the argv[] storage must + * be overwritten in order to set the process title for ps. In such cases + * save_ps_display_args makes and returns a new copy of the argv[] array. + * + * save_ps_display_args may also move the environment strings to make + * extra room. Therefore this should be done as early as possible during + * startup, to avoid entanglements with code that might save a getenv() + * result pointer. 
+ */ + argv = save_ps_display_args(argc, argv); + g_argv = argv; + g_argc = argc; + + is_repl = strlen(getenv("REPL")) && getenv("REPL")[0]=='Y'; + if (!is_repl) { + PDEBUG("exit with live runtime (nodb)"); + return 0; + } + + // so it is repl + main_repl(1); + PDEBUG("# 1453: " __FILE__); + emscripten_force_exit(ret); + return ret; +} + +#endif // PG_MAIN diff --git a/patches/pg_plugin.h b/patches/pg_plugin.h new file mode 100644 index 00000000..d0ffeca1 --- /dev/null +++ b/patches/pg_plugin.h @@ -0,0 +1,10 @@ +#ifndef PG_EXTERN +#define PG_EXTERN + +/* + * TODO: async fetch/compilation of a pre-selected plugin + * with emscripten_run_preload_plugins() + */ + + +#endif diff --git a/patches/pg_proto.c b/patches/pg_proto.c new file mode 100644 index 00000000..ff3dd2f3 --- /dev/null +++ b/patches/pg_proto.c @@ -0,0 +1,274 @@ +/* + * this file is used by both interactive_file ( initdb boot/single ) + * and interactive_one() + * + */ + switch (firstchar) + { + case 'Q': /* simple query */ + { + const char *query_string; + + /* Set statement_timestamp() */ + SetCurrentStatementStartTimestamp(); + + query_string = pq_getmsgstring(&input_message); + pq_getmsgend(&input_message); + + if (am_walsender) + { + if (!exec_replication_command(query_string)) + exec_simple_query(query_string); + } + else + exec_simple_query(query_string); + + send_ready_for_query = true; + if (!single_mode_feed) + fprintf(stdout,"pg> %c\n", 4); + } + break; + + case 'P': /* parse */ + { + const char *stmt_name; + const char *query_string; + int numParams; + Oid *paramTypes = NULL; + + forbidden_in_wal_sender(firstchar); + + /* Set statement_timestamp() */ + SetCurrentStatementStartTimestamp(); + + stmt_name = pq_getmsgstring(&input_message); + query_string = pq_getmsgstring(&input_message); + numParams = pq_getmsgint(&input_message, 2); + if (numParams > 0) + { + paramTypes = palloc_array(Oid, numParams); + for (int i = 0; i < numParams; i++) + paramTypes[i] = pq_getmsgint(&input_message, 4); + } + pq_getmsgend(&input_message); + + exec_parse_message(query_string, stmt_name, + paramTypes, numParams); + + //valgrind_report_error_query(query_string); + } + break; + + case 'B': /* bind */ + forbidden_in_wal_sender(firstchar); + + /* Set statement_timestamp() */ + SetCurrentStatementStartTimestamp(); + + /* + * this message is complex enough that it seems best to put + * the field extraction out-of-line + */ + exec_bind_message(&input_message); + + /* exec_bind_message does valgrind_report_error_query */ + break; + + case 'E': /* execute */ + { + const char *portal_name; + int max_rows; + + forbidden_in_wal_sender(firstchar); + + /* Set statement_timestamp() */ + SetCurrentStatementStartTimestamp(); + + portal_name = pq_getmsgstring(&input_message); + max_rows = pq_getmsgint(&input_message, 4); + pq_getmsgend(&input_message); + + exec_execute_message(portal_name, max_rows); + + /* exec_execute_message does valgrind_report_error_query */ + } + break; + + case 'F': /* fastpath function call */ + forbidden_in_wal_sender(firstchar); + + /* Set statement_timestamp() */ + SetCurrentStatementStartTimestamp(); + + /* Report query to various monitoring facilities. */ + pgstat_report_activity(STATE_FASTPATH, NULL); + set_ps_display(""); + + /* start an xact for this function invocation */ + start_xact_command(); + + /* + * Note: we may at this point be inside an aborted + * transaction. 
We can't throw error for that until we've + * finished reading the function-call message, so + * HandleFunctionRequest() must check for it after doing so. + * Be careful not to do anything that assumes we're inside a + * valid transaction here. + */ + + /* switch back to message context */ + MemoryContextSwitchTo(MessageContext); + + HandleFunctionRequest(&input_message); + + /* commit the function-invocation transaction */ + finish_xact_command(); + + // valgrind_report_error_query("fastpath function call"); + + send_ready_for_query = true; + break; + + case 'C': /* close */ + { + int close_type; + const char *close_target; + + forbidden_in_wal_sender(firstchar); + + close_type = pq_getmsgbyte(&input_message); + close_target = pq_getmsgstring(&input_message); + pq_getmsgend(&input_message); + + switch (close_type) + { + case 'S': + if (close_target[0] != '\0') + DropPreparedStatement(close_target, false); + else + { + /* special-case the unnamed statement */ + drop_unnamed_stmt(); + } + break; + case 'P': + { + Portal portal; + + portal = GetPortalByName(close_target); + if (PortalIsValid(portal)) + PortalDrop(portal, false); + } + break; + default: + ereport(ERROR, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + errmsg("invalid CLOSE message subtype %d", + close_type))); + break; + } + + if (whereToSendOutput == DestRemote) + pq_putemptymessage('3'); /* CloseComplete */ + + //valgrind_report_error_query("CLOSE message"); + } + break; + + case 'D': /* describe */ + { + int describe_type; + const char *describe_target; + + forbidden_in_wal_sender(firstchar); + + /* Set statement_timestamp() (needed for xact) */ + SetCurrentStatementStartTimestamp(); + + describe_type = pq_getmsgbyte(&input_message); + describe_target = pq_getmsgstring(&input_message); + pq_getmsgend(&input_message); + + switch (describe_type) + { + case 'S': + exec_describe_statement_message(describe_target); + break; + case 'P': + exec_describe_portal_message(describe_target); + break; + default: + ereport(ERROR, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + errmsg("invalid DESCRIBE message subtype %d", + describe_type))); + break; + } + + // valgrind_report_error_query("DESCRIBE message"); + } + break; + + case 'H': /* flush */ + pq_getmsgend(&input_message); + if (whereToSendOutput == DestRemote) + pq_flush(); + break; + + case 'S': /* sync */ + pq_getmsgend(&input_message); + finish_xact_command(); + //valgrind_report_error_query("SYNC message"); + send_ready_for_query = true; + break; + + /* + * 'X' means that the frontend is closing down the socket. EOF + * means unexpected loss of frontend connection. Either way, + * perform normal shutdown. + */ + case EOF: + + /* for the cumulative statistics system */ + pgStatSessionEndCause = DISCONNECT_CLIENT_EOF; + + /* FALLTHROUGH */ + + case 'X': + + /* + * Reset whereToSendOutput to prevent ereport from attempting + * to send any more messages to client. + */ + if (whereToSendOutput == DestRemote) + whereToSendOutput = DestNone; + + /* + * NOTE: if you are tempted to add more code here, DON'T! + * Whatever you had in mind to do should be set up as an + * on_proc_exit or on_shmem_exit callback, instead. Otherwise + * it will fail to be called during other backend-shutdown + * scenarios. 
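+	 *
+	 * (WASM aside, illustrative: instead of exiting here, the port below
+	 * clears `repl` and returns, which hands control back to the hybrid
+	 * loop in pg_main.c, roughly:
+	 *
+	 *     while (repl && !proc_exit_inprogress)
+	 *         interactive_one();
+	 *
+	 * so a frontend 'X' message ends the loop without tearing down the
+	 * single wasm process.)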
+ */ +// puts("# 697:proc_exit/repl/skip"); //proc_exit(0); + repl = false; + return; + + case 'd': /* copy data */ + case 'c': /* copy done */ + case 'f': /* copy fail */ + + /* + * Accept but ignore these messages, per protocol spec; we + * probably got here because a COPY failed, and the frontend + * is still sending data. + */ + break; + + default: + ereport(FATAL, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + errmsg("invalid frontend message type %d", + firstchar))); + } // end switch + diff --git a/patches/postgresql-debug/configure.diff b/patches/postgresql-debug/configure.diff new file mode 100644 index 00000000..1ed1b07f --- /dev/null +++ b/patches/postgresql-debug/configure.diff @@ -0,0 +1,129 @@ +--- postgresql-16.2/configure 2024-02-05 22:41:37.000000000 +0100 ++++ postgresql-16.2-wasm/configure 2024-05-06 08:20:15.232406617 +0200 +@@ -4322,7 +4322,7 @@ + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no +- CFLAGS="-g" ++ CFLAGS="-g2" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + +@@ -4353,7 +4353,7 @@ + + else + ac_c_werror_flag=$ac_save_c_werror_flag +- CFLAGS="-g" ++ CFLAGS="-g2" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + +@@ -4381,13 +4381,13 @@ + CFLAGS=$ac_save_CFLAGS + elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then +- CFLAGS="-g -O2" ++ CFLAGS="-g2 -Os" + else +- CFLAGS="-g" ++ CFLAGS="-g2" + fi + else + if test "$GCC" = yes; then +- CFLAGS="-O2" ++ CFLAGS="-Os" + else + CFLAGS= + fi +@@ -4853,7 +4853,7 @@ + ac_save_cxx_werror_flag=$ac_cxx_werror_flag + ac_cxx_werror_flag=yes + ac_cv_prog_cxx_g=no +- CXXFLAGS="-g" ++ CXXFLAGS="-g2" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + +@@ -4884,7 +4884,7 @@ + + else + ac_cxx_werror_flag=$ac_save_cxx_werror_flag +- CXXFLAGS="-g" ++ CXXFLAGS="-g2" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. 
*/ + +@@ -4912,13 +4912,13 @@ + CXXFLAGS=$ac_save_CXXFLAGS + elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then +- CXXFLAGS="-g -O2" ++ CXXFLAGS="-g2 -Os" + else +- CXXFLAGS="-g" ++ CXXFLAGS="-g2" + fi + else + if test "$GXX" = yes; then +- CXXFLAGS="-O2" ++ CXXFLAGS="-Os" + else + CXXFLAGS= + fi +@@ -5278,11 +5278,11 @@ + elif test "$enable_coverage" = yes; then + : # no optimization by default + elif test "$GCC" = yes; then +- CFLAGS="-O2" ++ CFLAGS="-Os" + else + # if the user selected debug mode, don't use -O + if test "$enable_debug" != yes; then +- CFLAGS="-O" ++ CFLAGS="-Os" + fi + fi + +@@ -5293,11 +5293,11 @@ + elif test "$enable_coverage" = yes; then + : # no optimization by default + elif test "$GCC" = yes; then +- CXXFLAGS="-O2" ++ CXXFLAGS="-Os" + else + # if the user selected debug mode, don't use -O + if test "$enable_debug" != yes; then +- CXXFLAGS="-O" ++ CXXFLAGS="-Os" + fi + fi + +@@ -5310,12 +5310,12 @@ + if test "$ac_env_BITCODE_CFLAGS_set" = set; then + BITCODE_CFLAGS=$ac_env_BITCODE_CFLAGS_value + else +- BITCODE_CFLAGS="-O2 $BITCODE_CFLAGS" ++ BITCODE_CFLAGS="-Os $BITCODE_CFLAGS" + fi + if test "$ac_env_BITCODE_CXXFLAGS_set" = set; then + BITCODE_CXXFLAGS=$ac_env_BITCODE_CXXFLAGS_value + else +- BITCODE_CXXFLAGS="-O2 $BITCODE_CXXFLAGS" ++ BITCODE_CXXFLAGS="-Os $BITCODE_CXXFLAGS" + fi + + # C[XX]FLAGS we determined above will be added back at the end +@@ -7826,11 +7826,11 @@ + + # supply -g if --enable-debug + if test "$enable_debug" = yes && test "$ac_cv_prog_cc_g" = yes; then +- CFLAGS="$CFLAGS -g" ++ CFLAGS="$CFLAGS -g2" + fi + + if test "$enable_debug" = yes && test "$ac_cv_prog_cxx_g" = yes; then +- CXXFLAGS="$CXXFLAGS -g" ++ CXXFLAGS="$CXXFLAGS -g2" + fi + + # enable code coverage if --enable-coverage diff --git a/patches/postgresql-emscripten/src-Makefile.shlib.diff b/patches/postgresql-emscripten/src-Makefile.shlib.diff new file mode 100644 index 00000000..da77a3d4 --- /dev/null +++ b/patches/postgresql-emscripten/src-Makefile.shlib.diff @@ -0,0 +1,22 @@ +--- postgresql-16.3/src/Makefile.shlib ++++ postgresql-16.3-wasm/src/Makefile.shlib +@@ -233,6 +233,19 @@ + override CXXFLAGS += $(CXXFLAGS_SL_MODULE) + endif + ++ifeq ($(PORTNAME), emscripten) ++ LINK.shared = wasm-shared ++ ifdef soname ++ # emscripten uses unversioned shared libraries ++ shlib = $(shlib_bare) ++ soname = $(shlib_bare) ++ endif ++ BUILD.exports = ( echo '{ global:'; $(AWK) '/^[^\#]/ {printf "%s;\n",$$1}' $<; echo ' local: *; };' ) >$@ ++ exports_file = $(SHLIB_EXPORTS:%.txt=%.list) ++# ifneq (,$(exports_file)) ++# LINK.shared += -Wl,--version-script=$(exports_file) ++# endif ++endif + + ## + ## BUILD diff --git a/patches/postgresql-emscripten/src-backend-Makefile.diff b/patches/postgresql-emscripten/src-backend-Makefile.diff new file mode 100644 index 00000000..657cc9a6 --- /dev/null +++ b/patches/postgresql-emscripten/src-backend-Makefile.diff @@ -0,0 +1,11 @@ +--- postgresql-16.3/src/backend/Makefile ++++ postgresql-16.3-wasm/src/backend/Makefile +@@ -65,7 +65,7 @@ + ifneq ($(PORTNAME), aix) + + postgres: $(OBJS) +- $(CC) $(CFLAGS) $(call expand_subsys,$^) $(LDFLAGS) $(LIBS) -o $@ ++ $(CC) -sMAIN_MODULE=1 $(CFLAGS) $(call expand_subsys,$^) $(LDFLAGS) $(LIBS) -o $@ + + endif + endif diff --git a/patches/postgresql-emscripten/src-backend-commands-async.c.diff b/patches/postgresql-emscripten/src-backend-commands-async.c.diff new file mode 100644 index 00000000..2c1ffde6 --- /dev/null +++ b/patches/postgresql-emscripten/src-backend-commands-async.c.diff @@ -0,0 +1,15 @@ 
+--- postgresql-16.3/src/backend/commands/async.c ++++ postgresql-16.3-wasm/src/backend/commands/async.c +@@ -1700,8 +1700,12 @@ + * NotifyQueueLock; which is unlikely but certainly possible. So we + * just log a low-level debug message if it happens. + */ ++#if defined(__EMSCRIPTEN__) ++ HandleNotifyInterrupt(); ++#else + if (SendProcSignal(pid, PROCSIG_NOTIFY_INTERRUPT, ids[i]) < 0) + elog(DEBUG3, "could not signal backend with PID %d: %m", pid); ++#endif + } + + pfree(pids); diff --git a/patches/postgresql-emscripten/src-include-port-emscripten.h.diff b/patches/postgresql-emscripten/src-include-port-emscripten.h.diff new file mode 100644 index 00000000..7b89e2b4 --- /dev/null +++ b/patches/postgresql-emscripten/src-include-port-emscripten.h.diff @@ -0,0 +1,332 @@ +--- postgresql-16.3/src/include/port/emscripten.h ++++ postgresql-16.3-wasm/src/include/port/emscripten.h +@@ -0,0 +1,329 @@ ++/* src/include/port/emscripten.h */ ++ ++#ifndef I_EMSCRIPTEN ++#define I_EMSCRIPTEN ++#define PGDLLIMPORT ++// #define COPY_INTERNAL ++#define COPY_OFF ++#include ++extern PGDLLIMPORT bool fe_utils_quote_all_identifiers; ++ ++#if defined(__cplusplus) ++#else ++#include ++#endif ++ ++/* ++ * https://github.com/electric-sql/postgres-wasm/pull/1/files#diff-6e542ba2eb1d83ef90e65cdc0912b51a295184701c7e3bd236937c43c4cac4b9R63 ++ */ ++ ++#define WAIT_USE_POLL 1 ++ ++/* internal size 8 is invalid for passed-by-value type */ ++/* #define USE_FLOAT8_BYVAL 1 */ ++ ++#define HAVE_LINUX_EIDRM_BUG ++/* ++ * Set the default wal_sync_method to fdatasync. With recent Linux versions, ++ * xlogdefs.h's normal rules will prefer open_datasync, which (a) doesn't ++ * perform better and (b) causes outright failures on ext4 data=journal ++ * filesystems, because those don't support O_DIRECT. ++ */ ++#define PLATFORM_DEFAULT_WAL_SYNC_METHOD WAL_SYNC_METHOD_FDATASYNC ++ ++// force the name used with --single ++#define WASM_USERNAME "postgres" ++ ++ ++#define BOOT_END_MARK "build indices" ++#define FD_BUFFER_MAX 16384 ++ ++#define WASM_PGOPTS \ ++ "-c", "log_checkpoints=false",\ ++ "-c", "search_path=pg_catalog",\ ++ "-c", "exit_on_error=true",\ ++ "-c", "ignore_invalid_pages=on",\ ++ "-c", "temp_buffers=8MB",\ ++ "-c", "work_mem=4MB",\ ++ "-c", "fsync=on",\ ++ "-c", "synchronous_commit=on",\ ++ "-c", "wal_buffers=4MB",\ ++ "-c", "min_wal_size=80MB",\ ++ "-c", "shared_buffers=128MB" ++ ++ ++// socket emulation via file ++#define PGS_ILOCK "/tmp/.s.PGSQL.5432.lock.in" ++#define PGS_IN "/tmp/.s.PGSQL.5432.in" ++ ++#define PGS_OLOCK "/tmp/.s.PGSQL.5432.lock.out" ++#define PGS_OUT "/tmp/.s.PGSQL.5432.out" ++ ++ ++#if defined(PREFIX) ++#define em_xstr(s) em_str(s) ++#define em_str(s) #s ++# define WASM_PREFIX em_xstr(PREFIX) ++# define PG_MAIN_INCLUDE em_xstr(PATCH_MAIN) ++# define PG_PLUGIN_INCLUDE em_xstr(PATCH_PLUGIN) ++#else ++# define WASM_PREFIX "/pgdata" ++# define PG_MAIN_INCLUDE "/data/git/pg/pg_main.c" ++# define PG_PLUGIN_INCLUDE "/data/git/pg/pg_plugin.h" ++#endif ++ ++#define IDB_PIPE_FILE "/tmp/initdb.txt" ++#define IDB_PIPE_BOOT "/tmp/initdb.boot.txt" ++#define IDB_PIPE_SINGLE "/tmp/initdb.single.txt" ++ ++ ++#define PG_FORCE_DISABLE_INLINE ++ ++// we want client and server in the same lib for now. 
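++
++/*
++ * (Editor's sketch, illustrative: WASM_PGOPTS above expands to a flat
++ * run of "-c", "name=value" string literals, so it can be spliced
++ * directly into any argv array the wrapper builds, e.g.:
++ *
++ *     char *argv_demo[] = { "postgres", "--single",
++ *                           WASM_PGOPTS,      // "-c", "fsync=on", ...
++ *                           "template1", NULL };
++ *
++ * The adjacent-literal trick keeps every option visible to getopt as a
++ * separate argv slot.)
++ */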
++#if defined(PG_INITDB) && defined(PG_MAIN) ++extern const char *progname; ++#endif ++ ++ ++ ++ ++/*extern bool quote_all_identifiers;*/ ++extern int pg_char_to_encoding_private(const char *name); ++extern const char *pg_encoding_to_char_private(int encoding); ++extern int pg_valid_server_encoding_id_private(int encoding); ++ ++#if defined(pg_char_to_encoding) ++#undef pg_char_to_encoding ++#endif ++#define pg_char_to_encoding(encoding) pg_char_to_encoding_private(encoding) ++ ++#if defined(pg_encoding_to_char) ++#undef pg_encoding_to_char ++#endif ++#define pg_encoding_to_char(encoding) pg_encoding_to_char_private(encoding) ++ ++#if defined(pg_valid_server_encoding_id) ++#undef pg_valid_server_encoding_id ++#endif ++#define pg_valid_server_encoding_id(encoding) pg_valid_server_encoding_id_private(encoding) ++ ++ ++/* ++ * 'proc_exit' is a wasi system call, so change its name everywhere. ++ */ ++ ++#define proc_exit(arg) pg_proc_exit(arg) ++ ++/* ++#include // fprintf ++#include ++static int em_ioctl(int fd, int request, char *argp) { ++ fprintf(stderr, "ioctl(fd=%d, req=%d, %p)\n", fd, request, argp ); ++ int r=ioctl(fd,request, argp); ++ fprintf(stderr, "ioctl(fd=%d, req=%d, %p) = %d\n", fd, request, argp, r ); ++ return r; ++} ++ ++#define ioctl(fd, request, ...) em_ioctl(fd, request, __VA_ARGS__) ++*/ ++ ++ ++ ++/* ++#define geteuid(void) pg_geteuid(void) ++#include ++static ++uid_t pg_geteuid() { ++ return 1000; ++} ++*/ ++ ++/* ++ * popen / pclose for initdb is routed to stderr ++ * link a pclose replacement when we are in exec.c ( PG_EXEC defined ) ++ */ ++ ++ ++#if defined(PG_EXEC) ++#define pclose(stream) pg_pclose(stream) ++#include // FILE ++ ++EMSCRIPTEN_KEEPALIVE FILE* ++SOCKET_FILE = NULL; ++ ++EMSCRIPTEN_KEEPALIVE int ++SOCKET_DATA = 0; ++ ++EMSCRIPTEN_KEEPALIVE FILE* ++IDB_PIPE_FP = NULL; ++ ++EMSCRIPTEN_KEEPALIVE int ++IDB_STAGE = 0; ++ ++int pg_pclose(FILE *stream) { ++ if (IDB_STAGE==1) ++ fprintf(stderr,"# pg_pclose(%s) 129:" __FILE__ "\n" , IDB_PIPE_BOOT); ++ if (IDB_STAGE==2) ++ fprintf(stderr,"# pg_pclose(%s) 129:" __FILE__ "\n" , IDB_PIPE_SINGLE); ++ ++ if (IDB_PIPE_FP) { ++ fflush(IDB_PIPE_FP); ++ fclose(IDB_PIPE_FP); ++ IDB_PIPE_FP = NULL; ++ } ++ return 0; ++} ++ ++ ++#endif // PG_EXEC ++ ++ ++/* ++ * and now popen will return predefined slot from a file list ++ * as file handle in initdb.c ++ */ ++#if defined(PG_INITDB) && !defined(PG_MAIN) ++#define popen(command, mode) pg_popen(command, mode) ++#include // FILE+fprintf ++extern FILE* IDB_PIPE_FP; ++extern FILE* SOCKET_FILE; ++extern int SOCKET_DATA; ++extern int IDB_STAGE; ++FILE *pg_popen(const char *command, const char *type) { ++ if (IDB_STAGE>1) { ++ fprintf(stderr,"# popen[%s]\n", command); ++ return stderr; ++ } ++ ++ if (!IDB_STAGE) { ++ fprintf(stderr,"# popen[%s] (BOOT)\n", command); ++ IDB_PIPE_FP = fopen( IDB_PIPE_BOOT, "w"); ++ IDB_STAGE = 1; ++ } else { ++ fprintf(stderr,"# popen[%s] (SINGLE)\n", command); ++ IDB_PIPE_FP = fopen( IDB_PIPE_SINGLE, "w"); ++ IDB_STAGE = 2; ++ } ++ ++ return IDB_PIPE_FP; ++ ++} ++#endif // PG_INITDB ++ ++ ++/* ++ * OpenPipeStream : another kind of pipe open in fd.c ++ * known to try "locale -a" from collationcmds.c when in initdb. 
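++ *
++ * (Illustrative: with PGCLIENTENCODING=UTF8 the generated
++ * $PGSYSCONFDIR/locale file handed back in place of the pipe contains
++ *
++ *     C
++ *     C.UTF8
++ *     POSIX
++ *     UTF8
++ *
++ * which is what the "locale -a" consumer then parses as the available
++ * locale list.)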
++ * ++ */ ++#if defined(PG_FD) ++#include // strlen ++#include // access+F_OK ++#include // FILE+fprintf ++ ++FILE * ++wasm_OpenPipeStream(const char *command, const char *mode) { ++ ++ FILE *result = NULL; ++ const char *prefix = getenv("PGSYSCONFDIR"); ++ const char *locale = "/locale"; ++ char *localefile = malloc( strlen(prefix) + strlen(locale) + 1 ); ++ localefile = strcat(prefix,locale); ++ fprintf(stderr, "# 204:%s: OpenPipeStream(command=%s, mode=%s)\n#\tredirected to %s\n", __FILE__, command, mode, localefile); ++ if (localefile) { ++ if (access(localefile, F_OK) != 0) { ++ FILE *fakeloc = fopen(localefile, "w"); ++ { ++ const char* encoding = getenv("PGCLIENTENCODING"); ++ fprintf(fakeloc, "C\nC.%s\nPOSIX\n%s\n", encoding, encoding); ++ puts("#211 locale created"); ++ } ++ if (fakeloc) ++ fclose(fakeloc); ++ } ++ result = fopen(localefile, "r"); ++ free(localefile); ++ } ++ ++ return result; ++} ++ ++/*int ++wasm_ClosePipeStream(FILE *handle) { ++ puts("wasm_ClosePipeStream"); ++} ++*/ ++#else ++# define OpenPipeStream(cmd, mode) wasm_OpenPipeStream(cmd, mode) ++/*# define ClosePipeStream(handle) wasm_ClosePipeStream(handle)*/ ++#endif ++ ++ ++ ++ ++/* ++ * handle pg_shmem.c special case ++ */ ++ ++#if defined(PG_SHMEM) ++#include // print ++#include // malloc ++#include // SC_ ++#include ++#include ++ ++/* ++ * Shared memory control operation. ++ */ ++ ++//extern int shmctl (int __shmid, int __cmd, struct shmid_ds *__buf); ++ ++int ++shmctl (int __shmid, int __cmd, struct shmid_ds *__buf) { ++ printf("FIXME: int shmctl (int __shmid=%d, int __cmd=%d, struct shmid_ds *__buf=%p)\n", __shmid, __cmd, __buf); ++ return 0; ++} ++ ++ ++void *FAKE_SHM ; ++key_t FAKE_KEY = 0; ++ ++/* Get shared memory segment. */ ++// extern int shmget (key_t __key, size_t __size, int __shmflg); ++int ++shmget (key_t __key, size_t __size, int __shmflg) { ++ printf("# FIXING: int shmget (key_t __key=%d, size_t __size=%zu, int __shmflg=%d) pagesize default=%d\n", __key, __size, __shmflg, getpagesize()); ++ if (!FAKE_KEY) { ++ FAKE_SHM = malloc(__size); ++ FAKE_KEY = 666; ++ return FAKE_KEY; ++ } else { ++ printf("# ERROR: int shmget (key_t __key=%d, size_t __size=%zu, int __shmflg=%d)\n", __key, __size, __shmflg); ++ abort(); ++ } ++ return -1; ++} ++ ++/* Attach shared memory segment. */ ++// extern void *shmat (int __shmid, const void *__shmaddr, int __shmflg); ++void *shmat (int __shmid, const void *__shmaddr, int __shmflg) { ++ printf("# FIXING: void *shmat (int __shmid=%d, const void *__shmaddr=%p, int __shmflg=%d)\n", __shmid, __shmaddr, __shmflg); ++ if (__shmid==666) { ++ return FAKE_SHM; ++ } else { ++ printf("# ERROR: void *shmat (int __shmid=%d, const void *__shmaddr=%p, int __shmflg=%d)\n", __shmid, __shmaddr, __shmflg); ++ abort(); ++ } ++ return NULL; ++} ++ ++/* Detach shared memory segment. 
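++ * (Illustrative usage of the fakes above: the first shmget() call
++ * returns the fixed key 666 backed by one malloc'd block, shmat(666)
++ * hands that block back, and shmdt()/shmctl() are no-ops:
++ *
++ *     int id = shmget(IPC_PRIVATE, size, 0600);  // -> 666
++ *     void *base = shmat(id, NULL, 0);           // -> FAKE_SHM
++ *     shmdt(base);                               // no-op, returns 0
++ *
++ * A second shmget() aborts, so exactly one segment exists per process.)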
*/ ++// extern int shmdt (const void *__shmaddr); ++int ++shmdt (const void *__shmaddr) { ++ puts("# FIXME: int shmdt (const void *__shmaddr)"); ++ return 0; ++} ++ ++#endif // PG_SHMEM ++#endif // I_EMSCRIPTEN diff --git a/patches/postgresql-emscripten/src-makefiles-Makefile.emscripten.diff b/patches/postgresql-emscripten/src-makefiles-Makefile.emscripten.diff new file mode 100644 index 00000000..9cc6a1fa --- /dev/null +++ b/patches/postgresql-emscripten/src-makefiles-Makefile.emscripten.diff @@ -0,0 +1,11 @@ +--- postgresql-16.3/src/makefiles/Makefile.emscripten ++++ postgresql-16.3-wasm/src/makefiles/Makefile.emscripten +@@ -0,0 +1,8 @@ ++# Use --enable-new-dtags to generate DT_RUNPATH instead of DT_RPATH. ++# This allows LD_LIBRARY_PATH to still work when needed. ++rpath = ++AROPT = crs ++ ++# Rule for building a shared library from a single .o file ++%.so: %.o ++ $(CC) $(CFLAGS) $< $(LDFLAGS) $(LDFLAGS_SL) -shared -o $@ diff --git a/patches/postgresql-emscripten/src-template-emscripten.diff b/patches/postgresql-emscripten/src-template-emscripten.diff new file mode 100644 index 00000000..9d274a85 --- /dev/null +++ b/patches/postgresql-emscripten/src-template-emscripten.diff @@ -0,0 +1,17 @@ +--- postgresql-16.3/src/template/emscripten ++++ postgresql-16.3-wasm/src/template/emscripten +@@ -0,0 +1,14 @@ ++# src/template/emscripten ++ ++# Prefer unnamed POSIX semaphores if available, unless user overrides choice ++if test x"$PREFERRED_SEMAPHORES" = x"" ; then ++ PREFERRED_SEMAPHORES=UNNAMED_POSIX ++fi ++ ++# Force _GNU_SOURCE on; plperl is broken with Perl 5.8.0 otherwise ++# This is also required for ppoll(2), and perhaps other things ++CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE" ++ ++# Extra CFLAGS for code that will go into a shared library ++CFLAGS_SL="-fPIC" ++ diff --git a/patches/postgresql-pglite/src-backend-main-main.c.diff b/patches/postgresql-pglite/src-backend-main-main.c.diff new file mode 100644 index 00000000..c63332bf --- /dev/null +++ b/patches/postgresql-pglite/src-backend-main-main.c.diff @@ -0,0 +1,35 @@ +--- postgresql-16.3/src/backend/main/main.c ++++ postgresql-16.3-wasm/src/backend/main/main.c +@@ -41,20 +41,20 @@ + #include "utils/pg_locale.h" + #include "utils/ps_status.h" + +- +-const char *progname; + static bool reached_main = false; + +- + static void startup_hacks(const char *progname); + static void init_locale(const char *categoryname, int category, const char *locale); + static void help(const char *progname); + static void check_root(const char *progname); + +- + /* + * Any Postgres server process begins execution here. + */ ++#if defined(__EMSCRIPTEN__) || defined(__wasi__) ++#include PG_MAIN_INCLUDE ++#else ++const char* progname; + int + main(int argc, char *argv[]) + { +@@ -199,6 +199,7 @@ + /* the functions above should not return */ + abort(); + } ++#endif /* wasm */ + + + diff --git a/patches/postgresql-pglite/src-bin-initdb-initdb.c.diff b/patches/postgresql-pglite/src-bin-initdb-initdb.c.diff new file mode 100644 index 00000000..d9df0e0e --- /dev/null +++ b/patches/postgresql-pglite/src-bin-initdb-initdb.c.diff @@ -0,0 +1,264 @@ +--- postgresql-16.3/src/bin/initdb/initdb.c ++++ postgresql-16.3-wasm/src/bin/initdb/initdb.c +@@ -45,7 +45,7 @@ + * + *------------------------------------------------------------------------- + */ +- ++#define PG_INITDB + #include "postgres_fe.h" + + #include +@@ -81,6 +81,23 @@ + #include "mb/pg_wchar.h" + #include "miscadmin.h" + ++#if defined(PG_INITDB_MAIN) ++ ++// FIXME: get PGDATA from env. 
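++// (Editor's note, illustrative: with PG_INITDB_MAIN defined, initdb's
++// main() below is compiled as pg_initdb_main() with the hard-coded argv
++// defined just before it, so the wasm wrapper can run initdb in-process;
++// Emscripten has no fork/exec, hence the FIXME about $PGDATA:
++//
++//     int rc = pg_initdb_main();   // uses WASM_PREFIX "/base", not $PGDATA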
++ ++/* ++EMSCRIPTEN_KEEPALIVE void ++pg_logging_init(const char *argv0) { ++ puts("pg_logging_init"); ++} ++ ++EMSCRIPTEN_KEEPALIVE void ++pg_log_generic(enum pg_log_level level, enum pg_log_part part, ++ const char *pg_restrict fmt,...) { ++ puts("pg_log_generic"); ++} ++*/ ++#endif + + /* Ideally this would be in a .h file, but it hardly seems worth the trouble */ + extern const char *select_default_timezone(const char *share_path); +@@ -168,7 +185,11 @@ + + + /* internal vars */ ++#if !defined(PG_MAIN) + static const char *progname; ++#else ++# define dynamic_shared_memory_type idb_dynamic_shared_memory_type ++#endif + static int encodingid; + static char *bki_file; + static char *hba_file; +@@ -752,6 +773,7 @@ + static char * + get_id(void) + { ++#if !defined(__EMSCRIPTEN__) && !defined(__wasi__) + const char *username; + + #ifndef WIN32 +@@ -762,10 +784,12 @@ + exit(1); + } + #endif +- + username = get_user_name_or_exit(progname); + + return pg_strdup(username); ++#else ++ return pg_strdup(WASM_USERNAME); ++#endif /* wasm */ + } + + static char * +@@ -1615,9 +1639,11 @@ + if (ferror(pwf)) + pg_fatal("could not read password from file \"%s\": %m", + pwfilename); +- else +- pg_fatal("password file \"%s\" is empty", ++ else { ++ printf("password file \"%s\" is empty\n", + pwfilename); ++ pwd1 = strdup("password"); ++ } + } + fclose(pwf); + +@@ -2562,8 +2588,13 @@ + strlcpy(full_path, progname, sizeof(full_path)); + + if (ret == -1) ++#if defined(__EMSCRIPTEN__) || defined(__wasi__) ++ printf("# WARNING: program \"%s\" is needed by %s but was not found in the same directory as \"%s\"\n", ++ "postgres", progname, full_path); ++#else + pg_fatal("program \"%s\" is needed by %s but was not found in the same directory as \"%s\"", + "postgres", progname, full_path); ++#endif // wasm + else + pg_fatal("program \"%s\" was found by \"%s\" but was not the same version as %s", + "postgres", full_path, progname); +@@ -2617,22 +2648,22 @@ + lc_numeric, + lc_time); + } +- ++puts("# 2651"); + if (!encoding) +- { ++ { puts("# 2653"); + int ctype_enc; + + ctype_enc = pg_get_encoding_from_locale(lc_ctype, true); +- ++puts("# 2657"); + /* + * If ctype_enc=SQL_ASCII, it's compatible with any encoding. ICU does + * not support SQL_ASCII, so select UTF-8 instead. + */ + if (locale_provider == COLLPROVIDER_ICU && ctype_enc == PG_SQL_ASCII) + ctype_enc = PG_UTF8; +- ++puts("# 2664"); + if (ctype_enc == -1) +- { ++ { puts("# 2666"); + /* Couldn't recognize the locale's codeset */ + pg_log_error("could not find suitable encoding for locale \"%s\"", + lc_ctype); +@@ -2641,7 +2672,7 @@ + exit(1); + } + else if (!pg_valid_server_encoding_id(ctype_enc)) +- { ++ { puts("# 2675"); + /* + * We recognized it, but it's not a legal server encoding. 
On + * Windows, UTF-8 works with any locale, so we can fall back to +@@ -2664,15 +2695,17 @@ + #endif + } + else +- { ++ { puts("# 2698"); + encodingid = ctype_enc; + printf(_("The default database encoding has accordingly been set to \"%s\".\n"), + pg_encoding_to_char(encodingid)); + } + } +- else ++ else { ++ puts("# 2705"); + encodingid = get_encoding_id(encoding); +- ++ } ++ puts("# 2706"); + if (!check_locale_encoding(lc_ctype, encodingid) || + !check_locale_encoding(lc_collate, encodingid)) + exit(1); /* check_locale_encoding printed the error */ +@@ -2991,7 +3024,11 @@ + + /* Select suitable configuration settings */ + set_null_conf(); ++#if !defined(__EMSCRIPTEN__) && !defined(__wasi__) + test_config_settings(); ++#else ++ dynamic_shared_memory_type = choose_dsm_implementation(); ++#endif // wasm + + /* Now create all the text config files */ + setup_config(); +@@ -3056,9 +3093,30 @@ + check_ok(); + } + ++#if defined(PG_INITDB_MAIN) ++ ++extern void MemoryContextInit(void); ++ ++// FIXME: get PGDATA from env. ++char *argv[] = { ++ WASM_PREFIX "/bin/initdb", ++// "--no-clean", ++ "--wal-segsize=1", ++ "-g", ++ "-E", "UTF8", "--locale=C.UTF-8", "--locale-provider=libc", ++ "-U", WASM_USERNAME, "--pwfile=" WASM_PREFIX "/password", ++ "--pgdata=" WASM_PREFIX "/base", ++ NULL ++}; ++ ++int argc = sizeof(argv) / sizeof(char*) - 1; + ++EMSCRIPTEN_KEEPALIVE int ++pg_initdb_main() ++#else + int + main(int argc, char *argv[]) ++#endif + { + static struct option long_options[] = { + {"pgdata", required_argument, NULL, 'D'}, +@@ -3118,10 +3176,16 @@ + */ + setvbuf(stdout, NULL, PG_IOLBF, 0); + ++#if defined(PG_INITDB_MAIN) ++ progname = get_progname(argv[0]); ++// printf("calling pg_initdb_main for %s\n", progname); ++ MemoryContextInit(); ++ pg_logging_init(progname); ++#else + pg_logging_init(argv[0]); + progname = get_progname(argv[0]); ++#endif + set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("initdb")); +- + if (argc > 1) + { + if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) +@@ -3377,11 +3441,11 @@ + set_info_version(); + + setup_data_file_paths(); +- ++puts("# 3442"); + setup_locale_encoding(); +- ++puts("# 3444"); + setup_text_search(); +- ++puts("# 3446"); + printf("\n"); + + if (data_checksums) +@@ -3393,9 +3457,11 @@ + get_su_pwd(); + + printf("\n"); +- ++puts("# 3458"); + initialize_data_directory(); +- ++#if defined(PG_INITDB_MAIN) ++ puts("# 3461: TODO: fsync_pgdata ?"); ++#else + if (do_sync) + { + fputs(_("syncing data to disk ... 
"), stdout); +@@ -3413,7 +3479,7 @@ + pg_log_warning_hint("You can change this by editing pg_hba.conf or using the option -A, or " + "--auth-local and --auth-host, the next time you run initdb."); + } +- ++puts("# 3480"); + if (!noinstructions) + { + /* +@@ -3448,8 +3514,7 @@ + + destroyPQExpBuffer(start_db_cmd); + } +- +- ++#endif + success = true; + return 0; + } diff --git a/patches/postgresql-wasm/contrib-pgstattuple-pgstatindex.c.diff b/patches/postgresql-wasm/contrib-pgstattuple-pgstatindex.c.diff new file mode 100644 index 00000000..9edc02ca --- /dev/null +++ b/patches/postgresql-wasm/contrib-pgstattuple-pgstatindex.c.diff @@ -0,0 +1,14 @@ +--- postgresql-16.3/contrib/pgstattuple/pgstatindex.c ++++ postgresql-16.3-wasm/contrib/pgstattuple/pgstatindex.c +@@ -656,9 +656,9 @@ + stats.unused_pages++; + else if (PageGetSpecialSize(page) != + MAXALIGN(sizeof(HashPageOpaqueData))) +- ereport(ERROR, ++ ereport(WARNING, + (errcode(ERRCODE_INDEX_CORRUPTED), +- errmsg("index \"%s\" contains corrupted page at block %u", ++ errmsg("# 661(FATAL block=%d): index \"%s\" contains corrupted page at block %u", blkno, + RelationGetRelationName(rel), + BufferGetBlockNumber(buf)))); + else diff --git a/patches/postgresql-wasm/src-backend-access-nbtree-nbtutils.c.diff b/patches/postgresql-wasm/src-backend-access-nbtree-nbtutils.c.diff new file mode 100644 index 00000000..757953b7 --- /dev/null +++ b/patches/postgresql-wasm/src-backend-access-nbtree-nbtutils.c.diff @@ -0,0 +1,19 @@ +--- postgresql-16.3/src/backend/access/nbtree/nbtutils.c ++++ postgresql-16.3-wasm/src/backend/access/nbtree/nbtutils.c +@@ -2732,7 +2732,7 @@ + break; + } + } +- ++#if !defined(__EMSCRIPTEN__) + if (debugmessage) + { + if (allequalimage) +@@ -2742,6 +2742,6 @@ + elog(DEBUG1, "index \"%s\" cannot use deduplication", + RelationGetRelationName(rel)); + } +- ++#endif + return allequalimage; + } diff --git a/patches/postgresql-wasm/src-backend-access-transam-xact.c.diff b/patches/postgresql-wasm/src-backend-access-transam-xact.c.diff new file mode 100644 index 00000000..2e580c9f --- /dev/null +++ b/patches/postgresql-wasm/src-backend-access-transam-xact.c.diff @@ -0,0 +1,31 @@ +--- postgresql-16.3/src/backend/access/transam/xact.c ++++ postgresql-16.3-wasm/src/backend/access/transam/xact.c +@@ -1740,6 +1740,7 @@ + if (TransactionIdDidCommit(xid)) + elog(PANIC, "cannot abort transaction %u, it was already committed", + xid); ++ else elog(WARNING, "# 1743: aborting transaction %u", xid); + + /* + * Are we using the replication origins feature? Or, in other words, are +@@ -2748,7 +2749,9 @@ + * handler. We do this fairly early in the sequence so that the timeout + * infrastructure will be functional if needed while aborting. + */ ++#if !defined(__EMSCRIPTEN__) && !defined(__wasi__) + sigprocmask(SIG_SETMASK, &UnBlockSig, NULL); ++#endif + + /* + * check the current transaction state +@@ -5107,7 +5110,10 @@ + * handler. We do this fairly early in the sequence so that the timeout + * infrastructure will be functional if needed while aborting. 
+ */ ++#if !defined(__EMSCRIPTEN__) && !defined(__wasi__) + sigprocmask(SIG_SETMASK, &UnBlockSig, NULL); ++#endif ++ + + /* + * check the current transaction state diff --git a/patches/postgresql-wasm/src-backend-bootstrap-bootstrap.c.diff b/patches/postgresql-wasm/src-backend-bootstrap-bootstrap.c.diff new file mode 100644 index 00000000..a2cd5701 --- /dev/null +++ b/patches/postgresql-wasm/src-backend-bootstrap-bootstrap.c.diff @@ -0,0 +1,36 @@ +--- postgresql-16.3/src/backend/bootstrap/bootstrap.c ++++ postgresql-16.3-wasm/src/backend/bootstrap/bootstrap.c +@@ -199,7 +199,11 @@ + * to shared memory sizing, options work (or at least do not cause an error + * up to shared memory creation). + */ ++#if !defined(__EMSCRIPTEN__) && !defined(__wasi__) + void ++#else ++int ++#endif + BootstrapModeMain(int argc, char *argv[], bool check_only) + { + int i; +@@ -353,7 +357,7 @@ + */ + if (pg_link_canary_is_frontend()) + elog(ERROR, "backend is incorrectly linked to frontend functions"); +- ++puts("# 360: InitPostgres(boot): " __FILE__ ); + InitPostgres(NULL, InvalidOid, NULL, InvalidOid, false, false, NULL); + + /* Initialize stuff for bootstrap-file processing */ +@@ -378,7 +382,12 @@ + + /* Clean up and exit */ + cleanup(); ++#if !defined(__EMSCRIPTEN__) && !defined(__wasi__) + proc_exit(0); ++#else ++ puts("# 338 cleanup(boot): " __FILE__); ++ return 0; ++#endif + } + + diff --git a/patches/postgresql-wasm/src-backend-catalog-index.c.diff b/patches/postgresql-wasm/src-backend-catalog-index.c.diff new file mode 100644 index 00000000..90708d2e --- /dev/null +++ b/patches/postgresql-wasm/src-backend-catalog-index.c.diff @@ -0,0 +1,20 @@ +--- postgresql-16.3/src/backend/catalog/index.c ++++ postgresql-16.3-wasm/src/backend/catalog/index.c +@@ -2994,7 +2994,7 @@ + indexInfo->ii_ParallelWorkers = + plan_create_index_workers(RelationGetRelid(heapRelation), + RelationGetRelid(indexRelation)); +- ++#if !defined(__EMSCRIPTEN__) + if (indexInfo->ii_ParallelWorkers == 0) + ereport(DEBUG1, + (errmsg_internal("building index \"%s\" on table \"%s\" serially", +@@ -3006,7 +3006,7 @@ + RelationGetRelationName(indexRelation), + RelationGetRelationName(heapRelation), + indexInfo->ii_ParallelWorkers))); +- ++#endif + /* + * Switch to the table owner's userid, so that any index functions are run + * as that user. 
Also lock down security-restricted operations and diff --git a/patches/postgresql-wasm/src-backend-commands-collationcmds.c.diff b/patches/postgresql-wasm/src-backend-commands-collationcmds.c.diff new file mode 100644 index 00000000..4b239c85 --- /dev/null +++ b/patches/postgresql-wasm/src-backend-commands-collationcmds.c.diff @@ -0,0 +1,20 @@ +--- postgresql-16.3/src/backend/commands/collationcmds.c ++++ postgresql-16.3-wasm/src/backend/commands/collationcmds.c +@@ -828,14 +828,14 @@ + maxaliases = 100; + aliases = (CollAliasData *) palloc(maxaliases * sizeof(CollAliasData)); + naliases = 0; +- + locale_a_handle = OpenPipeStream("locale -a", "r"); +- if (locale_a_handle == NULL) ++ if (locale_a_handle == NULL) { ++ puts("======================== ERROR ================"); + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not execute command \"%s\": %m", + "locale -a"))); +- ++ } + while (fgets(localebuf, sizeof(localebuf), locale_a_handle)) + { + size_t len; diff --git a/patches/postgresql-wasm/src-backend-libpq-be-fsstubs.c.diff b/patches/postgresql-wasm/src-backend-libpq-be-fsstubs.c.diff new file mode 100644 index 00000000..25671831 --- /dev/null +++ b/patches/postgresql-wasm/src-backend-libpq-be-fsstubs.c.diff @@ -0,0 +1,66 @@ +--- postgresql-16.3/src/backend/libpq/be-fsstubs.c ++++ postgresql-16.3-wasm/src/backend/libpq/be-fsstubs.c +@@ -150,8 +150,12 @@ + * + *****************************************************************************/ + ++#if defined(__EMSCRIPTEN__) || defined(__wasi__) ++static int ++#else + int +-lo_read(int fd, char *buf, int len) ++#endif ++lo_read3(int fd, char *buf, int len) + { + int status; + LargeObjectDesc *lobj; +@@ -178,8 +182,12 @@ + return status; + } + ++#if defined(__EMSCRIPTEN__) || defined(__wasi__) ++static int ++#else + int +-lo_write(int fd, const char *buf, int len) ++#endif ++lo_write3(int fd, const char *buf, int len) + { + int status; + LargeObjectDesc *lobj; +@@ -190,7 +198,7 @@ + errmsg("invalid large-object descriptor: %d", fd))); + lobj = cookies[fd]; + +- /* see comment in lo_read() */ ++ /* see comment in lo_read3() */ + if ((lobj->flags & IFS_WRLOCK) == 0) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), +@@ -365,7 +373,7 @@ + len = 0; + + retval = (bytea *) palloc(VARHDRSZ + len); +- totalread = lo_read(fd, VARDATA(retval), len); ++ totalread = lo_read3(fd, VARDATA(retval), len); + SET_VARSIZE(retval, totalread + VARHDRSZ); + + PG_RETURN_BYTEA_P(retval); +@@ -382,7 +390,7 @@ + PreventCommandIfReadOnly("lowrite()"); + + bytestowrite = VARSIZE_ANY_EXHDR(wbuf); +- totalwritten = lo_write(fd, VARDATA_ANY(wbuf), bytestowrite); ++ totalwritten = lo_write3(fd, VARDATA_ANY(wbuf), bytestowrite); + PG_RETURN_INT32(totalwritten); + } + +@@ -560,7 +568,7 @@ + errmsg("invalid large-object descriptor: %d", fd))); + lobj = cookies[fd]; + +- /* see comment in lo_read() */ ++ /* see comment in lo_read3() */ + if ((lobj->flags & IFS_WRLOCK) == 0) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), diff --git a/patches/postgresql-wasm/src-backend-libpq-pqcomm.c.diff b/patches/postgresql-wasm/src-backend-libpq-pqcomm.c.diff new file mode 100644 index 00000000..9d9aa325 --- /dev/null +++ b/patches/postgresql-wasm/src-backend-libpq-pqcomm.c.diff @@ -0,0 +1,217 @@ +--- postgresql-16.3/src/backend/libpq/pqcomm.c ++++ postgresql-16.3-wasm/src/backend/libpq/pqcomm.c +@@ -123,8 +123,12 @@ + static int PqSendBufferSize; /* Size send buffer */ + static int PqSendPointer; /* Next index to store a byte in 
PqSendBuffer */ + static int PqSendStart; /* Next index to send a byte in PqSendBuffer */ +- ++#if defined(__EMSCRIPTEN__) || defined(__wasi__) ++static char PqRecvBuffer_static[PQ_RECV_BUFFER_SIZE]; ++static char *PqRecvBuffer; ++#else + static char PqRecvBuffer[PQ_RECV_BUFFER_SIZE]; ++#endif + static int PqRecvPointer; /* Next index to read a byte from PqRecvBuffer */ + static int PqRecvLength; /* End of data available in PqRecvBuffer */ + +@@ -136,6 +140,7 @@ + + + /* Internal functions */ ++ + static void socket_comm_reset(void); + static void socket_close(int code, Datum arg); + static void socket_set_nonblocking(bool nonblocking); +@@ -146,11 +151,44 @@ + static void socket_putmessage_noblock(char msgtype, const char *s, size_t len); + static int internal_putbytes(const char *s, size_t len); + static int internal_flush(void); ++#if 0 ++static void wasm_comm_reset() { ++ puts(" ******************************** wasm_comm_reset"); ++} + +-static int Lock_AF_UNIX(const char *unixSocketDir, const char *unixSocketPath); +-static int Setup_AF_UNIX(const char *sock_path); ++static int wasm_flush() { ++ puts(" ******************************** wasm_flush"); ++ return 0; ++} ++ ++static int wasm_flush_if_writable() { ++ puts(" ******************************** wasm_flush_if_writable"); ++ return 0; ++} ++ ++static int wasm_putmessage(char msgtype, const char *s, size_t len) { ++ puts(" ******************************** wasm_putmessage"); ++ return 0; ++} ++ ++static bool wasm_is_send_pending(void) { ++ puts(" ******************************** wasm_is_send_pending"); ++ return false; ++} ++static void wasm_putmessage_noblock(char msgtype, const char *s, size_t len) { ++ puts(" ******************************** wasm_putmessage_noblock"); ++} + + static const PQcommMethods PqCommSocketMethods = { ++ wasm_comm_reset, ++ wasm_flush, ++ wasm_flush_if_writable, ++ wasm_is_send_pending, ++ wasm_putmessage, ++ wasm_putmessage_noblock ++}; ++#else ++static const PQcommMethods PqCommSocketMethods = { + socket_comm_reset, + socket_flush, + socket_flush_if_writable, +@@ -158,6 +196,10 @@ + socket_putmessage, + socket_putmessage_noblock + }; ++#endif ++static int Lock_AF_UNIX(const char *unixSocketDir, const char *unixSocketPath); ++static int Setup_AF_UNIX(const char *sock_path); ++ + + const PQcommMethods *PqCommMethods = &PqCommSocketMethods; + +@@ -180,7 +222,7 @@ + PqSendPointer = PqSendStart = PqRecvPointer = PqRecvLength = 0; + PqCommBusy = false; + PqCommReadingMsg = false; +- ++#if !defined(__EMSCRIPTEN__) && !defined(__wasi__) + /* set up process-exit hook to close the socket */ + on_proc_exit(socket_close, 0); + +@@ -210,7 +252,9 @@ + MyLatch, NULL); + AddWaitEventToSet(FeBeWaitSet, WL_POSTMASTER_DEATH, PGINVALID_SOCKET, + NULL, NULL); +- ++#else ++ #pragma message "FIXME: socketfile" ++#endif + /* + * The event positions match the order we added them, but let's sanity + * check them to be sure. +@@ -1148,6 +1192,16 @@ + * This must be called before any of the pq_get* functions. 
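+ *
+ * (WASM aside, illustrative: in the CMA path added below, the wire
+ * buffers are remapped onto fixed linear-memory offsets so the JS host
+ * can exchange messages without copying through the socket file:
+ *
+ *     PqRecvBuffer = (char *) 0x1;              // request bytes live here
+ *     PqSendBuffer = 2 + (char *) cma_rsize;    // reply written past them
+ *
+ * When cma_rsize is 0, the code falls back to the static recv buffer
+ * and the SOCKET_FILE emulation.)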
+ * -------------------------------- + */ ++#if defined(I_EMSCRIPTEN) ++EMSCRIPTEN_KEEPALIVE void ++pq_recvbuf_fill(FILE* fp, int packetlen) { ++ fread( PqRecvBuffer, packetlen, 1, fp); ++ PqRecvPointer = 0; ++ PqRecvLength = packetlen; ++} ++#endif ++extern int cma_rsize; ++static char * PqSendBuffer_save; + void + pq_startmsgread(void) + { +@@ -1159,7 +1213,24 @@ + ereport(FATAL, + (errcode(ERRCODE_PROTOCOL_VIOLATION), + errmsg("terminating connection because protocol synchronization was lost"))); +- ++#if defined(I_EMSCRIPTEN) ++ if (cma_rsize) { ++ PqRecvPointer = 0; ++ PqRecvLength = cma_rsize; ++ PqRecvBuffer = (char*)0x1; ++ ++ PqSendPointer = 0; ++ PqSendBuffer_save = PqSendBuffer; ++ PqSendBuffer = 2 + (char*)(cma_rsize); ++ PqSendBufferSize = (64*1024*1024) - (int)(&PqSendBuffer[0]); ++ printf("# 1214 pq_startmsgread cma_rsize=%d buf=%p reply=%p\n", cma_rsize, &PqRecvBuffer[0], &PqSendBuffer[0]); ++ } else { ++ PqRecvBuffer = &PqRecvBuffer_static[0]; ++ if (PqSendBuffer_save) ++ PqSendBuffer=PqSendBuffer_save; ++ PqSendBufferSize = PQ_SEND_BUFFER_SIZE; ++ } ++#endif + PqCommReadingMsg = true; + } + +@@ -1282,9 +1353,62 @@ + + return 0; + } ++#if defined(__EMSCRIPTEN__) || defined(__wasm__) ++ ++extern FILE* SOCKET_FILE; ++extern int SOCKET_DATA; ++ ++static int ++internal_putbytes(const char *s, size_t len) { ++ if (PqSendPointer >= PqSendBufferSize) ++ { ++ puts("# 1336: overflow"); ++ } ++ ++ if (!cma_rsize) { ++ int wc= fwrite(s, 1, len, SOCKET_FILE); ++ fprintf(stderr,"# sockfile %d +%d top=%p %d/%d\n", SOCKET_DATA, wc, PqSendBuffer, PqSendPointer,PqSendBufferSize); ++ SOCKET_DATA+=wc; ++ } else { ++ size_t amount; ++ while (len > 0) ++ { ++ /* If buffer is full, then flush it out */ ++ if (PqSendPointer >= PqSendBufferSize) ++ { ++ socket_set_nonblocking(false); ++ if (internal_flush()) ++ return EOF; ++ } ++ amount = PqSendBufferSize - PqSendPointer; ++ if (amount > len) ++ amount = len; ++ memcpy(PqSendBuffer + PqSendPointer, s, amount); ++ PqSendPointer += amount; ++ s += amount; ++ len -= amount; ++ SOCKET_DATA+=amount; ++ } ++// fprintf(stderr,"# raw wire: %d +%zu top=%p %d/%d\n", SOCKET_DATA, amount, PqSendBuffer, PqSendPointer,PqSendBufferSize); ++ } ++ return 0; ++} + ++static int ++socket_flush(void) { ++ return internal_flush(); ++} + + static int ++internal_flush(void) { ++ /* no flush for raw wire */ ++ if (!cma_rsize) { ++ PqSendStart = PqSendPointer = 0; ++ } ++ return 0; ++} ++#else ++static int + internal_putbytes(const char *s, size_t len) + { + size_t amount; +@@ -1403,7 +1527,7 @@ + PqSendStart = PqSendPointer = 0; + return 0; + } +- ++#endif + /* -------------------------------- + * pq_flush_if_writable - flush pending output if writable without blocking + * diff --git a/patches/postgresql-wasm/src-backend-port-posix_sema.c.diff b/patches/postgresql-wasm/src-backend-port-posix_sema.c.diff new file mode 100644 index 00000000..16b162c4 --- /dev/null +++ b/patches/postgresql-wasm/src-backend-port-posix_sema.c.diff @@ -0,0 +1,27 @@ +--- postgresql-16.3/src/backend/port/posix_sema.c ++++ postgresql-16.3-wasm/src/backend/port/posix_sema.c +@@ -298,10 +298,16 @@ + * There's no direct API for this in POSIX, so we have to ratchet the + * semaphore down to 0 with repeated trywait's. 
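+ *
+ * (WASM aside: the port below replaces the ratchet loop with a single
+ * sem_trywait(); presumably safe because the Emscripten build runs the
+ * backend single-threaded, so the semaphore count never exceeds one:
+ *
+ *     sem_trywait(PG_SEM_REF(sema));   // at most one decrement needed
+ * )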
+ */ ++#if defined(__EMSCRIPTEN__) || defined(__wasi__) ++ sem_trywait(PG_SEM_REF(sema)); ++ return; ++#else + for (;;) + { + if (sem_trywait(PG_SEM_REF(sema)) < 0) + { ++ ++ + if (errno == EAGAIN || errno == EDEADLK) + break; /* got it down to 0 */ + if (errno == EINTR) +@@ -309,6 +315,7 @@ + elog(FATAL, "sem_trywait failed: %m"); + } + } ++#endif + } + + /* diff --git a/patches/postgresql-wasm/src-backend-port-sysv_shmem.c.diff b/patches/postgresql-wasm/src-backend-port-sysv_shmem.c.diff new file mode 100644 index 00000000..a0273a3c --- /dev/null +++ b/patches/postgresql-wasm/src-backend-port-sysv_shmem.c.diff @@ -0,0 +1,62 @@ +--- postgresql-16.3/src/backend/port/sysv_shmem.c ++++ postgresql-16.3-wasm/src/backend/port/sysv_shmem.c +@@ -17,8 +17,8 @@ + * + *------------------------------------------------------------------------- + */ ++#define PG_SHMEM + #include "postgres.h" +- + #include + #include + #include +@@ -691,12 +691,47 @@ + PGSharedMemoryCreate(Size size, + PGShmemHeader **shim) + { +- IpcMemoryKey NextShmemSegID; +- void *memAddress; ++ IpcMemoryKey NextShmemSegID = 0; ++ void *memAddress = NULL; + PGShmemHeader *hdr; + struct stat statbuf; + Size sysvsize; + ++/* ++ puts("@\n@\n@\n@\n@\n@\n PGSharedMemoryCreate @\n@\n@\n@\n@\n@\n"); ++ ++ elog(NOTICE, "Init WASM shared memory"); ++ ++ hdr = (PGShmemHeader *) malloc(size); ++ hdr->creatorPID = getpid(); ++ hdr->magic = PGShmemMagic; ++ hdr->dsm_control = 0; ++ ++ ++ hdr->device = statbuf.st_dev; ++ hdr->inode = statbuf.st_ino; ++ ++ hdr->totalsize = size; ++ hdr->freeoffset = MAXALIGN(sizeof(PGShmemHeader)); ++ *shim = hdr; ++ ++ UsedShmemSegAddr = memAddress; ++ UsedShmemSegID = (unsigned long) NextShmemSegID; ++ ++ if (AnonymousShmem == NULL) ++ return hdr; ++ memcpy(AnonymousShmem, hdr, sizeof(PGShmemHeader)); ++ return (PGShmemHeader *) AnonymousShmem; ++*/ ++ ++ ++ ++ ++ ++ ++ ++ ++ + /* + * We use the data directory's ID info (inode and device numbers) to + * positively identify shmem segments associated with this data dir, and diff --git a/patches/postgresql-wasm/src-backend-postmaster-postmaster.c.diff b/patches/postgresql-wasm/src-backend-postmaster-postmaster.c.diff new file mode 100644 index 00000000..532c04a4 --- /dev/null +++ b/patches/postgresql-wasm/src-backend-postmaster-postmaster.c.diff @@ -0,0 +1,32 @@ +--- postgresql-16.3/src/backend/postmaster/postmaster.c ++++ postgresql-16.3-wasm/src/backend/postmaster/postmaster.c +@@ -422,7 +422,8 @@ + static void ExitPostmaster(int status) pg_attribute_noreturn(); + static int ServerLoop(void); + static int BackendStartup(Port *port); +-static int ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done); ++//static ++int ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done); + static void SendNegotiateProtocolVersion(List *unrecognized_protocol_options); + static void processCancelRequest(Port *port, void *pkt); + static void report_fork_failure_to_client(Port *port, int errnum); +@@ -1533,7 +1534,7 @@ + /* Locate the postgres executable itself */ + if (find_my_exec(argv0, my_exec_path) < 0) + ereport(FATAL, +- (errmsg("%s: could not locate my own executable path", argv0))); ++ (errmsg("%s:1536: could not locate my own executable path", argv0))); + + #ifdef EXEC_BACKEND + /* Locate executable backend before we change working directory */ +@@ -1947,8 +1948,8 @@ + * should make no assumption here about the order in which the client may make + * requests. 
+ */ +-static int +-ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) ++// static int ++int ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) + { + int32 len; + char *buf; diff --git a/patches/postgresql-wasm/src-backend-storage-buffer-bufmgr.c.diff b/patches/postgresql-wasm/src-backend-storage-buffer-bufmgr.c.diff new file mode 100644 index 00000000..b44ec333 --- /dev/null +++ b/patches/postgresql-wasm/src-backend-storage-buffer-bufmgr.c.diff @@ -0,0 +1,23 @@ +--- postgresql-16.3/src/backend/storage/buffer/bufmgr.c ++++ postgresql-16.3-wasm/src/backend/storage/buffer/bufmgr.c +@@ -1134,16 +1134,16 @@ + { + ereport(WARNING, + (errcode(ERRCODE_DATA_CORRUPTED), +- errmsg("invalid page in block %u of relation %s; zeroing out page", ++ errmsg("1137: invalid page in block %u of relation %s; zeroing out page", + blockNum, + relpath(smgr->smgr_rlocator, forkNum)))); + MemSet((char *) bufBlock, 0, BLCKSZ); + } + else +- ereport(ERROR, ++ ereport(WARNING, + (errcode(ERRCODE_DATA_CORRUPTED), +- errmsg("invalid page in block %u of relation %s", +- blockNum, ++ errmsg("1145(FATAL): invalid page in block %u fork=%d p=%p sz=%d of relation %s", ++ blockNum,forkNum,bufBlock, BLCKSZ, + relpath(smgr->smgr_rlocator, forkNum)))); + } + } diff --git a/patches/postgresql-wasm/src-backend-storage-file-fd.c.diff b/patches/postgresql-wasm/src-backend-storage-file-fd.c.diff new file mode 100644 index 00000000..3b2836d3 --- /dev/null +++ b/patches/postgresql-wasm/src-backend-storage-file-fd.c.diff @@ -0,0 +1,67 @@ +--- postgresql-16.3/src/backend/storage/file/fd.c ++++ postgresql-16.3-wasm/src/backend/storage/file/fd.c +@@ -69,7 +69,7 @@ + * + *------------------------------------------------------------------------- + */ +- ++#define PG_FD + #include "postgres.h" + + #include +@@ -478,6 +478,11 @@ + void + pg_flush_data(int fd, off_t offset, off_t nbytes) + { ++#if defined(__EMSCRIPTEN__) || defined(__wasi__) ++ //int res = sync_file_range(fd, offset, nbytes, SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_AFTER); ++ (void)fsync(fd); ++ // fprintf(stderr, "# pg_flush_data(int fd=%d, off_t offset=%lld, off_t nbytes=%lld res=%d\n", fd,offset,nbytes, res); ++#else + /* + * Right now file flushing is primarily used to avoid making later + * fsync()/fdatasync() calls have less impact. Thus don't trigger flushes +@@ -648,6 +653,7 @@ + return; + } + #endif ++#endif /* wasm */ + } + + /* +@@ -660,7 +666,7 @@ + + retry: + ret = ftruncate(fd, length); +- ++printf("# 670 pg_ftruncate(int fd=%d, off_t length=%lld)=%d\n" __FILE__, fd, length, ret); + if (ret == -1 && errno == EINTR) + goto retry; + +@@ -692,7 +698,7 @@ + + retry: + ret = truncate(path, length); +- ++printf("# 670 pg_truncate(path=%s, off_t length=%lld)=%d\n" __FILE__, path, length, ret); + if (ret == -1 && errno == EINTR) + goto retry; + #endif +@@ -2619,7 +2625,7 @@ + + return -1; /* failure */ + } +- ++#if !defined(__EMSCRIPTEN__) && !defined(__wasi__) + /* + * Routines that want to initiate a pipe stream should use OpenPipeStream + * rather than plain popen(). This lets fd.c deal with freeing FDs if +@@ -2679,7 +2685,7 @@ + + return NULL; + } +- ++#endif + /* + * Free an AllocateDesc of any type. 
+ * diff --git a/patches/postgresql-wasm/src-backend-storage-ipc-ipc.c.diff b/patches/postgresql-wasm/src-backend-storage-ipc-ipc.c.diff new file mode 100644 index 00000000..7b313446 --- /dev/null +++ b/patches/postgresql-wasm/src-backend-storage-ipc-ipc.c.diff @@ -0,0 +1,77 @@ +--- postgresql-16.3/src/backend/storage/ipc/ipc.c ++++ postgresql-16.3-wasm/src/backend/storage/ipc/ipc.c +@@ -103,6 +103,48 @@ + void + proc_exit(int code) + { ++#if defined(__EMSCRIPTEN__) || defined(__wasi__) ++ if (code==66) { ++ fprintf(stderr,"# 108:fake shutdown\n"); ++ proc_exit_inprogress = true; ++ InterruptPending = false; ++ ProcDiePending = false; ++ QueryCancelPending = false; ++ InterruptHoldoffCount = 1; ++ CritSectionCount = 0; ++ ++ error_context_stack = NULL; ++ debug_query_string = NULL; ++ ++ shmem_exit_inprogress = true; ++ int save_before_shmem_exit_index = before_shmem_exit_index; ++ while (--before_shmem_exit_index >= 0) { ++ if (before_shmem_exit_index!=4) { ++ printf("# skipped shmem_exit_index=%d/%d\n", before_shmem_exit_index, save_before_shmem_exit_index); ++ continue; ++ } else ++ printf("# before_shmem_exit_index=%d/%d\n", before_shmem_exit_index, save_before_shmem_exit_index); ++ before_shmem_exit_list[before_shmem_exit_index].function(code, before_shmem_exit_list[before_shmem_exit_index].arg); ++ } ++ before_shmem_exit_index = save_before_shmem_exit_index; ++ puts("# dsm_backend_shutdown ?"); ++ // dsm_backend_shutdown(); ++ shmem_exit_inprogress = false; ++ /* ++ ++ int save_on_proc_exit_index = on_proc_exit_index; ++ while (--on_proc_exit_index >= 0) { ++ printf("# on_proc_exit_list=%d/%d\n", on_proc_exit_list, save_on_proc_exit_index); ++ on_proc_exit_list[on_proc_exit_index].function(code, on_proc_exit_list[on_proc_exit_index].arg); ++ } ++ on_proc_exit_index = save_on_proc_exit_index; ++ */ ++ } else { ++ proc_exit_inprogress = true; ++ fprintf(stderr,"# proc_exit(%d) ignored at 118:%s\n",code, __FILE__); ++ } ++ return; ++#endif + /* not safe if forked by system(), etc. */ + if (MyProcPid != (int) getpid()) + elog(PANIC, "proc_exit() called in child process"); +@@ -152,7 +194,6 @@ + #endif + + elog(DEBUG3, "exit(%d)", code); +- + exit(code); + } + +@@ -228,7 +269,7 @@ + shmem_exit(int code) + { + shmem_exit_inprogress = true; +- ++if (code!=66){ + /* + * Call before_shmem_exit callbacks. + * +@@ -276,7 +317,7 @@ + on_shmem_exit_list[on_shmem_exit_index].function(code, + on_shmem_exit_list[on_shmem_exit_index].arg); + on_shmem_exit_index = 0; +- ++} + shmem_exit_inprogress = false; + } + diff --git a/patches/postgresql-wasm/src-backend-tcop-postgres.c.diff b/patches/postgresql-wasm/src-backend-tcop-postgres.c.diff new file mode 100644 index 00000000..38257000 --- /dev/null +++ b/patches/postgresql-wasm/src-backend-tcop-postgres.c.diff @@ -0,0 +1,24 @@ +--- postgresql-16.3/src/backend/tcop/postgres.c ++++ postgresql-16.3-wasm/src/backend/tcop/postgres.c +@@ -3988,7 +3988,11 @@ + #endif + } + +- ++#if defined(__EMSCRIPTEN__) || defined(__wasi__) ++#define PG_MAIN ++#include PG_MAIN_INCLUDE ++#undef PG_MAIN ++#else + /* + * PostgresSingleUserMain + * Entry point for single user mode. argc/argv are the command line +@@ -4884,7 +4888,7 @@ + } + } /* end of input-reading loop */ + } +- ++#endif /* wasm */ + /* + * Throw an error if we're a WAL sender process. 
+ * diff --git a/patches/postgresql-wasm/src-backend-tcop-utility.c.diff b/patches/postgresql-wasm/src-backend-tcop-utility.c.diff new file mode 100644 index 00000000..b22acb64 --- /dev/null +++ b/patches/postgresql-wasm/src-backend-tcop-utility.c.diff @@ -0,0 +1,20 @@ +--- postgresql-16.3/src/backend/tcop/utility.c ++++ postgresql-16.3-wasm/src/backend/tcop/utility.c +@@ -811,7 +811,7 @@ + ListenStmt *stmt = (ListenStmt *) parsetree; + + CheckRestrictedOperation("LISTEN"); +- ++#if !defined(__EMSCRIPTEN__) && !defined(__wasi__) + /* + * We don't allow LISTEN in background processes, as there is + * no mechanism for them to collect NOTIFY messages, so they'd +@@ -827,7 +827,7 @@ + /* translator: %s is name of a SQL command, eg LISTEN */ + errmsg("cannot execute %s within a background process", + "LISTEN"))); +- ++#endif + Async_Listen(stmt->conditionname); + } + break; diff --git a/patches/postgresql-wasm/src-backend-utils-adt-ruleutils.c.diff b/patches/postgresql-wasm/src-backend-utils-adt-ruleutils.c.diff new file mode 100644 index 00000000..37808e56 --- /dev/null +++ b/patches/postgresql-wasm/src-backend-utils-adt-ruleutils.c.diff @@ -0,0 +1,13 @@ +--- postgresql-16.3/src/backend/utils/adt/ruleutils.c ++++ postgresql-16.3-wasm/src/backend/utils/adt/ruleutils.c +@@ -321,8 +321,9 @@ + static const char *query_getviewrule = "SELECT * FROM pg_catalog.pg_rewrite WHERE ev_class = $1 AND rulename = $2"; + + /* GUC parameters */ ++#if !defined(__EMSCRIPTEN__) + bool quote_all_identifiers = false; +- ++#endif + + /* ---------- + * Local functions diff --git a/patches/postgresql-wasm/src-backend-utils-error-elog.c.diff b/patches/postgresql-wasm/src-backend-utils-error-elog.c.diff new file mode 100644 index 00000000..fe3a2e15 --- /dev/null +++ b/patches/postgresql-wasm/src-backend-utils-error-elog.c.diff @@ -0,0 +1,55 @@ +--- postgresql-16.3/src/backend/utils/error/elog.c ++++ postgresql-16.3-wasm/src/backend/utils/error/elog.c +@@ -355,6 +355,9 @@ + * Check some cases in which we want to promote an error into a more + * severe error. None of this logic applies for non-error messages. + */ ++#if defined(__EMSCRIPTEN__) || defined(__wasi__) ++ ++#else + if (elevel >= ERROR) + { + /* +@@ -395,7 +398,7 @@ + for (i = 0; i <= errordata_stack_depth; i++) + elevel = Max(elevel, errordata[i].elevel); + } +- ++#endif + /* + * Now decide whether we need to process this report at all; if it's + * warning or less and not enabled for logging, just return false without +@@ -540,7 +543,13 @@ + */ + + recursion_depth--; ++#if 0 //defined(__EMSCRIPTEN__) || defined(__wasi__) ++ fprintf(stderr, "# 547: PG_RE_THROW(ERROR : %d) ignored\n", recursion_depth); ++ trap(); ++#else ++ fprintf(stderr, "# 549: PG_RE_THROW(ERROR : %d)\n", recursion_depth); + PG_RE_THROW(); ++#endif + } + + /* Emit the message to the right places */ +@@ -588,7 +597,11 @@ + * FATAL termination. The postmaster may or may not consider this + * worthy of panic, depending on which subprocess returns it. 
+ */ ++#if defined(__EMSCRIPTEN__) || defined(__wasi__) ++ puts("# 599: proc_exit(FATAL) ignored"); ++#else + proc_exit(1); ++#endif + } + + if (elevel >= PANIC) +@@ -698,6 +711,7 @@ + */ + if (edata->elevel >= ERROR) + { ++puts("#712"); + errfinish(filename, lineno, funcname); + pg_unreachable(); + } diff --git a/patches/postgresql-wasm/src-backend-utils-init-miscinit.c.diff b/patches/postgresql-wasm/src-backend-utils-init-miscinit.c.diff new file mode 100644 index 00000000..01019abf --- /dev/null +++ b/patches/postgresql-wasm/src-backend-utils-init-miscinit.c.diff @@ -0,0 +1,38 @@ +--- postgresql-16.3/src/backend/utils/init/miscinit.c ++++ postgresql-16.3-wasm/src/backend/utils/init/miscinit.c +@@ -209,7 +209,7 @@ + if (my_exec_path[0] == '\0') + { + if (find_my_exec(argv0, my_exec_path) < 0) +- elog(FATAL, "%s: could not locate my own executable path", ++ elog(WARNING, "%s:212: could not locate my own executable path", + argv0); + } + +@@ -368,7 +368,7 @@ + * + * XXX can we safely enable this check on Windows? + */ +-#if !defined(WIN32) && !defined(__CYGWIN__) ++#if !defined(WIN32) && !defined(__CYGWIN__) && !defined(__EMSCRIPTEN__) && !defined(__wasi__) + if (stat_buf.st_uid != geteuid()) + ereport(FATAL, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), +@@ -388,7 +388,7 @@ + * be proper support for Unix-y file permissions. Need to think of a + * reasonable check to apply on Windows. + */ +-#if !defined(WIN32) && !defined(__CYGWIN__) ++#if !defined(WIN32) && !defined(__CYGWIN__) && !defined(__EMSCRIPTEN__) && !defined(__wasi__) + if (stat_buf.st_mode & PG_MODE_MASK_GROUP) + ereport(FATAL, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), +@@ -409,7 +409,7 @@ + * Suppress when on Windows, because there may not be proper support for + * Unix-y file permissions. + */ +-#if !defined(WIN32) && !defined(__CYGWIN__) ++#if !defined(WIN32) && !defined(__CYGWIN__) && !defined(__EMSCRIPTEN__) && !defined(__wasi__) + SetDataDirectoryCreatePerm(stat_buf.st_mode); + + umask(pg_mode_mask); diff --git a/patches/postgresql-wasm/src-backend-utils-init-postinit.c.diff b/patches/postgresql-wasm/src-backend-utils-init-postinit.c.diff new file mode 100644 index 00000000..63761f79 --- /dev/null +++ b/patches/postgresql-wasm/src-backend-utils-init-postinit.c.diff @@ -0,0 +1,60 @@ +--- postgresql-16.3/src/backend/utils/init/postinit.c ++++ postgresql-16.3-wasm/src/backend/utils/init/postinit.c +@@ -714,6 +714,7 @@ + * Be very careful with the order of calls in the InitPostgres function. + * -------------------------------- + */ ++ + void + InitPostgres(const char *in_dbname, Oid dboid, + const char *username, Oid useroid, +@@ -1239,6 +1240,32 @@ + CommitTransactionCommand(); + } + ++/* ========================================================================*/ ++/* ++void ++ReInitPostgres(const char *in_dbname, Oid dboid, ++ const char *username, Oid useroid, ++ bool load_session_libraries, ++ bool override_allow_connections, ++ char *out_dbname) ++{ ++ puts("ReInitPostgres:Begin"); ++ InitPostgres(in_dbname, dboid, username, useroid, load_session_libraries, override_allow_connections, out_dbname); ++ puts("ReInitPostgres:End"); ++} ++*/ ++/* ========================================================================*/ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ + /* + * Process any command-line switches and any additional GUC variable + * settings passed in the startup packet. 
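
Nearly every patch in this series leans on the same platform guard: under Emscripten or WASI there is no postmaster, no fork(), and no real process to exit, so fatal paths must return control to the embedding JavaScript instead of tearing down the WASM instance that is itself the database. A minimal sketch of the idiom; the names are illustrative, not code from these patches:

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative only: the shape shared by the sysv_sema, ipc,
     * elog and fd hunks in this series. */
    void example_exit_path(int code)
    {
    #if defined(__EMSCRIPTEN__) || defined(__wasi__)
        /* Exiting would destroy the embedder's WASM instance: log,
         * skip the exit, and let the host regain control. */
        fprintf(stderr, "# exit(%d) ignored under wasm\n", code);
    #else
        exit(code);   /* native builds keep PostgreSQL's normal behaviour */
    #endif
    }

The ipc.c hunk above adds one refinement on top of this: exit code 66 triggers a "fake shutdown" that runs only a hand-picked subset of the before_shmem_exit callbacks, apparently so the single backend can be shut down and re-entered without ever leaving the WASM module.
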
+@@ -1345,14 +1372,15 @@ + static void + ShutdownPostgres(int code, Datum arg) + { ++puts("# 1348: " __FILE__); + /* Make sure we've killed any active transaction */ + AbortOutOfAnyTransaction(); +- + /* + * User locks are not released by transaction end, so be sure to release + * them explicitly. + */ + LockReleaseAll(USER_LOCKMETHOD, true); ++puts("# 1356: " __FILE__); + } + + diff --git a/patches/postgresql-wasm/src-backend-utils-misc-timeout.c.diff b/patches/postgresql-wasm/src-backend-utils-misc-timeout.c.diff new file mode 100644 index 00000000..719c34cd --- /dev/null +++ b/patches/postgresql-wasm/src-backend-utils-misc-timeout.c.diff @@ -0,0 +1,31 @@ +--- postgresql-16.3/src/backend/utils/misc/timeout.c ++++ postgresql-16.3-wasm/src/backend/utils/misc/timeout.c +@@ -110,10 +110,20 @@ + * Insert specified timeout reason into the list of active timeouts + * at the given index. + */ ++ ++bool insert_timeout_warned = false; + static void + insert_timeout(TimeoutId id, int index) + { + int i; ++#if defined(__EMSCRIPTEN__) || defined(__wasi__) ++ if (!insert_timeout_warned) //(index<0) ++ { ++ insert_timeout_warned = true; ++ fprintf(stderr, "# 117(FATAL): insert_timeout(TimeoutId id=%d, int index=%d): " __FILE__ "\n", id, index); ++ } ++ return; ++#endif + + if (index < 0 || index > num_active_timeouts) + elog(FATAL, "timeout index %d out of range 0..%d", index, +@@ -128,6 +138,7 @@ + active_timeouts[index] = &all_timeouts[id]; + + num_active_timeouts++; ++ + } + + /* diff --git a/patches/postgresql-wasm/src-bin-pg_dump-pg_dump.c.diff b/patches/postgresql-wasm/src-bin-pg_dump-pg_dump.c.diff new file mode 100644 index 00000000..f5c583a5 --- /dev/null +++ b/patches/postgresql-wasm/src-bin-pg_dump-pg_dump.c.diff @@ -0,0 +1,36 @@ +--- postgresql-16.3/src/bin/pg_dump/pg_dump.c ++++ postgresql-16.3-wasm/src/bin/pg_dump/pg_dump.c +@@ -30,6 +30,13 @@ + *------------------------------------------------------------------------- + */ + #include "postgres_fe.h" ++#if !defined(__EMSCRIPTEN__) && !defined(__wasi__) ++#ifdef quote_all_identifiers ++#undef quote_all_identifiers ++#endif ++#define fe_utils_quote_all_identifiers quote_all_identifiers ++static bool quote_all_identifiers; ++#endif + + #include + #include +@@ -410,7 +417,7 @@ + {"lock-wait-timeout", required_argument, NULL, 2}, + {"no-table-access-method", no_argument, &dopt.outputNoTableAm, 1}, + {"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1}, +- {"quote-all-identifiers", no_argument, "e_all_identifiers, 1}, ++ {"quote-all-identifiers", no_argument, &fe_utils_quote_all_identifiers, true}, + {"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1}, + {"role", required_argument, NULL, 3}, + {"section", required_argument, NULL, 5}, +@@ -1238,8 +1245,8 @@ + /* + * Quote all identifiers, if requested. + */ +- if (quote_all_identifiers) +- ExecuteSqlStatement(AH, "SET quote_all_identifiers = true"); ++ if (fe_utils_quote_all_identifiers) ++ ExecuteSqlStatement(AH, "SET fe_utils_quote_all_identifiers = true"); + + /* + * Adjust row-security mode, if supported. 
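
The quote_all_identifiers churn here (and in pg_dumpall.c, ruleutils.c and string_utils.c elsewhere in this series) is presumably a linkage fix: the backend defines that global as a GUC in ruleutils.c, the frontend tools define their own copy in fe_utils/string_utils.c, and a build that links frontend and backend objects into one WASM binary would see both definitions. A reduced illustration of the clash, with placeholder file names:

    /* backend_side.c : stands in for src/backend/utils/adt/ruleutils.c */
    #include <stdbool.h>
    bool quote_all_identifiers = false;   /* server GUC */

    /* frontend_side.c : stands in for src/fe_utils/string_utils.c */
    #include <stdbool.h>
    bool quote_all_identifiers = false;   /* pg_dump flag: same symbol */

    /* cc backend_side.c frontend_side.c  =>  duplicate symbol error.
     * The patches rename the frontend copy to
     * fe_utils_quote_all_identifiers and #define-map the old name,
     * so both objects can coexist in a single module. */

One side effect worth noting: the rename also reached into SQL string literals ("SET fe_utils_quote_all_identifiers = true" above), which reads like a mechanical search-and-replace artifact, since the server's GUC is still named quote_all_identifiers upstream.
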
diff --git a/patches/postgresql-wasm/src-bin-pg_dump-pg_dumpall.c.diff b/patches/postgresql-wasm/src-bin-pg_dump-pg_dumpall.c.diff new file mode 100644 index 00000000..6babb783 --- /dev/null +++ b/patches/postgresql-wasm/src-bin-pg_dump-pg_dumpall.c.diff @@ -0,0 +1,46 @@ +--- postgresql-16.3/src/bin/pg_dump/pg_dumpall.c ++++ postgresql-16.3-wasm/src/bin/pg_dump/pg_dumpall.c +@@ -14,7 +14,13 @@ + */ + + #include "postgres_fe.h" +- ++#if !defined(__EMSCRIPTEN__) && !defined(__wasi__) ++#ifdef quote_all_identifiers ++#undef quote_all_identifiers ++#endif ++#define fe_utils_quote_all_identifiers quote_all_identifiers ++static bool quote_all_identifiers; ++#endif + #include + #include + +@@ -163,7 +169,7 @@ + {"lock-wait-timeout", required_argument, NULL, 2}, + {"no-table-access-method", no_argument, &no_table_access_method, 1}, + {"no-tablespaces", no_argument, &no_tablespaces, 1}, +- {"quote-all-identifiers", no_argument, "e_all_identifiers, 1}, ++ {"quote-all-identifiers", no_argument, &fe_utils_quote_all_identifiers, true}, + {"load-via-partition-root", no_argument, &load_via_partition_root, 1}, + {"role", required_argument, NULL, 3}, + {"use-set-session-authorization", no_argument, &use_setsessauth, 1}, +@@ -434,7 +440,7 @@ + appendPQExpBufferStr(pgdumpopts, " --no-table-access-method"); + if (no_tablespaces) + appendPQExpBufferStr(pgdumpopts, " --no-tablespaces"); +- if (quote_all_identifiers) ++ if (fe_utils_quote_all_identifiers) + appendPQExpBufferStr(pgdumpopts, " --quote-all-identifiers"); + if (load_via_partition_root) + appendPQExpBufferStr(pgdumpopts, " --load-via-partition-root"); +@@ -534,8 +540,8 @@ + } + + /* Force quoting of all identifiers if requested. */ +- if (quote_all_identifiers) +- executeCommand(conn, "SET quote_all_identifiers = true"); ++ if (fe_utils_quote_all_identifiers) ++ executeCommand(conn, "SET fe_utils_quote_all_identifiers = true"); + + fprintf(OPF, "--\n-- PostgreSQL database cluster dump\n--\n\n"); + if (verbose) diff --git a/patches/postgresql-wasm/src-bin-pg_resetwal-pg_resetwal.c.diff b/patches/postgresql-wasm/src-bin-pg_resetwal-pg_resetwal.c.diff new file mode 100644 index 00000000..0e74fd00 --- /dev/null +++ b/patches/postgresql-wasm/src-bin-pg_resetwal-pg_resetwal.c.diff @@ -0,0 +1,11 @@ +--- postgresql-16.3/src/bin/pg_resetwal/pg_resetwal.c ++++ postgresql-16.3-wasm/src/bin/pg_resetwal/pg_resetwal.c +@@ -330,7 +330,7 @@ + * -- any other user won't have sufficient permissions to modify files in + * the data directory. + */ +-#ifndef WIN32 ++#if !defined(WIN32) && !defined(__EMSCRIPTEN__) && !defined(__wasi__) + if (geteuid() == 0) + { + pg_log_error("cannot be executed by \"root\""); diff --git a/patches/postgresql-wasm/src-common-exec.c.diff b/patches/postgresql-wasm/src-common-exec.c.diff new file mode 100644 index 00000000..a49758f6 --- /dev/null +++ b/patches/postgresql-wasm/src-common-exec.c.diff @@ -0,0 +1,11 @@ +--- postgresql-16.3/src/common/exec.c ++++ postgresql-16.3-wasm/src/common/exec.c +@@ -22,7 +22,7 @@ + * This should be harmless everywhere else. 
+ */ + #define _DARWIN_BETTER_REALPATH +- ++#define PG_EXEC + #ifndef FRONTEND + #include "postgres.h" + #else diff --git a/patches/postgresql-wasm/src-fe_utils-string_utils.c.diff b/patches/postgresql-wasm/src-fe_utils-string_utils.c.diff new file mode 100644 index 00000000..f8b7ecbb --- /dev/null +++ b/patches/postgresql-wasm/src-fe_utils-string_utils.c.diff @@ -0,0 +1,22 @@ +--- postgresql-16.3/src/fe_utils/string_utils.c ++++ postgresql-16.3-wasm/src/fe_utils/string_utils.c +@@ -23,7 +23,9 @@ + static PQExpBuffer defaultGetLocalPQExpBuffer(void); + + /* Globals exported by this file */ +-int quote_all_identifiers = 0; ++bool fe_utils_quote_all_identifiers = false; ++ ++ + PQExpBuffer (*getLocalPQExpBuffer) (void) = defaultGetLocalPQExpBuffer; + + +@@ -72,7 +74,7 @@ + * These checks need to match the identifier production in scan.l. Don't + * use islower() etc. + */ +- if (quote_all_identifiers) ++ if (fe_utils_quote_all_identifiers) + need_quotes = true; + /* slightly different rules for first character */ + else if (!((rawid[0] >= 'a' && rawid[0] <= 'z') || rawid[0] == '_')) diff --git a/patches/postgresql-wasm/src-include-bootstrap-bootstrap.h.diff b/patches/postgresql-wasm/src-include-bootstrap-bootstrap.h.diff new file mode 100644 index 00000000..b5e00465 --- /dev/null +++ b/patches/postgresql-wasm/src-include-bootstrap-bootstrap.h.diff @@ -0,0 +1,15 @@ +--- postgresql-16.3/src/include/bootstrap/bootstrap.h ++++ postgresql-16.3-wasm/src/include/bootstrap/bootstrap.h +@@ -31,8 +31,11 @@ + extern PGDLLIMPORT Form_pg_attribute attrtypes[MAXATTR]; + extern PGDLLIMPORT int numattr; + +- ++#if defined(__EMSCRIPTEN__) || defined(__wasi__) ++ int BootstrapModeMain(int argc, char *argv[], bool check_only); ++#else + extern void BootstrapModeMain(int argc, char *argv[], bool check_only) pg_attribute_noreturn(); ++#endif + + extern void closerel(char *relname); + extern void boot_openrel(char *relname); diff --git a/patches/postgresql-wasm/src-include-common-file_utils.h.diff b/patches/postgresql-wasm/src-include-common-file_utils.h.diff new file mode 100644 index 00000000..dd6005ea --- /dev/null +++ b/patches/postgresql-wasm/src-include-common-file_utils.h.diff @@ -0,0 +1,17 @@ +--- postgresql-16.3/src/include/common/file_utils.h ++++ postgresql-16.3-wasm/src/include/common/file_utils.h +@@ -27,10 +27,14 @@ + struct iovec; /* avoid including port/pg_iovec.h here */ + + #ifdef FRONTEND ++#if !defined(fsync_fname) + extern int fsync_fname(const char *fname, bool isdir); ++#endif + extern void fsync_pgdata(const char *pg_data, int serverVersion); + extern void fsync_dir_recurse(const char *dir); ++#if !defined(durable_rename) + extern int durable_rename(const char *oldfile, const char *newfile); ++#endif + extern int fsync_parent_path(const char *fname); + #endif + diff --git a/patches/postgresql-wasm/src-include-common-logging.h.diff b/patches/postgresql-wasm/src-include-common-logging.h.diff new file mode 100644 index 00000000..8d2ded3a --- /dev/null +++ b/patches/postgresql-wasm/src-include-common-logging.h.diff @@ -0,0 +1,25 @@ +--- postgresql-16.3/src/include/common/logging.h ++++ postgresql-16.3-wasm/src/include/common/logging.h +@@ -85,16 +85,21 @@ + */ + #define PG_LOG_FLAG_TERSE 1 + ++#if defined(PG_INITDB) && defined(PG_MAIN) ++#else + void pg_logging_init(const char *argv0); ++#endif + void pg_logging_config(int new_flags); + void pg_logging_set_level(enum pg_log_level new_level); + void pg_logging_increase_verbosity(void); + void pg_logging_set_pre_callback(void (*cb) (void)); + 
void pg_logging_set_locus_callback(void (*cb) (const char **filename, uint64 *lineno)); +- ++#if defined(PG_INITDB) && defined(PG_MAIN) ++#else + void pg_log_generic(enum pg_log_level level, enum pg_log_part part, + const char *pg_restrict fmt,...) + pg_attribute_printf(3, 4); ++#endif + void pg_log_generic_v(enum pg_log_level level, enum pg_log_part part, + const char *pg_restrict fmt, va_list ap) + pg_attribute_printf(3, 0); diff --git a/patches/postgresql-wasm/src-include-fe_utils-string_utils.h.diff b/patches/postgresql-wasm/src-include-fe_utils-string_utils.h.diff new file mode 100644 index 00000000..6197da7b --- /dev/null +++ b/patches/postgresql-wasm/src-include-fe_utils-string_utils.h.diff @@ -0,0 +1,16 @@ +--- postgresql-16.3/src/include/fe_utils/string_utils.h ++++ postgresql-16.3-wasm/src/include/fe_utils/string_utils.h +@@ -19,8 +19,12 @@ + #include "libpq-fe.h" + #include "pqexpbuffer.h" + ++/*#include "../interfaces/libpq/libpq-fe.h"*/ ++/*#include "../interfaces/libpq/pqexpbuffer.h"*/ ++ ++ + /* Global variables controlling behavior of fmtId() and fmtQualifiedId() */ +-extern PGDLLIMPORT int quote_all_identifiers; ++extern PGDLLIMPORT bool quote_all_identifiers; + extern PQExpBuffer (*getLocalPQExpBuffer) (void); + + /* Functions */ diff --git a/patches/postgresql-wasm/src-include-libpq-be-fsstubs.h.diff b/patches/postgresql-wasm/src-include-libpq-be-fsstubs.h.diff new file mode 100644 index 00000000..a74fee0e --- /dev/null +++ b/patches/postgresql-wasm/src-include-libpq-be-fsstubs.h.diff @@ -0,0 +1,15 @@ +--- postgresql-16.3/src/include/libpq/be-fsstubs.h ++++ postgresql-16.3-wasm/src/include/libpq/be-fsstubs.h +@@ -19,8 +19,10 @@ + * Probably these should have had the underscore-free names, + * but too late now... + */ +-extern int lo_read(int fd, char *buf, int len); +-extern int lo_write(int fd, const char *buf, int len); ++#if !defined(__EMSCRIPTEN__) && !defined(__wasi__) ++extern int lo_read3(int fd, char *buf, int len); ++extern int lo_write3(int fd, const char *buf, int len); ++#endif + + /* + * Cleanup LOs at xact commit/abort diff --git a/patches/postgresql-wasm/src-include-storage-fd.h.diff b/patches/postgresql-wasm/src-include-storage-fd.h.diff new file mode 100644 index 00000000..458d2f39 --- /dev/null +++ b/patches/postgresql-wasm/src-include-storage-fd.h.diff @@ -0,0 +1,22 @@ +--- postgresql-16.3/src/include/storage/fd.h ++++ postgresql-16.3-wasm/src/include/storage/fd.h +@@ -188,13 +188,17 @@ + extern int pg_fdatasync(int fd); + extern void pg_flush_data(int fd, off_t offset, off_t nbytes); + extern int pg_truncate(const char *path, off_t length); +-extern void fsync_fname(const char *fname, bool isdir); ++extern void fd_fsync_fname(const char *fname, bool isdir); + extern int fsync_fname_ext(const char *fname, bool isdir, bool ignore_perm, int elevel); +-extern int durable_rename(const char *oldfile, const char *newfile, int elevel); ++extern int fd_durable_rename(const char *oldfile, const char *newfile, int elevel); + extern int durable_unlink(const char *fname, int elevel); + extern void SyncDataDirectory(void); + extern int data_sync_elevel(int elevel); + ++#define durable_rename(oldfile, newfile, elevel) fd_durable_rename(oldfile, newfile, elevel) ++#define fsync_fname(fname, isdir) fd_fsync_fname(fname, isdir) ++ ++ + /* Filename components */ + #define PG_TEMP_FILES_DIR "pgsql_tmp" + #define PG_TEMP_FILE_PREFIX "pgsql_tmp" diff --git a/patches/postgresql-wasm/src-include-storage-ipc.h.diff b/patches/postgresql-wasm/src-include-storage-ipc.h.diff new 
file mode 100644 index 00000000..d42650da --- /dev/null +++ b/patches/postgresql-wasm/src-include-storage-ipc.h.diff @@ -0,0 +1,15 @@ +--- postgresql-16.3/src/include/storage/ipc.h ++++ postgresql-16.3-wasm/src/include/storage/ipc.h +@@ -64,8 +64,11 @@ + /* ipc.c */ + extern PGDLLIMPORT bool proc_exit_inprogress; + extern PGDLLIMPORT bool shmem_exit_inprogress; +- ++#if defined(__EMSCRIPTEN__) || defined(__wasi__) ++extern void pg_proc_exit(int code); ++#else + extern void proc_exit(int code) pg_attribute_noreturn(); ++#endif + extern void shmem_exit(int code); + extern void on_proc_exit(pg_on_exit_callback function, Datum arg); + extern void on_shmem_exit(pg_on_exit_callback function, Datum arg); diff --git a/patches/postgresql-wasm/src-include-utils-elog.h.diff b/patches/postgresql-wasm/src-include-utils-elog.h.diff new file mode 100644 index 00000000..8498c61f --- /dev/null +++ b/patches/postgresql-wasm/src-include-utils-elog.h.diff @@ -0,0 +1,31 @@ +--- postgresql-16.3/src/include/utils/elog.h ++++ postgresql-16.3-wasm/src/include/utils/elog.h +@@ -137,6 +137,19 @@ + * prevents gcc from making the unreachability deduction at optlevel -O0. + *---------- + */ ++#if defined(__EMSCRIPTEN__) || defined(__wasi__) ++#define ereport_domain(elevel, domain, ...) \ ++ do { \ ++ pg_prevent_errno_in_scope(); \ ++ if (__builtin_constant_p(elevel) && (elevel) >= ERROR ? \ ++ errstart_cold(elevel, domain) : \ ++ errstart(elevel, domain)) \ ++ __VA_ARGS__, errfinish(__FILE__, __LINE__, __func__); \ ++ if (__builtin_constant_p(elevel) && (elevel) >= ERROR) \ ++ { puts("# 149:pg_unreachable():" __FILE__); pg_unreachable(); } \ ++ } while(0) ++ ++#else + #ifdef HAVE__BUILTIN_CONSTANT_P + #define ereport_domain(elevel, domain, ...) \ + do { \ +@@ -159,7 +172,7 @@ + pg_unreachable(); \ + } while(0) + #endif /* HAVE__BUILTIN_CONSTANT_P */ +- ++#endif + #define ereport(elevel, ...) \ + ereport_domain(elevel, TEXTDOMAIN, __VA_ARGS__) + diff --git a/patches/postgresql-wasm/src-include-utils-palloc.h.diff b/patches/postgresql-wasm/src-include-utils-palloc.h.diff new file mode 100644 index 00000000..dd8aec26 --- /dev/null +++ b/patches/postgresql-wasm/src-include-utils-palloc.h.diff @@ -0,0 +1,31 @@ +--- postgresql-16.3/src/include/utils/palloc.h ++++ postgresql-16.3-wasm/src/include/utils/palloc.h +@@ -56,7 +56,9 @@ + * Avoid accessing it directly! Instead, use MemoryContextSwitchTo() + * to change the setting. + */ ++#if !defined(PG_EXTERN) + extern PGDLLIMPORT MemoryContext CurrentMemoryContext; ++#endif + + /* + * Flags for MemoryContextAllocExtended. +@@ -132,7 +134,7 @@ + * it's necessary to hide the inline definition of MemoryContextSwitchTo in + * this scenario; hence the #ifndef FRONTEND. 
+ */ +- ++#if !defined(PG_EXTERN) + #ifndef FRONTEND + static inline MemoryContext + MemoryContextSwitchTo(MemoryContext context) +@@ -143,7 +145,8 @@ + return old; + } + #endif /* FRONTEND */ +- ++#else ++#endif + /* Registration of memory context reset/delete callbacks */ + extern void MemoryContextRegisterResetCallback(MemoryContext context, + MemoryContextCallback *cb); diff --git a/patches/postgresql-wasm/src-interfaces-libpq-fe-auth.c.diff b/patches/postgresql-wasm/src-interfaces-libpq-fe-auth.c.diff new file mode 100644 index 00000000..a5ce4b92 --- /dev/null +++ b/patches/postgresql-wasm/src-interfaces-libpq-fe-auth.c.diff @@ -0,0 +1,21 @@ +--- postgresql-16.3/src/interfaces/libpq/fe-auth.c ++++ postgresql-16.3-wasm/src/interfaces/libpq/fe-auth.c +@@ -1170,6 +1170,7 @@ + pg_fe_getusername(uid_t user_id, PQExpBuffer errorMessage) + { + char *result = NULL; ++#if !defined(__EMSCRIPTEN__) + const char *name = NULL; + + #ifdef WIN32 +@@ -1202,7 +1203,9 @@ + else if (errorMessage) + appendPQExpBuffer(errorMessage, "%s\n", pwdbuf); + #endif +- ++#else ++ const char *name = WASM_USERNAME; ++#endif + if (name) + { + result = strdup(name); diff --git a/patches/postgresql-wasm/src-interfaces-libpq-fe-connect.c.diff b/patches/postgresql-wasm/src-interfaces-libpq-fe-connect.c.diff new file mode 100644 index 00000000..e02cd5a6 --- /dev/null +++ b/patches/postgresql-wasm/src-interfaces-libpq-fe-connect.c.diff @@ -0,0 +1,80 @@ +--- postgresql-16.3/src/interfaces/libpq/fe-connect.c ++++ postgresql-16.3-wasm/src/interfaces/libpq/fe-connect.c +@@ -1932,6 +1932,7 @@ + static int + connectNoDelay(PGconn *conn) + { ++#if !defined(__EMSCRIPTEN__) + #ifdef TCP_NODELAY + int on = 1; + +@@ -1946,7 +1947,7 @@ + return 0; + } + #endif +- ++#endif + return 1; + } + +@@ -2067,6 +2068,9 @@ + static int + useKeepalives(PGconn *conn) + { ++#if defined(__EMSCRIPTEN__) ++return 0; ++#else + char *ep; + int val; + +@@ -2076,6 +2080,7 @@ + if (*ep) + return -1; + return val != 0 ? 1 : 0; ++#endif + } + + /* +@@ -2348,13 +2353,14 @@ + * Nobody but developers should see this message, so we don't bother + * translating it. + */ ++#if !defined(__EMSCRIPTEN__) + if (!pg_link_canary_is_frontend()) + { + appendPQExpBufferStr(&conn->errorMessage, + "libpq is incorrectly linked to backend functions\n"); + goto connect_errReturn; + } +- ++#endif + /* Ensure our buffers are empty */ + conn->inStart = conn->inCursor = conn->inEnd = 0; + conn->outCount = 0; +@@ -2420,6 +2426,7 @@ + */ + if (conn->connect_timeout != NULL) + { ++puts("# timeout set !"); + if (!parse_int_param(conn->connect_timeout, &timeout, conn, + "connect_timeout")) + { +@@ -2440,7 +2447,9 @@ + } + else /* negative means 0 */ + timeout = 0; +- } ++ } else { ++puts("# no timeout"); flag = PGRES_POLLING_OK; ++} + + for (;;) + { +@@ -2498,6 +2507,7 @@ + + if (ret == 1) /* connect_timeout elapsed */ + { ++puts("# timeout !"); + /* + * Give up on current server/address, try the next one. + */ diff --git a/patches/postgresql-wasm/src-interfaces-libpq-fe-exec.c.diff b/patches/postgresql-wasm/src-interfaces-libpq-fe-exec.c.diff new file mode 100644 index 00000000..9d4343f9 --- /dev/null +++ b/patches/postgresql-wasm/src-interfaces-libpq-fe-exec.c.diff @@ -0,0 +1,18 @@ +--- postgresql-16.3/src/interfaces/libpq/fe-exec.c ++++ postgresql-16.3-wasm/src/interfaces/libpq/fe-exec.c +@@ -1694,8 +1694,15 @@ + /* Don't try to send if we know there's no live connection. 
*/ + if (conn->status != CONNECTION_OK) + { ++#if defined(__EMSCRIPTEN__) || defined(__wasi__) ++ puts("#1699 !!!"); ++ conn->status = CONNECTION_OK; ++ conn->asyncStatus = PGASYNC_IDLE; ++ PQconnectPoll(conn); ++#else + libpq_append_conn_error(conn, "no connection to the server"); + return false; ++#endif + } + + /* Can't send while already busy, either, unless enqueuing for later */ diff --git a/patches/postgresql-wasm/src-interfaces-libpq-legacy-pqsignal.c.diff b/patches/postgresql-wasm/src-interfaces-libpq-legacy-pqsignal.c.diff new file mode 100644 index 00000000..80dab05a --- /dev/null +++ b/patches/postgresql-wasm/src-interfaces-libpq-legacy-pqsignal.c.diff @@ -0,0 +1,15 @@ +--- postgresql-16.3/src/interfaces/libpq/legacy-pqsignal.c ++++ postgresql-16.3-wasm/src/interfaces/libpq/legacy-pqsignal.c +@@ -32,6 +32,7 @@ + * non-ENABLE_THREAD_SAFETY builds), so the incompatibility isn't + * troublesome for internal references. + */ ++#if !defined(__EMSCRIPTEN__) + pqsigfunc + pqsignal(int signo, pqsigfunc func) + { +@@ -55,3 +56,4 @@ + return signal(signo, func); + #endif + } ++#endif // __EMSCRIPTEN__ diff --git a/patches/tinytar.min.js b/patches/tinytar.min.js new file mode 100644 index 00000000..48d9ee32 --- /dev/null +++ b/patches/tinytar.min.js @@ -0,0 +1,2 @@ +!function(r,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.tinyTar=e():r.tinyTar=e()}(this,function(){return function(r){function e(n){if(t[n])return t[n].exports;var i=t[n]={exports:{},id:n,loaded:!1};return r[n].call(i.exports,i,i.exports,e),i.loaded=!0,i.exports}var t={};return e.m=r,e.c=t,e.p="",e(0)}([function(r,e,t){"use strict";var n=t(1),i=t(2),o=t(3),u=t(5);n.extend(r.exports,o,u,i)},function(r,e){"use strict";function t(r){return r===T}function n(r){return"string"==typeof r||"[object String]"==Object.prototype.toString.call(r)}function i(r){return"[object Date]"==Object.prototype.toString.call(r)}function o(r){return null!==r&&"object"==typeof r}function u(r){return"function"==typeof r}function c(r){return"number"==typeof r&&r>-1&&r%1==0&&r<=x}function a(r){return"[object Array]"==Object.prototype.toString.call(r)}function f(r){return o(r)&&!u(r)&&c(r.length)}function s(r){return"[object ArrayBuffer]"==Object.prototype.toString.call(r)}function p(r,e){return Array.prototype.map.call(r,e)}function l(r,e){var t=T;return u(e)&&Array.prototype.every.call(r,function(r,n,i){var o=e(r,n,i);return o&&(t=r),!o}),t}function d(r){return Object.assign.apply(null,arguments)}function h(r){var e,t,i;if(n(r)){for(t=r.length,i=new Uint8Array(t),e=0;e=0?t.substr(0,n):t}function a(r){var e=String.fromCharCode.apply(null,r);return parseInt(e.replace(/^0+$/g,""),8)||0}function f(r){return 0==r.length||0==r[0]?null:new Date(1e3*a(r))}function s(r,e,t){var n=parseInt(e,10)||0,i=Math.min(n+A,r.length),o=0,u=0,c=0;t&&m.every(function(r){return"checksum"!=r[0]||(u=n+r[2],c=u+r[1],!1)});for(var a=" ".charCodeAt(0),f=n;f=u&&f=l.recordSize;){r=p.toUint8Array(r);var h=u(r,f,e);if(!h)break;f+=n(h);var x=c(r,f,h,e);if(t.push(a(h,x)),f+=i(h.size),o(r,f))break}return t}var s=t(2),p=t(1),l=t(4),d={extractData:!0,checkHeader:!0,checkChecksum:!0,checkFileSize:!0},h={size:!0,checksum:!0,ustar:!0},x={unexpectedEndOfFile:"Unexpected end of file.",fileCorrupted:"File is corrupted.",checksumCheckFailed:"Checksum check failed."};r.exports.untar=f}])}); +//# sourceMappingURL=tinytar.min.js.map \ No newline at end of file diff --git a/pnpm-lock.yaml 
b/pnpm-lock.yaml index 2cae4aeb..2910fed7 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -17,7 +17,7 @@ importers: dependencies: wa-sqlite: specifier: github:rhashimoto/wa-sqlite - version: github.com/rhashimoto/wa-sqlite/5c3691569c150ca884314aa1a0d7e2760c48e873 + version: github.com/rhashimoto/wa-sqlite/55bf0b6646ac1bb81b00b9c25636b59a87e4a918 devDependencies: '@types/better-sqlite3': specifier: ^7.6.9 @@ -37,9 +37,15 @@ importers: packages/pglite: devDependencies: + '@types/emscripten': + specifier: ^1.39.13 + version: 1.39.13 '@types/node': specifier: ^20.11.18 version: 20.11.19 + '@types/node-fetch': + specifier: ^2.6.11 + version: 2.6.11 async-mutex: specifier: ^0.4.1 version: 0.4.1 @@ -49,6 +55,9 @@ importers: buffer: specifier: ^6.0.3 version: 6.0.3 + bun: + specifier: ^1.1.18 + version: 1.1.18 comlink: specifier: ^4.4.1 version: 4.4.1 @@ -67,6 +76,9 @@ importers: prettier: specifier: 3.2.5 version: 3.2.5 + tinytar: + specifier: ^0.1.0 + version: 0.1.0 tsup: specifier: ^8.0.2 version: 8.0.2(typescript@5.3.3) @@ -1191,6 +1203,70 @@ packages: fastq: 1.17.1 dev: true + /@oven/bun-darwin-aarch64@1.1.18: + resolution: {integrity: sha512-2YMh1G+S5AxDqOEDh9i+9kc17887mkP/yzK/d5DQ0NyPt5uR2w5FKGaalPLDiu5w139y3LKBi+1eGba1oEJnyw==} + cpu: [arm64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /@oven/bun-darwin-x64-baseline@1.1.18: + resolution: {integrity: sha512-shwwfe9Yugpyr490FdjQ90O3JtETbszyUk4PBXQrbz3babPfhXGuVGewis8ORNYeb8zoWGo/adk4biby6kKwHA==} + cpu: [x64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /@oven/bun-darwin-x64@1.1.18: + resolution: {integrity: sha512-ppeJpQqEXO6nfCneq2TXYFO/l1S/KYKTt3cintTiQxW0ISvj36vQcP/l0ln8BxEu46EnqulVKDrkTBAttv9sww==} + cpu: [x64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /@oven/bun-linux-aarch64@1.1.18: + resolution: {integrity: sha512-cDwqcGA/PiiqM8pQkZSRW0HbSh3r1hMsS2ew61d6FjjEI7HP+bwTuu0n0rGdzQKWTtb3PzzXvOkiFZywKS5Gzg==} + cpu: [arm64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@oven/bun-linux-x64-baseline@1.1.18: + resolution: {integrity: sha512-hxnFwssve6M9i4phusIn9swFvQKwLI+9i2taWSotshp1axLXQ5ruIIE9WPKJGR0i+yuw5Q8HBCnUDDh5ZMp9rA==} + cpu: [x64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@oven/bun-linux-x64@1.1.18: + resolution: {integrity: sha512-oce0pELxlVhRO7clQGAkbo8vfxaCmRpf7Tu/Swn+T/wqeA5tew02HmsZAnDQqgYx8Z2/QpCOfF1SvLsdg7hR+A==} + cpu: [x64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@oven/bun-windows-x64-baseline@1.1.18: + resolution: {integrity: sha512-Wlb55q9QbayO+7NvfYMnU8oaTPz1k2xMr7mm9+JOnG/I6q82HMvIQEG181bAhU1kcm5YcZZ5E0WMp2gX3NFsEw==} + cpu: [x64] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /@oven/bun-windows-x64@1.1.18: + resolution: {integrity: sha512-d639p5g8hrXyvFX3FK9EpsaoVEhMRThftmkueljjpYnYjMvIiMQ2crHtI2zwZ6yLEHvecaFXVXlocu2+jxia7g==} + cpu: [x64] + os: [win32] + requiresBuild: true + dev: true + optional: true + /@pkgjs/parseargs@0.11.0: resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} engines: {node: '>=14'} @@ -1542,10 +1618,21 @@ packages: '@types/node': 20.11.19 dev: true + /@types/emscripten@1.39.13: + resolution: {integrity: sha512-cFq+fO/isvhvmuP/+Sl4K4jtU6E23DoivtbO4r50e3odaxAiVdbfSYRDdJ4gCdxx+3aRjhphS5ZMwIH4hFy/Cw==} + dev: true + /@types/estree@1.0.5: resolution: {integrity: 
sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==} dev: true + /@types/node-fetch@2.6.11: + resolution: {integrity: sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==} + dependencies: + '@types/node': 20.11.19 + form-data: 4.0.0 + dev: true + /@types/node@20.11.19: resolution: {integrity: sha512-7xMnVEcZFu0DikYjWOlRq7NTPETrm7teqUT2WkQjrTIkEgUyyGdWsj/Zg8bEJt5TNklzbPD1X3fqfsHw3SpapQ==} dependencies: @@ -2027,6 +2114,10 @@ packages: lodash: 4.17.21 dev: true + /asynckit@0.4.0: + resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} + dev: true + /ava@6.1.2: resolution: {integrity: sha512-WcpxJ8yZ7mk9ABTinD0IAjcemovSeVGjuuwZx0JS9johREWFeLTl8UP6wd7l6nmnrWqkKZdwaD71a/ocH4qPKw==} engines: {node: ^18.18 || ^20.8 || ^21} @@ -2154,7 +2245,7 @@ packages: hasBin: true dependencies: caniuse-lite: 1.0.30001636 - electron-to-chromium: 1.4.806 + electron-to-chromium: 1.4.807 node-releases: 2.0.14 update-browserslist-db: 1.0.16(browserslist@4.23.1) dev: true @@ -2182,6 +2273,23 @@ packages: ieee754: 1.2.1 dev: true + /bun@1.1.18: + resolution: {integrity: sha512-bv1wLYtmkn6GCqYFsVO9xZzPvNaDlA3xHbtePGHMtXMqq8N/vo+L6b19LB4+I5RKXFAsSmgzonyh2oMExaaWcQ==} + cpu: [arm64, x64] + os: [darwin, linux, win32] + hasBin: true + requiresBuild: true + optionalDependencies: + '@oven/bun-darwin-aarch64': 1.1.18 + '@oven/bun-darwin-x64': 1.1.18 + '@oven/bun-darwin-x64-baseline': 1.1.18 + '@oven/bun-linux-aarch64': 1.1.18 + '@oven/bun-linux-x64': 1.1.18 + '@oven/bun-linux-x64-baseline': 1.1.18 + '@oven/bun-windows-x64': 1.1.18 + '@oven/bun-windows-x64-baseline': 1.1.18 + dev: true + /bundle-require@4.0.2(esbuild@0.19.12): resolution: {integrity: sha512-jwzPOChofl67PSTW2SGubV9HBQAhhR2i6nskiOThauo9dzwDUgOWQScFVaJkjEfYX+UXiD+LEx8EblQMc2wIag==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} @@ -2352,6 +2460,13 @@ packages: hasBin: true dev: true + /combined-stream@1.0.8: + resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} + engines: {node: '>= 0.8'} + dependencies: + delayed-stream: 1.0.0 + dev: true + /comlink@4.4.1: resolution: {integrity: sha512-+1dlx0aY5Jo1vHy/tSsIGpSkN4tS9rZSW8FIhG0JH/crs9wwweswIo/POr451r7bZww3hFbPAKnTpimzL/mm4Q==} dev: true @@ -2522,6 +2637,11 @@ packages: gopd: 1.0.1 dev: true + /delayed-stream@1.0.0: + resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} + engines: {node: '>=0.4.0'} + dev: true + /delegates@1.0.0: resolution: {integrity: sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==} dev: true @@ -2549,8 +2669,8 @@ packages: resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==} dev: true - /electron-to-chromium@1.4.806: - resolution: {integrity: sha512-nkoEX2QIB8kwCOtvtgwhXWy2IHVcOLQZu9Qo36uaGB835mdX/h8uLRlosL6QIhLVUnAiicXRW00PwaPZC74Nrg==} + /electron-to-chromium@1.4.807: + resolution: {integrity: sha512-kSmJl2ZwhNf/bcIuCH/imtNOKlpkLDn2jqT5FJ+/0CXjhnFaOa9cOe9gHKKy71eM49izwuQjZhKk+lWQ1JxB7A==} dev: true /embedded-postgres@15.5.1-beta.9: @@ -2944,6 +3064,15 @@ packages: signal-exit: 4.1.0 dev: true + /form-data@4.0.0: + resolution: {integrity: sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==} + engines: {node: '>= 6'} + dependencies: + asynckit: 
0.4.0 + combined-stream: 1.0.8 + mime-types: 2.1.35 + dev: true + /fs-constants@1.0.0: resolution: {integrity: sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==} dev: true @@ -3572,6 +3701,18 @@ packages: picomatch: 2.3.1 dev: true + /mime-db@1.52.0: + resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} + engines: {node: '>= 0.6'} + dev: true + + /mime-types@2.1.35: + resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} + engines: {node: '>= 0.6'} + dependencies: + mime-db: 1.52.0 + dev: true + /mime@1.6.0: resolution: {integrity: sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==} engines: {node: '>=4'} @@ -4653,6 +4794,11 @@ packages: engines: {node: '>=4'} dev: true + /tinytar@0.1.0: + resolution: {integrity: sha512-Ko2BT2VtuszvlWz3fC1y5EJu2pTZ1bf4HvDRh9RhVJbeXXRtoSrWj7ml1RrWp5dA+JRKSlTcYDwFXlm/0fqDuw==} + engines: {node: ^4.0.0, npm: ^2.0.0} + dev: true + /to-fast-properties@2.0.0: resolution: {integrity: sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==} engines: {node: '>=4'} @@ -5068,8 +5214,8 @@ packages: commander: 9.5.0 dev: true - github.com/rhashimoto/wa-sqlite/5c3691569c150ca884314aa1a0d7e2760c48e873: - resolution: {tarball: https://codeload.github.com/rhashimoto/wa-sqlite/tar.gz/5c3691569c150ca884314aa1a0d7e2760c48e873} + github.com/rhashimoto/wa-sqlite/55bf0b6646ac1bb81b00b9c25636b59a87e4a918: + resolution: {tarball: https://codeload.github.com/rhashimoto/wa-sqlite/tar.gz/55bf0b6646ac1bb81b00b9c25636b59a87e4a918} name: wa-sqlite - version: 1.0.0-beta.2 + version: 1.0.0 dev: false diff --git a/postgres b/postgres deleted file mode 160000 index 1ef91d2a..00000000 --- a/postgres +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 1ef91d2a9a370c463edaec0f98ac72cfd0b57647 diff --git a/tests/repl.html b/tests/repl.html new file mode 100644 index 00000000..711ce79b --- /dev/null +++ b/tests/repl.html @@ -0,0 +1,516 @@ + + + + + + + PG SHELL TEST + + + + + + + + + + + + +
[extraction residue: the 516 added lines of tests/repl.html survive only as bare text nodes, namely the page title "PG SHELL TEST" (above), a status element reading "emscripten", and a "Downloading..." progress message. The surrounding stray "+" markers are what remains of the stripped markup, which is not reconstructable from this copy.]
+ + + + + + diff --git a/tests/vtx.js b/tests/vtx.js new file mode 100644 index 00000000..ada9ccae --- /dev/null +++ b/tests/vtx.js @@ -0,0 +1,320 @@ +"use strict"; + +var readline = { last_cx : -1 , index : 0, history : ["help()"] } + +// two modes based on RAW_MODE: default readline emulation or vm.rawstdin + +readline.complete = function (line) { + if ( readline.history[ readline.history.length -1 ] != line ) + readline.history.push(line); + readline.index = 0; + vm.readline(line + "\n") + +} + +if (!window.Terminal) { + if (window.vm) { + var xterm_cdn + if (window.vm.config && window.vm.config.cdn) { + xterm_cdn = window.vm.config.cdn+"vt/" + console.log("Terminal+ImageAddon importing from CDN :", xterm_cdn) + } else { + xterm_cdn = xterm_cdn || "https://pygame-web.github.io/archives/vt/" + console.warn("Terminal+ImageAddon importing from fallback ", xterm_cdn) + } + + for (const css of ["xterm.css"]) { + const cssref = document.createElement('link') + cssref.setAttribute("rel", "stylesheet") + cssref.setAttribute("type", "text/css") + cssref.setAttribute("href", xterm_cdn + css) + document.getElementsByTagName("head")[0].appendChild(cssref) + } + + await import(xterm_cdn + "xterm.js") + await import(xterm_cdn + "xterm-addon-image.js") + } else { + console.error("only use import for vtx.js module") + } + +} else { + console.warn("Terminal+ImageAddon were inlined") +} + + + +export class WasmTerminal { + constructor(hostid, cols, rows, fontsize, is_fbdev, addons_list) { + this.input = '' + this.resolveInput = null + this.activeInput = true + this.inputStartCursor = null + + this.nodup = 1 + var theme = { + background: '#1a1c1f' + } + + + var transparency = false + var sback = 1000 + + if (is_fbdev) { + theme = { + foreground: '#ffffff', + background: 'rgba(0, 0, 0, 0)' + } + sback = 0 + transparency = true + } + + this.xterm = new Terminal( + { +// rendererType : "dom", + rendererType : "webgl", + experimentalCharAtlas : "webgl", + theme: theme, + allowTransparency: transparency, + allowProposedApi : true , // xterm 0.5 + sixel + scrollback: sback, + fontFamily: 'Courier-new, courier, monospace', + fontSize: (fontsize || 12), + cols: (cols || 132), + rows: (rows || 32) + } + ); + + if (typeof(Worker) !== "undefined") { + + for (const addon of (addons_list||[]) ) { + console.warn(hostid, cols, rows, addon) + const imageAddon = new ImageAddon.ImageAddon(addon.url , addon); + this.xterm.loadAddon(imageAddon); + this.sixel = function write(data) { + this.xterm.write(data) + } + } + + } else { + console.warn("No worker support, not loading xterm addons") + this.sixel = function ni() { + console.warn("SIXEL N/I") + } + } + + this.xterm.open(document.getElementById(hostid)) + + this.xterm.onKey((keyEvent) => { + // Fix for iOS Keyboard Jumping on space + if (keyEvent.key === " ") { + keyEvent.domEvent.preventDefault(); + } + + }); + + this.xterm.onData(this.handleTermData) + } + + open(container) { + this.xterm.open(container); + } + + ESC() { + for (var i=0; i < arguments.length; i++) + this.xterm.write("\x1b"+arguments[i]) + } + + handleTermData = (data) => { + + + +// TODO: check mouse Y pos for raw mode in debug mode + if (window.RAW_MODE) { + vm.rawstdin(data) + return + } + + const ord = data.charCodeAt(0); + let ofs; + + const cx = this.xterm.buffer.active.cursorX + + // TODO: Handle ANSI escape sequences + if (ord === 0x1b) { + + // Handle special characters + switch ( data.charCodeAt(1) ) { + case 0x5b: + + const cursor = readline.history.length + readline.index + var histo = ">h> " 
+ + switch ( data.charCodeAt(2) ) { + // "?" + case 63: + const c4 = data.charCodeAt(4) + const c5 = data.charCodeAt(5) + if ((c4==54) && (c5==99)) { + // Primary Device Attribute of Sixel support : 4 + // "?6c" https://github.com/odknt/st/issues/1 + console.log("query") + + } + + + case 65: + //console.log("VT UP") + // memo cursor pos before entering histo + if (!readline.index) { + if (readline.last_cx < 0 ) { + readline.last_cx = cx + readline.buffer = this.input + } + // TODO: get current line content from XTERM + } + + if ( cursor >0 ) { + readline.index-- + histo = ">h> " +readline.history[cursor-1] + //console.log(__FILE__," histo-up :", readline.index, cursor, histo) + + this.ESC("[132D","[2K") + this.xterm.write(histo) + this.input = histo.substr(4) + } + break; + + case 66: + //console.log("VT DOWN") + if ( readline.index < 0 ) { + readline.index++ + histo = histo + readline.history[cursor] + this.ESC("[132D","[2K") + this.xterm.write(histo) + this.input = histo.substr(4) + } else { + // we are back + if (readline.last_cx >= 0) { + histo = histo + readline.buffer + readline.buffer = "" + this.ESC("[2K") + this.ESC("[132D") + this.xterm.write(histo) + this.input = histo.substr(4) + this.ESC("[132D") + this.ESC("["+readline.last_cx+"C") + //console.log(__FILE__," histo-back", readline.index, cursor, histo) + readline.last_cx = -1 + } + } + break; + + case 67: + //console.log("VT RIGHT") + break; + + case 68: + //console.log("VT LEFT") + break; + + case 60: + // vm.rawstdin(data) + break; + + default: + console.log(__FILE__,"VT unhandled ? "+data.charCodeAt(2)) + } + break + default: + + console.log(__FILE__,"VT ESC "+data.charCodeAt(1)) + } + + } else if (ord < 32 || ord === 0x7f) { + switch (data) { + case "\r": // ENTER + case "\x0a": // CTRL+J + case "\x0d": // CTRL+M + this.xterm.write('\r\n'); + readline.complete(this.input) + this.input = ''; + break; + case "\x7F": // BACKSPACE + case "\x08": // CTRL+H + case "\x04": // CTRL+D + this.handleCursorErase(true); + break; + + case "\0x03": // CTRL+C + + break + + // ^L for clearing VT but keep X pos. + case "\x0c": + const cy = this.xterm.buffer.active.cursorY + + if (cy < this.xterm.rows ) + this.ESC("[B","[J","[A") + + this.ESC("[A","[K","[1J") + + for (var i=1;i0 ) + this.ESC("["+cx+"C") + break; + + default: + switch (ord) { + case 3: + readline.complete("raise KeyboardInterrupt") + break + default : + console.log("vt:" + ord ) + } + } + } else { + this.input += data; + this.xterm.write(data) + } + } + + handleCursorErase() { + // Don't delete past the start of input + if (this.xterm.buffer.active.cursorX <= this.inputStartCursor) { + return + } + this.input = this.input.slice(0, -1) + this.xterm.write('\x1B[D') + this.xterm.write('\x1B[P') + } + + + clear() { + this.xterm.clear() + } + + // direct write + sixel(data) { + this.xterm.write(data) + } + + print(message) { + const normInput = message.replace(/[\r\n]+/g, "\n").replace(/\n/g, "\r\n") + this.xterm.write(normInput) + } + +} + + +window.WasmTerminal = WasmTerminal +window.readline = readline + + + + +
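
To make the module above concrete, here is a hypothetical wiring sketch (not part of the repo): it assumes the page is loaded as an ES module, provides a div with id "terminal" as the host element, and defines the global vm object whose readline/rawstdin hooks handleTermData calls; xterm.js itself is fetched from the fallback CDN when window.vm.config.cdn is unset.

    // Hypothetical usage of tests/vtx.js; "vm" and the host id are the
    // names the module expects, everything else is illustrative.
    window.vm = {
      // receives completed lines (with trailing "\n") from readline.complete()
      readline(line) { console.log("line for the wasm REPL:", line); },
      // receives raw keystrokes when window.RAW_MODE is set
      rawstdin(data) { console.log("raw input:", data); },
    };

    const { WasmTerminal } = await import("./vtx.js");
    const term = new WasmTerminal("terminal", 132, 32, 12, false, []);
    term.print("Downloading...\n"); // print() normalizes \n to \r\n for the VT
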