From 228e3bad3eeace5899aa880715de752b8dbaf0e1 Mon Sep 17 00:00:00 2001 From: chris <1010084+cloverich@users.noreply.github.com> Date: Thu, 26 Dec 2024 07:23:37 -0800 Subject: [PATCH] maintain import tables; document clearing; fix import bugs (#284) - re-work import process to maintain data in import table by default - fix staging error tracking so staging errors are kept but cleared on re-run - expose clear import tables button on settings, document it - fix bug where tags or wikilinks in document would cause import to fail at staging phase - fix bug where same filename from different import would be preferentially used - fix bug where empty document would fail to import (potentially leaving valid title / frontmatter behind) - add success message when clear import button succeeds --- src/electron/migrations/20211005142122.sql | 1 - src/markdown/index.test.ts | 56 +++++--- src/markdown/index.ts | 7 +- src/markdown/test-utils.ts | 52 +++++++- src/preload/client/importer.ts | 108 +++++++++++---- .../client/importer/FilesImportResolver.ts | 2 +- .../{importer.test.ts => frontmatter.test.ts} | 86 ++++++++++-- src/preload/client/importer/frontmatter.ts | 124 +++++++++--------- src/preload/client/index.ts | 3 +- src/views/preferences/index.tsx | 56 +++++--- 10 files changed, 359 insertions(+), 136 deletions(-) rename src/preload/client/importer/{importer.test.ts => frontmatter.test.ts} (79%) diff --git a/src/electron/migrations/20211005142122.sql b/src/electron/migrations/20211005142122.sql index b4cf801..cca4cb4 100644 --- a/src/electron/migrations/20211005142122.sql +++ b/src/electron/migrations/20211005142122.sql @@ -97,7 +97,6 @@ CREATE TABLE IF NOT EXISTS "import_notes" ( "sourcePath" TEXT NOT NULL PRIMARY KEY, "sourceId" TEXT, "error" BOOLEAN, - "title" TEXT NOT NULL, "journal" TEXT NOT NULL, "frontMatter" TEXT, "content" TEXT diff --git a/src/markdown/index.test.ts b/src/markdown/index.test.ts index 2fbda33..f4855ce 100644 --- a/src/markdown/index.test.ts +++ b/src/markdown/index.test.ts @@ -5,7 +5,12 @@ import path from "path"; import yaml from "yaml"; import { slateToString, stringToSlate } from "./index.js"; -import { dig, parseMarkdown, parseMarkdownForImport } from "./test-utils.js"; +import { + dedent, + dig, + parseMarkdown, + parseMarkdownForImport, +} from "./test-utils.js"; // Tests can structure the data this way and use runTests to // test the various conversions. 
@@ -619,26 +624,26 @@ describe("Whacky shit", function () { }); describe("front matter parsing", function () { - const content = `--- -title: 2024-09-29 -tags: weekly-todo -createdAt: 2024-09-30T17:50:22.000Z -updatedAt: 2024-11-04T16:24:11.000Z ---- - -#weekly-todo - -Last week: [2024-09-22](../work/0193acd4fa3574698c36c4514b907c70.md) - -**I am on call this week** [On call week of 2024-09-30](../persona/0193acd4fa45731f81350d4443c1ed16.md) - -## Monday - -`; - // A very basic "it works" test // todo: End to end test with a real document, asserting against the database values it("parses front matter as an mdast node, and can be parsed with yaml.parse", function () { + const content = dedent(`--- + title: 2024-09-29 + tags: weekly-todo + createdAt: 2024-09-30T17:50:22.000Z + updatedAt: 2024-11-04T16:24:11.000Z + --- + + #weekly-todo + + Last week: [2024-09-22](../work/0193acd4fa3574698c36c4514b907c70.md) + + **I am on call this week** [On call week of 2024-09-30](../persona/0193acd4fa45731f81350d4443c1ed16.md) + + ## Monday + + `); + const parsed = parseMarkdown(content); expect(parsed.children[0].type).to.equal("yaml"); expect(parsed.children[0].value).to.equal( @@ -656,4 +661,19 @@ Last week: [2024-09-22](../work/0193acd4fa3574698c36c4514b907c70.md) updatedAt: "2024-11-04T16:24:11.000Z", }); }); + + it("handles colons in front matter titles", function () { + const content = dedent(`--- + title: "2024-09-29: A day to remember" + --- + + Last week I... + `); + + const parsed = parseMarkdown(content); + const frontMatter = yaml.parse(parsed.children[0].value as string); + expect(frontMatter).to.deep.equal({ + title: "2024-09-29: A day to remember", + }); + }); }); diff --git a/src/markdown/index.ts b/src/markdown/index.ts index 02a5887..46ea1ad 100644 --- a/src/markdown/index.ts +++ b/src/markdown/index.ts @@ -54,9 +54,11 @@ function wrapImages(tree: mdast.Root) { return tree; } -// The importer has additional support for #tag and [[WikiLink]], but converts them +// During import (processing) parse #tag and [[WikiLink]]; importer converts them // to Chronicles tags and markdown links. Future versions may support these properly. -export const parseMarkdownForImport = (markdown: string): mdast.Root => { +export const parseMarkdownForImportProcessing = ( + markdown: string, +): mdast.Root => { return fromMarkdown(markdown, { extensions: [gfm(), ofmTag(), ofmWikilink(), frontmatter(["yaml"])], mdastExtensions: [ @@ -64,7 +66,6 @@ export const parseMarkdownForImport = (markdown: string): mdast.Root => { ofmTagFromMarkdown(), ofmWikilinkFromMarkdown(), // https://github.com/micromark/micromark-extension-frontmatter?tab=readme-ov-file#preset - // todo: support toml (need toml parser) frontmatterFromMarkdown(["yaml"]), ], }); diff --git a/src/markdown/test-utils.ts b/src/markdown/test-utils.ts index 71811bb..7ba387c 100644 --- a/src/markdown/test-utils.ts +++ b/src/markdown/test-utils.ts @@ -1,7 +1,7 @@ import { Root } from "mdast"; import { - parseMarkdownForImport as parseMarkdownForImportRaw, + parseMarkdownForImportProcessing as parseMarkdownForImportRaw, parseMarkdown as parseMarkdownRaw, } from "./index.js"; @@ -43,3 +43,53 @@ export function dig(obj: any, path: string) { } }, obj); } + +// Adapted from https://github.com/MartinKolarik/dedent-js +// Copyright (c) 2015 Martin Kolárik. Released under the MIT license. +export function dedent( + templateStrings: TemplateStringsArray | string, + ...values: any[] +) { + let matches = []; + let strings = + typeof templateStrings === "string" + ? 
[templateStrings] + : templateStrings.slice(); + + // 1. Remove trailing whitespace. + strings[strings.length - 1] = strings[strings.length - 1].replace( + /\r?\n([\t ]*)$/, + "", + ); + + // 2. Find all line breaks to determine the highest common indentation level. + for (let i = 0; i < strings.length; i++) { + let match; + + if ((match = strings[i].match(/\n[\t ]+/g))) { + matches.push(...match); + } + } + + // 3. Remove the common indentation from all strings. + if (matches.length) { + let size = Math.min(...matches.map((value) => value.length - 1)); + let pattern = new RegExp(`\n[\t ]{${size}}`, "g"); + + for (let i = 0; i < strings.length; i++) { + strings[i] = strings[i].replace(pattern, "\n"); + } + } + + // 4. Remove leading whitespace. + strings[0] = strings[0].replace(/^\r?\n/, ""); + + // 5. Perform interpolation. + let string = strings[0]; + + for (let i = 0; i < values.length; i++) { + string += values[i] + strings[i + 1]; + } + + return string; +} diff --git a/src/preload/client/importer.ts b/src/preload/client/importer.ts index 815be7f..464f173 100644 --- a/src/preload/client/importer.ts +++ b/src/preload/client/importer.ts @@ -21,11 +21,11 @@ import { uuidv7obj } from "uuidv7"; import { isNoteLink, mdastToString, - parseMarkdownForImport as stringToMdast, + parseMarkdownForImportProcessing, } from "../../markdown"; import { FilesImportResolver } from "./importer/FilesImportResolver"; import { SourceType } from "./importer/SourceType"; -import { parseTitleAndFrontMatter } from "./importer/frontmatter"; +import { parseTitleAndFrontMatterForImport } from "./importer/frontmatter"; // UUID in Notion notes look like 32 character hex strings; make this somewhat more lenient const hexIdRegex = /\b[0-9a-f]{16,}\b/; @@ -90,6 +90,20 @@ export class ImporterClient { private syncs: ISyncClient, // sync is keyword? 
) {}

+  processPending = async () => {
+    const pendingImports = await this.knex("imports").where({
+      status: "pending",
+    });
+
+    for (const pendingImport of pendingImports) {
+      await this.processStagedNotes(
+        await this.ensureRoot(),
+        SourceType.Other,
+        new FilesImportResolver(this.knex, pendingImport.id, this.files),
+      );
+    }
+  };
+
   /**
    * Imports importDir into the chronicles root directory, grabbing all markdown
    * and linked files; makes the following changes:
@@ -104,7 +118,8 @@
     importDir: string,
     sourceType: SourceType = SourceType.Other,
   ) => {
-    await this.clearImportTables();
+    // await this.clearImportTables();
+    await this.clearIncomplete();
     const importerId = uuidv7obj().toHex();
     const chroniclesRoot = await this.ensureRoot();
@@ -165,7 +180,7 @@
     for await (const file of Files.walk(
       importDir,
-      30, // avoid infinite loops, random guess at reasoable depth
+      10, // random guess at reasonable max depth
       (dirent) => {
         // Skip hidden files and directories
@@ -210,7 +225,7 @@
     try {
       // todo: fallback title to filename - uuid
-      const { frontMatter, body } = parseTitleAndFrontMatter(
+      const { frontMatter, body } = parseTitleAndFrontMatterForImport(
         contents,
         name,
         sourceType,
@@ -270,8 +285,32 @@
       await this.knex("import_notes").insert(stagedNote);
     } catch (e) {
-      // todo: this error handler is far too big, obviously
-      console.error("Error processing note", file.path, e);
+      // todo: this error handler is too big
+      if ((e as any).code === "SQLITE_CONSTRAINT_PRIMARYKEY") {
+        console.log("Skipping re-import of note", file.path);
+      } else {
+        // track staging errors for review. For example, if a note has a title
+        // that is too long, or a front-matter key that is not supported, the user
+        // can use the table logs to fix and re-run the import
+        try {
+          const noteId = uuidv7obj().toHex();
+          await this.knex("import_notes").insert({
+            importerId,
+            sourcePath: file.path,
+            content: contents,
+            error: (e as any).message,
+
+            // note: these all have non-null / unique constraints:
+            chroniclesId: noteId,
+            chroniclesPath: "staging_error",
+            journal: "staging_error",
+            frontMatter: {},
+            status: "staging_error",
+          });
+        } catch (err) {
+          console.error("Error tracking staging import error", file.path, err);
+        }
+      }
     }
   };
@@ -300,12 +339,18 @@
     const items = await this.knex("import_notes").where({
       importerId,
+      status: "pending",
     });

     for await (const item of items) {
       const frontMatter: FrontMatter = JSON.parse(item.frontMatter);
-      const mdast = stringToMdast(item.content) as any as mdast.Root;
+      // note: At this stage, we parse ofmTags and ofmWikilinks, to convert them to
+      // Chronicles tags and markdown links; they are not supported natively in Chronicles
+      // as of now.
+      const mdast = parseMarkdownForImportProcessing(
+        item.content,
+      ) as any as mdast.Root;

       await this.updateNoteLinks(mdast, item, linkMapping, wikiLinkMapping);

       // NOTE: A bit hacky: When we update file links, we also mark the file as referenced
@@ -374,30 +419,46 @@
   // 1. Delete notes directory
   // 2. Run this command
   // 3. Re-run import
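  //
  // Deletes all rows from the three import tracking tables; exposed (no longer
  // private) so the preferences screen can surface it as a "clear import tables"
  // button (see src/views/preferences/index.tsx below).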
-  private clearImportTables = async () => {
+  clearImportTables = async () => {
     await this.db.exec("DELETE FROM import_notes");
     await this.db.exec("DELETE FROM import_files");
     await this.db.exec("DELETE FROM imports");
   };

+  // todo: optionally allow re-importing from a specific import directory by clearing
+  // all imports
+
+  // Clear errored or stuck notes so re-import can be attempted; all notes that
+  // are not in the 'note_created' state are deleted.
+  clearIncomplete = async () => {
+    await this.knex("import_notes").not.where({ status: "note_created" }).del();
+  };
+
   // Pull all staged notes and generate a mapping of original file path
   // (sourcePath) to the new file path (chroniclesPath). This is used to update
   // links in the notes after they are moved.
   private noteLinksMapping = async (importerId: string) => {
-    let linkMapping: Record<string, { journal: string; chroniclesId: string }> =
-      {};
-
-    const importedItems = await this.knex("import_notes")
-      .where({ importerId })
-      .select("sourcePath", "journal", "chroniclesId");
+    try {
+      let linkMapping: Record<
+        string,
+        { journal: string; chroniclesId: string }
+      > = {};
+
+      const importedItems = await this.knex("import_notes")
+        .where({ importerId })
+        .select("sourcePath", "journal", "chroniclesId");
+
+      for (const item of importedItems) {
+        if ("error" in item && item.error) continue;
+        const { journal, chroniclesId, sourcePath } = item;
+        linkMapping[sourcePath] = { journal, chroniclesId };
+      }

-    for (const item of importedItems) {
-      if ("error" in item && item.error) continue;
-      const { journal, chroniclesId, sourcePath } = item;
-      linkMapping[sourcePath] = { journal, chroniclesId };
+      return linkMapping;
+    } catch (err) {
+      console.error("Error generating link mappings", err);
+      throw err;
     }
-
-    return linkMapping;
   };

   // Pull all staged notes and generate a mapping of original note title
@@ -409,11 +470,12 @@
     const importedItems = await this.knex("import_notes")
       .where({ importerId })
-      .select("title", "journal", "chroniclesId");
+      .select("frontMatter", "journal", "chroniclesId", "error");

     for (const item of importedItems) {
       if ("error" in item && item.error) continue;
-      const { journal, chroniclesId, title } = item;
+      const { journal, chroniclesId } = item;
+      const title = JSON.parse(item.frontMatter).title;
       linkMapping[title] = { journal, chroniclesId };
     }

diff --git a/src/preload/client/importer/FilesImportResolver.ts b/src/preload/client/importer/FilesImportResolver.ts
index bfccc83..f851cc5 100644
--- a/src/preload/client/importer/FilesImportResolver.ts
+++ b/src/preload/client/importer/FilesImportResolver.ts
@@ -38,7 +38,7 @@ export class FilesImportResolver {
   ): Promise => {
     // check db for chronicles id matching name, if any
     const result = await this.knex("import_files")
-      .where({ filename: name })
+      .where({ filename: name, importerId: this.importerId })
       .select("chroniclesId", "extension")
       .first()!;

diff --git a/src/preload/client/importer/importer.test.ts b/src/preload/client/importer/frontmatter.test.ts
similarity index 79%
rename from src/preload/client/importer/importer.test.ts
rename to src/preload/client/importer/frontmatter.test.ts
index 452553a..bf53dfe 100644
--- a/src/preload/client/importer/importer.test.ts
+++ b/src/preload/client/importer/frontmatter.test.ts
@@ -1,19 +1,23 @@
-// Temporary helper to test frontmatter parsing and dump the results
-// While I dev. May keep this around, but its pretty hacky and far
-// from complete or a real test suite.
+// todo drop this diff lib if migrated to mocha
 import { diff } from "deep-object-diff";
-import { ImporterClient } from "../importer";
 import { SourceType } from "./SourceType";
-import { parseTitleAndFrontMatter } from "./frontmatter";
+import { parseTitleAndFrontMatterForImport } from "./frontmatter";

-export function runTests(importer: ImporterClient) {
-  runFrontmatterTests(importer);
-}
+import { expect } from "chai";
+import { describe, it } from "mocha";
+import { dedent } from "../../../markdown/test-utils";

 // to the console; can convert to real tests at the end.
-function runFrontmatterTests(importer: ImporterClient) {
+// todo(chris): Finish refactoring all these tests to be run with Mocha;
+// they are left over from some manual testing that required preload,
+// i.e., could not be run via mocha
+function runFrontmatterTests() {
   for (const testCase of titleFrontMatterTestCases) {
-    const result = parseTitleAndFrontMatter(
+    it(testCase.input, () => {
+      expect(testCase.input).to.be.a("string");
+    });
+
+    const result = parseTitleAndFrontMatterForImport(
       testCase.input,
       "Dont use this title",
       SourceType.Notion,
@@ -363,3 +367,65 @@ export const inferOrGenerateJournalNameTestCases = [
     output: "TODO_...", // shorter
   },
 ];
+
+describe("Frontmatter parsing", () => {
+  it("this does not break?", () => {
+    const parsed = parseTitleAndFrontMatterForImport(
+      dedent(
+        `---
+title: What chronicles was
+tags:
+  - tags, thesixthprototype
+createdAt: 2024-06-30T14:19:17.801Z
+updatedAt: 2024-07-02T04:52:50.639Z
+---`,
+      ),
+      "",
+      SourceType.Other,
+    );
+  });
+
+  // note: Rather than trying to repair front matter issues, like colons in values (title),
+  // I ended up manually fixing in my source (imported) documents; leaving this breadcrumb
+  // here in case I want to revisit this later.
+  // it("colons in front matter values", () => {
+  //   const parsedDoc = parseDocument(
+  //     `title: 2024-01-01: a new year\ncreatedAt: 2024-01-01:00:00:00\n`,
+  //   );
+  //   console.log(parsedDoc.errors[0].linePos); // code: BLOCK_AS_IMPLICIT_KEY, pos: [7,8], [ { line: 1, col: 8 }, { line: 1, col: 9 } ]
+  //   const parsed = parseTitleAndFrontMatterForImport(
+  //     dedent(`---
+  //       title: 2024-01-01: A new year
+  //       ---
+  //     `),
+  //     "",
+  //     SourceType.Other,
+  //   );
+  // });
+
+  it("tags or wikilinks in content, when importing from default source", () => {
+    const parsed = parseTitleAndFrontMatterForImport(
+      dedent(`---
+        title: What chronicles was
+        ---
+
+        This is some content with a #tag and a [[wikilink]].`),
+      "",
+      SourceType.Other,
+    );
+
+    // actually testing ^ does not throw, because it re-serializes the body without
+    // the front matter, and was choking when the wrong parser was used, which parsed
+    // ofmTags but could not re-serialize them at this step.
+    expect(parsed.frontMatter.title).to.equal("What chronicles was");
+  });
+
+  it("empty contents -> default front matter / no error", () => {
+    const parsed = parseTitleAndFrontMatterForImport("", "", SourceType.Other);
+    expect(parsed.body).to.equal("");
+    expect(parsed.frontMatter).to.deep.equal({
+      title: "",
+      tags: [],
+    });
+  });
+});

diff --git a/src/preload/client/importer/frontmatter.ts b/src/preload/client/importer/frontmatter.ts
index 0d8b6ac..0cc103b 100644
--- a/src/preload/client/importer/frontmatter.ts
+++ b/src/preload/client/importer/frontmatter.ts
@@ -1,10 +1,6 @@
 import { Stats } from "fs";
 import yaml from "yaml";
-import {
-  mdastToString,
-  parseMarkdown,
-  parseMarkdownForImport,
-} from "../../../markdown";
+import { mdastToString, parseMarkdown } from "../../../markdown";
 import { SourceType } from "../importer/SourceType";
 import { FrontMatter } from "../types";
@@ -19,28 +15,65 @@ interface RawExtractFrontMatterResponse {
   body: string;
 }

-export const parseTitleAndFrontMatter = (
+/**
+ * For notes within Chronicles (created by it, or already imported). Other logic in this file
+ * is for importing documents that may have front matter, or may not, in different formats, etc.
+ *
+ * @param content
+ * @param stats - to set defaults and ensure dates are always present
+ * @returns - { frontMatter, body }
+ */
+export function parseChroniclesFrontMatter(content: string, stats: Stats) {
+  const { frontMatter, body } = extractFronMatter(content);
+
+  frontMatter.tags = frontMatter.tags || [];
+  frontMatter.title = frontMatter.title;
+  frontMatter.createdAt =
+    frontMatter.createdAt || (stats.birthtime || stats.mtime).toISOString();
+  frontMatter.updatedAt = frontMatter.updatedAt || stats.mtime.toISOString();
+
+  return {
+    frontMatter,
+    body,
+  } as { frontMatter: FrontMatter; body: string };
+}
+
+export const parseTitleAndFrontMatterForImport = (
   contents: string,
   filename: string,
   sourceType: SourceType,
 ): ParseTitleAndFrontMatterRes => {
+  let fm;
+
   // My Notion files were all in a database and hence exported with
   // a kind of "front matter"; can pull title from that.
   if (sourceType === "notion") {
-    return parseTitleAndFrontMatterNotion(contents);
+    fm = parseTitleAndFrontMatterNotion(contents);
   } else {
-    return parseTitleAndFrontMatterMarkdown(contents, filename);
+    fm = parseTitleAndFrontMatterDefault(contents, filename);
+  }
+
+  if (!Array.isArray(fm.frontMatter.tags)) {
+    fm.frontMatter.tags = [fm.frontMatter.tags as any].filter(
+      Boolean,
+    ) as string[];
   }
+
+  return fm;
 };

-function parseTitleAndFrontMatterMarkdown(
+// For importing documents, staging phase; this is for
+// non-Notion markdown documents, which may have front matter
+// demarcated the typical way (--- at top of file), or not at all.
+function parseTitleAndFrontMatterDefault(
   contents: string,
   filename: string,
 ): ParseTitleAndFrontMatterRes {
-  const { frontMatter, body } = extractFronMatter(
-    contents,
-    parseMarkdownForImport,
-  );
+  // note: unlike the importer processing phase, we do not parse ofmWiki or ofmTags
+  // here, because we don't convert them at this stage; we only re-serialize the body
+  // without the front matter. If they are parsed here, this routine will fail since
+  // it lacks serialization logic for those nodes.
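+  // Extract any front matter; whatever remains is re-serialized as the body,
+  // and the filename becomes the fallback title (assigned just below).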
+ const { frontMatter, body } = extractFronMatter(contents); frontMatter.title = frontMatter.title || filename; return { @@ -49,55 +82,24 @@ function parseTitleAndFrontMatterMarkdown( }; } -function extractFronMatter( - contents: string, - parse = parseMarkdown, -): { +function extractFronMatter(contents: string): { frontMatter: Partial; body: string; } { - const mdast = parse(contents); - if (mdast.children[0].type === "yaml") { - const frontMatter = yaml.parse(mdast.children[0].value); - mdast.children = mdast.children.slice(1); - const contents = mdastToString(mdast); - return { - frontMatter, - body: contents, - }; - } else { - return { - frontMatter: {}, - body: contents, - }; - } -} + let frontMatter = {}; + let body = contents.trim(); -// extract well formatted front matter from content, and return the front matter and body -// stats to set defaults and ensure dates are always present -export function parseChroniclesFrontMatter(content: string, stats: Stats) { - const { frontMatter, body } = extractFronMatter(content); - - frontMatter.tags = frontMatter.tags || []; - frontMatter.title = frontMatter.title; - frontMatter.createdAt = - frontMatter.createdAt || (stats.birthtime || stats.mtime).toISOString(); - frontMatter.updatedAt = frontMatter.updatedAt || stats.mtime.toISOString(); + if (body) { + const mdast = parseMarkdown(body); - // Prior version of Chronicles manually encoded as comma separated tags, - // then re-parsed out. Now using proper yaml parsing, this can be removed - // once all my personal notes are migrated. - if ("tags" in frontMatter && typeof frontMatter.tags === "string") { - frontMatter.tags = (frontMatter.tags as string) - .split(",") - .map((tag: string) => tag.trim()) - .filter(Boolean); + if (mdast.children[0].type === "yaml") { + frontMatter = yaml.parse(mdast.children[0].value); + mdast.children = mdast.children.slice(1); + body = mdastToString(mdast); + } } - return { - frontMatter, - body, - } as { frontMatter: FrontMatter; body: string }; + return { frontMatter, body }; } /** @@ -107,9 +109,9 @@ export function parseChroniclesFrontMatter(content: string, stats: Stats) { function parseTitleAndFrontMatterNotion( contents: string, ): ParseTitleAndFrontMatterRes { - const { title, rawFrontMatter, body } = extractRawFrontMatter(contents); + const { title, rawFrontMatter, body } = extractRawFrontMatterNotion(contents); const frontMatter = rawFrontMatter.length - ? parseExtractedFrontMatter(rawFrontMatter) + ? parseExtractedFrontMatterNotion(rawFrontMatter) : {}; frontMatter.title = title; @@ -120,7 +122,7 @@ function parseTitleAndFrontMatterNotion( * Attempt to extract a title and front matter from a string of contents; * return the original body on error. */ -function extractRawFrontMatter( +function extractRawFrontMatterNotion( contents: string, ): RawExtractFrontMatterResponse { try { @@ -241,8 +243,8 @@ function extractRawFrontMatter( * Parse the front matter from a string that has already been processed * by preprocessRawFrontMatter. */ -function parseExtractedFrontMatter(rawFrontMatter: string) { - const processedFrontMatter = preprocessRawFrontMatter(rawFrontMatter); +function parseExtractedFrontMatterNotion(rawFrontMatter: string) { + const processedFrontMatter = preprocessRawFrontMatterNotion(rawFrontMatter); try { // NOTE: Returns a string if no front matter is present...wtf. @@ -314,7 +316,7 @@ function parseExtractedFrontMatter(rawFrontMatter: string) { * See body comments for explanations. 
Should be called on the raw string before * calling yaml.parse. */ -function preprocessRawFrontMatter(content: string) { +function preprocessRawFrontMatterNotion(content: string) { return ( content // Handle keys with no values by assigning empty strings diff --git a/src/preload/client/index.ts b/src/preload/client/index.ts index 0221604..2acec33 100644 --- a/src/preload/client/index.ts +++ b/src/preload/client/index.ts @@ -10,7 +10,6 @@ import { TagsClient } from "./tags"; import { IClient } from "./types"; import Store from "electron-store"; -import { runTests } from "./importer/importer.test"; const settings = new Store({ name: "settings", }); @@ -40,7 +39,7 @@ let client: IClient; class TestsClient { constructor(private importer: ImporterClient) {} runTests = () => { - runTests(this.importer); + console.log("todo: fixme"); }; } diff --git a/src/views/preferences/index.tsx b/src/views/preferences/index.tsx index a58aff0..589fc1d 100644 --- a/src/views/preferences/index.tsx +++ b/src/views/preferences/index.tsx @@ -70,6 +70,19 @@ const Preferences = observer(() => { } } + async function clearImportTable() { + store.loading = true; + try { + await client.importer.clearImportTables(); + store.loading = false; + toaster.success("Import table cleared"); + } catch (e) { + console.error("Error clearing import table", e); + store.loading = false; + toaster.danger("Failed to clear import table"); + } + } + async function sync() { if (store.loading) return; @@ -115,10 +128,10 @@ const Preferences = observer(() => { -

-            Settings directory
+            Settings

+            Settings file: {client.preferences.settingsPath()}

-            Location of files / directories are persisted to the settings file
-            located at {client.preferences.settingsPath()}
+            Database file: {store.preferences.DATABASE_URL}

@@ -144,21 +157,14 @@ const Preferences = observer(() => {

             Import markdown directory

             Import a directory of markdown files. Experimental.

-            The following file / directory names will be skipped:
-            {Array.from(SKIPPABLE_FILES).map((file) => (
-              {file}
-            ))}
+            The following file / directory names will be skipped:
+            {Array.from(SKIPPABLE_FILES).join(", ")}

             Other than _attachments, the following prefixes will cause a file or
-            directory to be skipped:
+            directory to be skipped: {Array.from(SKIPPABLE_PREFIXES).join(", ")}

-            {Array.from(SKIPPABLE_PREFIXES).map((prefix) => (
-              {prefix}
-            ))}
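
A sketch of how the retained import tables can be inspected after a run, e.g. to review staging errors before clearing and re-importing. This is illustrative only and not part of the patch: it assumes knex with the better-sqlite3 client and a stand-in database filename, but the table and column names (import_notes, status, sourcePath, error) match the code above.

import knex from "knex";

// Illustrative connection; Chronicles manages the real database location itself.
const db = knex({
  client: "better-sqlite3",
  connection: { filename: "./chronicles.db" },
  useNullAsDefault: true,
});

// Rows that failed during staging keep their raw content and error message with
// status = "staging_error"; listing them shows which source files need fixing
// before re-running the import (clearIncomplete() deletes them on the next run).
async function listStagingErrors(): Promise<void> {
  const rows = await db("import_notes")
    .where({ status: "staging_error" })
    .select("sourcePath", "error");

  for (const { sourcePath, error } of rows) {
    console.log(`${sourcePath}: ${error}`);
  }
}

listStagingErrors().finally(() => db.destroy());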