Skip to content

Commit

Permalink
[API] Slight reorganization + add missing preambles
Browse files Browse the repository at this point in the history
  • Loading branch information
Aedial committed Jun 9, 2023
1 parent 7acadb6 commit f1edacb
Show file tree
Hide file tree
Showing 4 changed files with 168 additions and 138 deletions.
2 changes: 2 additions & 0 deletions novelai_api/Keystore.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,7 @@ def decrypt(self, key: bytes):

keystore = self.data.copy()

# keystore is empty, create a new one
if "keystore" in keystore and keystore["keystore"] is None: # keystore is null when empty
self._nonce = random(SecretBox.NONCE_SIZE)
self._version = 2
Expand All @@ -110,6 +111,7 @@ def decrypt(self, key: bytes):

return

# keystore is not empty, decrypt it
keystore = loads(b64decode(self.data["keystore"]).decode())
SchemaValidator.validate("schema_keystore_encrypted", keystore)

Expand Down
4 changes: 4 additions & 0 deletions novelai_api/Preset.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,8 +100,12 @@ class Model(StrEnum):
Inline = "infillmodel"


#: Prompt sent to the model when the context is empty
PREAMBLE = {
# Model.Calliope: "⁂\n",
Model.Sigurd: "⁂\n",
Model.Genji: [60, 198, 198], # "]\n\n" - impossible combination, so it is pre-tokenized
Model.Snek: "<|endoftext|>\n",
Model.Euterpe: "\n***\n",
Model.Krake: "<|endoftext|>[ Prologue ]\n",
Model.Clio: "[ Author: Various ]\n[ Prologue ]\n",
Expand Down
14 changes: 11 additions & 3 deletions novelai_api/_high_level.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,12 +195,12 @@ async def upload_user_content(
object_data = data["data"]

if encrypt:
if object_type in ("stories", "storycontent", "aimodules", "shelf"):
if object_type in ("stories", "storycontent", "aimodules"):
if keystore is None:
raise ValueError("'keystore' is not set, cannot encrypt data")

encrypt_user_data(data, keystore)
elif object_type in ("presets",):
elif object_type in ("shelf", "presets"):
compress_user_data(data)

# clean data introduced by decrypt_user_data
Expand Down Expand Up @@ -255,7 +255,7 @@ async def _generate(
**kwargs,
):
"""
Generate text with streaming support
Generate text with streaming support.
:param prompt: Context to give to the AI (raw text or list of tokens)
:param model: Model to use for the AI
Expand Down Expand Up @@ -328,6 +328,10 @@ async def generate(
To decode the text, the :meth:`novelai_api.utils.b64_to_tokens`
and :meth:`novelai_api.Tokenizer.Tokenizer.decode` methods should be used.
As the model accepts a complete prompt, the context building must be done before calling this function.
Any content going beyond the token limit will be truncated, starting from the top.
:param prompt: Context to give to the AI (raw text or list of tokens)
:param model: Model to use for the AI
:param preset: Preset to use for the generation settings
Expand Down Expand Up @@ -367,6 +371,10 @@ async def generate_stream(
"""
Generate text. The text is returned one token at a time, as it is generated.
As the model accepts a complete prompt, the context building must be done before calling this function.
Any content going beyond the token limit will be truncated, starting from the top.
:param prompt: Context to give to the AI (raw text or list of tokens)
:param model: Model to use for the AI
:param preset: Preset to use for the generation settings
Expand Down
Loading

0 comments on commit f1edacb

Please sign in to comment.