[rank0]: Traceback (most recent call last):
[rank0]: File "/home/barba/lean-experiments/mau/testing.py", line 78, in <module>
[rank0]: main(args.n_particles, args.max_tokens)
[rank0]: File "/home/barba/lean-experiments/mau/testing.py", line 65, in main
[rank0]: posterior = asyncio.run(eval_informal(statement['natural'], n_particles, max_tokens))
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank0]: File "/opt/conda/envs/myenv/lib/python3.11/asyncio/runners.py", line 190, in run
[rank0]: return runner.run(main)
[rank0]: ^^^^^^^^^^^^^^^^
[rank0]: File "/opt/conda/envs/myenv/lib/python3.11/asyncio/runners.py", line 118, in run
[rank0]: return self._loop.run_until_complete(task)
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank0]: File "/opt/conda/envs/myenv/lib/python3.11/asyncio/base_events.py", line 654, in run_until_complete
[rank0]: return future.result()
[rank0]: ^^^^^^^^^^^^^^^
[rank0]: File "/home/barba/lean-experiments/mau/testing.py", line 20, in eval_informal
[rank0]: llm_potential = PromptedLLM.from_name("google/gemma-3-1b-pt", temperature=0.7, engine_opts={"max_model_len": 4096})
[rank0]: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[rank0]: File "/home/barba/genlm-control/genlm/control/potential/built_in/llm.py", line 126, in from_name
[rank0]: return cls(
[rank0]: ^^^^
[rank0]: File "/home/barba/genlm-control/genlm/control/potential/built_in/llm.py", line 93, in __init__
[rank0]: super().__init__(vocabulary=V)
[rank0]: File "/home/barba/genlm-control/genlm/control/potential/base.py", line 76, in __init__
[rank0]: raise ValueError(f"Duplicate token {x!r} found in vocabulary")
[rank0]: ValueError: Duplicate token b'\n' found in vocabulary
Using this model ("google/gemma-3-1b-pt") raises the error above when it is passed to
PromptedLLM.from_name — the tokenizer's decoded vocabulary contains b'\n' more than once.