Skip to content
This repository has been archived by the owner on Jul 16, 2024. It is now read-only.

Show default values in CLI help #121

Open
wants to merge 1 commit into
base: develop
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 25 additions & 14 deletions poker_ai/ai/runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,43 +13,54 @@
Options:
--strategy_interval INTEGER Update the current strategy whenever the
iteration % strategy_interval == 0.
[default: 20]
--n_iterations INTEGER The total number of iterations we should
train the model for.
train the model for. [default: 1500]
--lcfr_threshold INTEGER A threshold for linear CFR which means don't
apply discounting before this iteration.
[default: 400]
--discount_interval INTEGER Discount the current regret and strategy
whenever iteration % discount_interval == 0.
[default: 400]
--prune_threshold INTEGER When a uniform random number is less than
95%, and the iteration > prune_threshold,
use CFR with pruning.
use CFR with pruning. [default: 400]
--c INTEGER Pruning threshold for regret, which means
when we are using CFR with pruning and have
a state with a regret of less than `c`, then
we'll elect to not recusrively visit it and
it's child nodes.
we'll elect to not recursively visit it and
its child nodes. [default: -20000]
--n_players INTEGER The number of players in the game.
[default: 3]
--dump_iteration INTEGER When the iteration % dump_iteration == 0, we
will compute a new strategy and write that
to the accumlated strategy, which gets
normalised at a later time.
to the accumulated strategy, which gets
normalised at a later time. [default: 20]
--update_threshold INTEGER When the iteration is greater than
update_threshold we can start updating the
strategy.
strategy. [default: 400]
--lut_path TEXT The path to the files for clustering the
infosets.
infosets. [default: .]
--pickle_dir TEXT Whether or not the lut files are pickle
files. This lookup method is deprecated.
[default: False]
--single_process / --multi_process
Either use or don't use multiple processes.
[default: False]
--sync_update_strategy / --async_update_strategy
Do or don't synchronise update_strategy.
--sync_cfr / --async_cfr Do or don't synchronuse CFR.
[default: False]
--sync_cfr / --async_cfr Do or don't synchronise CFR. [default:
False]
--sync_discount / --async_discount
Do or don't synchronise the discounting.
[default: False]
--sync_serialise / --async_serialise
Do or don't synchronise the serialisation.
--nickname TEXT The nickname of the study.
--help Show this message and exit.
[default: False]
--nickname TEXT The nickname of the study. [default: ]
--help Show this message and exit. [default:
False]
```
"""
import logging
Expand Down Expand Up @@ -116,7 +127,7 @@ def resume(server_config_path: str):
_safe_search(server)


@train.command()
@train.command(context_settings=dict(show_default=True))
@click.option(
"--strategy_interval",
default=20,
Expand Down Expand Up @@ -157,7 +168,7 @@ def resume(server_config_path: str):
help=(
"Pruning threshold for regret, which means when we are using CFR with "
"pruning and have a state with a regret of less than `c`, then we'll "
"elect to not recusrively visit it and it's child nodes."
"elect to not recursively visit it and it's child nodes."
),
)
@click.option("--n_players", default=3, help="The number of players in the game.")
Expand All @@ -166,7 +177,7 @@ def resume(server_config_path: str):
default=20,
help=(
"When the iteration % dump_iteration == 0, we will compute a new strategy "
"and write that to the accumlated strategy, which gets normalised at a "
"and write that to the accumulated strategy, which gets normalised at a "
"later time."
),
)
Expand Down
23 changes: 12 additions & 11 deletions poker_ai/clustering/runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,38 +6,39 @@
Options:
--low_card_rank INTEGER The starting hand rank from 2 through 14 for
the deck we want to cluster. We recommend
starting small.
starting small. [default: 10]
--high_card_rank INTEGER The starting hand rank from 2 through 14 for
the deck we want to cluster. We recommend
starting small.
starting small. [default: 14]
--n_river_clusters INTEGER The number of card information buckets we
would like to create for the river. We
recommend to start small.
recommend to start small. [default: 50]
--n_turn_clusters INTEGER The number of card information buckets we
would like to create for the turn. We
recommend to start small.
recommend to start small. [default: 50]
--n_flop_clusters INTEGER The number of card information buckets we
would like to create for the flop. We
recommend to start small.
recommend to start small. [default: 50]
--n_simulations_river INTEGER The number of opponent hand simulations we
would like to run on the river. We recommend
to start small.
to start small. [default: 6]
--n_simulations_turn INTEGER The number of river card hand simulations we
would like to run on the turn. We recommend
to start small.
to start small. [default: 6]
--n_simulations_flop INTEGER The number of turn card hand simulations we
would like to run on the flop. We recommend
to start small.
to start small. [default: 6]
--save_dir TEXT Path to directory to save card info lookup
table and betting stage centroids.
--help Show this message and exit.
table and betting stage centroids. [default:
]
--help Show this message and exit. [default: False]
"""
import click

from poker_ai.clustering.card_info_lut_builder import CardInfoLutBuilder


@click.command()
@click.command(context_settings=dict(show_default=True))
@click.option(
"--low_card_rank",
default=10,
Expand Down
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
blessed==1.17.5
click==7.0
click==7.1
dill==0.3.1.1
enlighten==1.5.2
ipdb==0.12.3
Expand Down