You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
While debugging I ran into an error. Can anyone help me figure out what is wrong? Thanks.
# Comma-separated list of YAML config files, merged recursively.
# NOTE(fix): the original default used literal newlines inside a plain
# "..." string, which is a SyntaxError in Python; implicit string
# concatenation keeps the same comma-separated value on one logical line.
tf.flags.DEFINE_string("config_paths",
                       "../example_configs/conv_seq2seq.yml,"
                       "../example_configs/train_seq2seq.yml,"
                       "../example_configs/text_metrics_bpe.yml",
                       """Path to a YAML configuration files defining FLAG
values. Multiple files can be separated by commas.
Files are merged recursively. Setting a key in these
files is equivalent to setting the FLAG value with
the same name.""")
# Training hooks, given as a YAML list string (default: none).
tf.flags.DEFINE_string("hooks", "[]",
"""YAML configuration string for the
training hooks to use.""")
# Evaluation metrics, given as a YAML list string (default: none).
tf.flags.DEFINE_string("metrics", "[]",
"""YAML configuration string for the
training metrics to use.""")
# Model class to instantiate; empty means it must come from config files.
tf.flags.DEFINE_string("model", "",
"""Name of the model class.
Can be either a fully-qualified name, or the name
of a class defined in seq2seq.models.""")
# Model hyper-parameters as a YAML/JSON mapping; here it points the
# source/target vocabularies at the toy_reverse dataset.
tf.flags.DEFINE_string("model_params", '{"vocab_source": "/root/nmt_data/toy_reverse/train/vocab.sources.txt","vocab_target": "/root/nmt_data/toy_reverse/train/vocab.targets.txt"}',
"""YAML configuration string for the model
parameters.""")
# Input pipeline for training data (class name + params as YAML/JSON).
tf.flags.DEFINE_string("input_pipeline_train", '{"class": "ParallelTextInputPipelineFairseq", "params": {"source_files": "/root/nmt_data/toy_reverse/train/sources.txt", "target_files": "/root/nmt_data/toy_reverse/train/targets.txt"}}',
"""YAML configuration string for the training
data input pipeline.""")
# Input pipeline for the dev/validation set, same format as above.
tf.flags.DEFINE_string("input_pipeline_dev", '{"class": "ParallelTextInputPipelineFairseq", "params": {"source_files": "/root/nmt_data/toy_reverse/dev/sources.txt", "target_files": "/root/nmt_data/toy_reverse/dev/targets.txt"}}',
"""YAML configuration string for the development
data input pipeline.""")
# Optional sequence-length bucketing; None disables bucketing.
tf.flags.DEFINE_string("buckets", None,
"""Buckets input sequences according to these length.
A comma-separated list of sequence length buckets, e.g.
"10,20,30" would result in 4 buckets:
<10, 10-20, 20-30, >30. None disabled bucketing. """)
# Mini-batch size shared by training and evaluation.
tf.flags.DEFINE_integer("batch_size", 32,
"""Batch size used for training and evaluation.""")
# Where checkpoints/summaries go; None means a temp dir is created.
tf.flags.DEFINE_string("output_dir", None,
"""The directory to write model checkpoints and summaries
to. If None, a local temporary directory is created.""")
# Training parameters
# Which Estimator schedule/experiment function to run.
tf.flags.DEFINE_string("schedule", "continuous_train_and_eval",
"""Estimator function to call, defaults to
continuous_train_and_eval for local run""")
# Hard cap on training steps; None trains until stopped externally.
tf.flags.DEFINE_integer("train_steps", None,
"""Maximum number of training steps to run.
If None, train forever.""")
# Evaluation cadence, in training steps.
tf.flags.DEFINE_integer("eval_every_n_steps", 1000,
"Run evaluation on validation data every N steps.")
# RunConfig Flags
# Seed for TF initializers; set for reproducible reruns.
tf.flags.DEFINE_integer("tf_random_seed", None,
"""Random seed for TensorFlow initializers. Setting
this value allows consistency between reruns.""")
# Time-based checkpointing; mutually exclusive with step-based below.
tf.flags.DEFINE_integer("save_checkpoints_secs", None,
"""Save checkpoints every this many seconds.
Can not be specified with save_checkpoints_steps.""")
# Step-based checkpointing; mutually exclusive with time-based above.
tf.flags.DEFINE_integer("save_checkpoints_steps", None,
"""Save checkpoints every this many steps.
Can not be specified with save_checkpoints_secs.""")
# Rolling checkpoint retention (0/None keeps everything).
tf.flags.DEFINE_integer("keep_checkpoint_max", 5,
"""Maximum number of recent checkpoint files to keep.
As new files are created, older files are deleted.
If None or 0, all checkpoint files are kept.""")
# Additionally retain one checkpoint per N hours of training.
tf.flags.DEFINE_integer("keep_checkpoint_every_n_hours", 4,
"""In addition to keeping the most recent checkpoint
files, keep one checkpoint file for every N hours of
training.""")
# Per-GPU memory cap as a fraction of total memory.
tf.flags.DEFINE_float("gpu_memory_fraction", 1.0,
"""Fraction of GPU memory used by the process on
each GPU uniformly on the same machine.""")
# Grow GPU allocation on demand instead of reserving it up front.
tf.flags.DEFINE_boolean("gpu_allow_growth", False,
"""Allow GPU memory allocation to grow
dynamically.""")
# Debug aid: log which device each op is placed on.
tf.flags.DEFINE_boolean("log_device_placement", False,
"""Log the op placement to devices""")
The text was updated successfully, but these errors were encountered:
While debugging I ran into an error. Can anyone help me figure out what is wrong? Thanks.
# Comma-separated list of YAML config files, merged recursively.
# NOTE(fix): the original default used literal newlines inside a plain
# "..." string, which is a SyntaxError in Python; implicit string
# concatenation keeps the same comma-separated value on one logical line.
tf.flags.DEFINE_string("config_paths",
                       "../example_configs/conv_seq2seq.yml,"
                       "../example_configs/train_seq2seq.yml,"
                       "../example_configs/text_metrics_bpe.yml",
                       """Path to a YAML configuration files defining FLAG
values. Multiple files can be separated by commas.
Files are merged recursively. Setting a key in these
files is equivalent to setting the FLAG value with
the same name.""")
# Training hooks, given as a YAML list string (default: none).
tf.flags.DEFINE_string("hooks", "[]",
"""YAML configuration string for the
training hooks to use.""")
# Evaluation metrics, given as a YAML list string (default: none).
tf.flags.DEFINE_string("metrics", "[]",
"""YAML configuration string for the
training metrics to use.""")
# Model class to instantiate; empty means it must come from config files.
tf.flags.DEFINE_string("model", "",
                       """Name of the model class.
Can be either a fully-qualified name, or the name
of a class defined in seq2seq.models.""")
# Model hyper-parameters as a YAML/JSON mapping; here it points the
# source/target vocabularies at the toy_reverse dataset.
# NOTE(fix): the original paste fused the end of the previous statement and
# the start of this one onto a single line (`.""")tf.flags...`), which is a
# SyntaxError; the two DEFINE calls are now separate statements.
tf.flags.DEFINE_string("model_params", '{"vocab_source": "/root/nmt_data/toy_reverse/train/vocab.sources.txt","vocab_target": "/root/nmt_data/toy_reverse/train/vocab.targets.txt"}',
                       """YAML configuration string for the model
parameters.""")
# Input pipeline for training data (class name + params as YAML/JSON).
tf.flags.DEFINE_string("input_pipeline_train", '{"class": "ParallelTextInputPipelineFairseq", "params": {"source_files": "/root/nmt_data/toy_reverse/train/sources.txt", "target_files": "/root/nmt_data/toy_reverse/train/targets.txt"}}',
"""YAML configuration string for the training
data input pipeline.""")
# Input pipeline for the dev/validation set, same format as above.
tf.flags.DEFINE_string("input_pipeline_dev", '{"class": "ParallelTextInputPipelineFairseq", "params": {"source_files": "/root/nmt_data/toy_reverse/dev/sources.txt", "target_files": "/root/nmt_data/toy_reverse/dev/targets.txt"}}',
"""YAML configuration string for the development
data input pipeline.""")
# Optional sequence-length bucketing; None disables bucketing.
tf.flags.DEFINE_string("buckets", None,
"""Buckets input sequences according to these length.
A comma-separated list of sequence length buckets, e.g.
"10,20,30" would result in 4 buckets:
<10, 10-20, 20-30, >30. None disabled bucketing. """)
# Mini-batch size shared by training and evaluation.
tf.flags.DEFINE_integer("batch_size", 32,
"""Batch size used for training and evaluation.""")
# Where checkpoints/summaries go; None means a temp dir is created.
tf.flags.DEFINE_string("output_dir", None,
"""The directory to write model checkpoints and summaries
to. If None, a local temporary directory is created.""")
# Training parameters
# Which Estimator schedule/experiment function to run.
tf.flags.DEFINE_string("schedule", "continuous_train_and_eval",
"""Estimator function to call, defaults to
continuous_train_and_eval for local run""")
# Hard cap on training steps; None trains until stopped externally.
tf.flags.DEFINE_integer("train_steps", None,
"""Maximum number of training steps to run.
If None, train forever.""")
# Evaluation cadence, in training steps.
tf.flags.DEFINE_integer("eval_every_n_steps", 1000,
"Run evaluation on validation data every N steps.")
# RunConfig Flags
# Seed for TF initializers; set for reproducible reruns.
tf.flags.DEFINE_integer("tf_random_seed", None,
"""Random seed for TensorFlow initializers. Setting
this value allows consistency between reruns.""")
# Time-based checkpointing; mutually exclusive with step-based below.
tf.flags.DEFINE_integer("save_checkpoints_secs", None,
"""Save checkpoints every this many seconds.
Can not be specified with save_checkpoints_steps.""")
# Step-based checkpointing; mutually exclusive with time-based above.
tf.flags.DEFINE_integer("save_checkpoints_steps", None,
"""Save checkpoints every this many steps.
Can not be specified with save_checkpoints_secs.""")
# Rolling checkpoint retention (0/None keeps everything).
tf.flags.DEFINE_integer("keep_checkpoint_max", 5,
"""Maximum number of recent checkpoint files to keep.
As new files are created, older files are deleted.
If None or 0, all checkpoint files are kept.""")
# Additionally retain one checkpoint per N hours of training.
tf.flags.DEFINE_integer("keep_checkpoint_every_n_hours", 4,
"""In addition to keeping the most recent checkpoint
files, keep one checkpoint file for every N hours of
training.""")
# Per-GPU memory cap as a fraction of total memory.
tf.flags.DEFINE_float("gpu_memory_fraction", 1.0,
"""Fraction of GPU memory used by the process on
each GPU uniformly on the same machine.""")
# Grow GPU allocation on demand instead of reserving it up front.
tf.flags.DEFINE_boolean("gpu_allow_growth", False,
"""Allow GPU memory allocation to grow
dynamically.""")
# Debug aid: log which device each op is placed on.
tf.flags.DEFINE_boolean("log_device_placement", False,
"""Log the op placement to devices""")
The text was updated successfully, but these errors were encountered: