feat: add resumption and the ability to check completed work
kalpaa/completions/__init__.py (new file, +142 lines)
@@ -0,0 +1,142 @@
+import pathlib
+import kalpaa.config
+import logging
+from enum import Enum
+import filecmp
+
+_logger = logging.getLogger(__name__)
+
+KALPAA_COMPLETE = "kalpaa.complete"
+COMPLETIONS_DIR = "completions"
+
+
+# let us implement our own stuff later; this just handles checking if a thing exists or not.
+
+
+class CompletionsStatus(Enum):
+    NOT_COMPLETE = "not_complete"
+    INVALID = "invalid"
+    COMPLETE = "complete"
+
+
+def _cwd_file_matches_previous(root_dir: pathlib.Path, file_name: str) -> bool:
+    """
+    Compare the file in the current working directory with the file in the target root.
+
+    Returns True if they match (meaning continuation is possible), False otherwise.
+
+    Does a byte-by-byte comparison.
+
+    :param root_dir: the target root directory holding the previously copied file
+    :param file_name: the file name to compare in both locations
+    :return: True if the files match, False otherwise
+    """
+    current_file = pathlib.Path.cwd() / file_name
+    copied_file = root_dir / file_name
+
+    result = filecmp.cmp(current_file, copied_file, shallow=False)
+    _logger.debug(f"Compared {current_file} with {copied_file}, got {result}")
+    return result
+
+
+def check_completion_file(config: kalpaa.Config, filename: str) -> CompletionsStatus:
+    """
+    Check whether the completion file exists for a given filename.
+
+    :param config: the config object
+    :param filename: the filename to check
+    :return: the completion status
+    """
+    if not config.general_config.check_completions:
+        _logger.debug("Not checking completions")
+        return CompletionsStatus.NOT_COMPLETE
+
+    root_dir = config.general_config.root_directory
+    completions_dir = root_dir / COMPLETIONS_DIR
+
+    # completions_dir.mkdir(exist_ok=True, parents=True)
+    if not completions_dir.is_dir():
+        _logger.debug(
+            f"Completions dir {completions_dir=} does not exist and it should, invalid!"
+        )
+        return CompletionsStatus.INVALID
+
+    complete_file = completions_dir / filename
+    if complete_file.exists():
+        _logger.info(f"Found {complete_file}, exiting")
+        return CompletionsStatus.COMPLETE
+    else:
+        _logger.info(f"Did not find {complete_file}, continuing")
+        return CompletionsStatus.NOT_COMPLETE
+
+
+def set_completion_file(config: kalpaa.Config, filename: str):
+    """
+    Set the completion file for a given filename.
+
+    :param config: the config object
+    :param filename: the filename to set
+    """
+    if not config.general_config.check_completions:
+        _logger.debug("Not checking completions or setting them")
+        return
+    root_dir = config.general_config.root_directory
+    completions_dir = root_dir / COMPLETIONS_DIR
+    completions_dir.mkdir(exist_ok=True, parents=True)
+    complete_file = completions_dir / filename
+    complete_file.touch()
+    _logger.info(f"Set {complete_file}")
+
+
+def check_initial_completions(
+    config_file: str, config: kalpaa.Config
+) -> CompletionsStatus:
+    """
+    Check whether the completion files exist.
+
+    First check if the out dir has been created.
+    If not, then we can run as normal.
+
+    If the out dir exists, check whether the config file matches the one we are using.
+    If not, we have an invalid case and should error (we don't want to change settings when resuming!).
+
+    Finally, check whether a kalpaa.complete file exists, and if so then exit.
+    """
+
+    root_dir = config.general_config.root_directory
+    _logger.debug(f"Checking completions for {root_dir=}")
+
+    if not config.general_config.check_completions:
+        _logger.debug("Not checking completions")
+        return CompletionsStatus.NOT_COMPLETE
+    if not root_dir.is_dir():
+        _logger.debug(f"Root dir {root_dir} does not exist, continuing")
+        return CompletionsStatus.NOT_COMPLETE
+
+    # check if the config file matches
+
+    files_to_check = [
+        config.general_config.indexes_json_name,
+        config.general_config.dots_json_name,
+        config_file,
+    ]
+
+    for file in files_to_check:
+        if (root_dir / file).exists():
+            _logger.info(f"Checking {file}, which exists")
+            if not _cwd_file_matches_previous(root_dir, file):
+                _logger.error(f"Config file {file} does not match copied config")
+                return CompletionsStatus.INVALID
+        else:
+            _logger.debug(
+                f"Config file {file} does not exist, expect it will be created this run"
+            )
+
+    completions_dir = root_dir / COMPLETIONS_DIR
+    completions_dir.mkdir(exist_ok=True, parents=True)
+    complete_file = completions_dir / KALPAA_COMPLETE
+    if complete_file.exists():
+        _logger.info(f"Found {complete_file}, exiting")
+        return CompletionsStatus.COMPLETE
+    else:
+        _logger.info(f"Did not find {complete_file}, continuing")
+        return CompletionsStatus.NOT_COMPLETE
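A minimal usage sketch of the new module, assuming an already-built kalpaa.Config and a hypothetical sentinel name: a caller checks its sentinel before doing work and sets it afterwards, so rerunning against the same root directory skips anything already done.

import kalpaa.completions


def run_once(config: kalpaa.Config, work) -> None:
    # `run_once` and `work` are hypothetical; `work` is any zero-argument callable
    status = kalpaa.completions.check_completion_file(config, "example.complete")
    if status == kalpaa.completions.CompletionsStatus.COMPLETE:
        return  # finished in a previous run
    if status == kalpaa.completions.CompletionsStatus.INVALID:
        raise ValueError("completions dir missing; root directory is in a bad state")
    work()
    kalpaa.completions.set_completion_file(config, "example.complete")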
@@ -81,6 +81,9 @@ class GeneralConfig:
     skip_to_stage: typing.Optional[int] = None
 
+    # if true check for existence of completion sentinel files before running
+    check_completions: bool = False
+
 
 @dataclass(frozen=True)
 class DefaultModelParamConfig:
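GeneralConfig is a frozen dataclass, so the flag is flipped the same way main() builds overridden_config: with dataclasses.replace. A sketch, where config is an existing kalpaa.Config:

import dataclasses

resumable = dataclasses.replace(
    config,
    general_config=dataclasses.replace(config.general_config, check_completions=True),
)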
@@ -10,27 +10,74 @@ import kalpaa.stages.stage04
 import kalpaa.common
 import kalpaa.config
 
+from typing import Protocol
+
+import kalpaa.completions
+
 import argparse
 
 
+class Runnable(Protocol):
+    config: kalpaa.Config
+
+    def run(self):
+        pass
+
+
+class Completable:
+    def __init__(self, runnable: Runnable, completion_name: str):
+        self.runnable = runnable
+        self.completion_name = completion_name
+
+    def run(self):
+        _logger.info(
+            f"Running {self.runnable} with completion name {self.completion_name}"
+        )
+        completions = kalpaa.completions.check_completion_file(
+            self.runnable.config, self.completion_name
+        )
+        if completions == kalpaa.completions.CompletionsStatus.COMPLETE:
+            _logger.info(f"Skipping {self.completion_name}")
+            return
+        elif completions == kalpaa.completions.CompletionsStatus.INVALID:
+            _logger.error(f"Invalid completion status for {self.completion_name}")
+            raise ValueError(f"Invalid completion status for {self.completion_name}")
+        else:
+            _logger.debug(f"Not completed for {self.completion_name}, running")
+            self.runnable.run()
+            _logger.info(f"Setting completion for {self.completion_name}")
+            kalpaa.completions.set_completion_file(
+                self.runnable.config, self.completion_name
+            )
+
+
 # try not to use this outside of main or when defining config stuff pls
 # import numpy
 
 _logger = logging.getLogger(__name__)
 
 
-class Runner:
+class Runner(Runnable):
     def __init__(self, config: kalpaa.Config):
         self.config = config
         _logger.info(f"Initialising runner with {config=}")
 
     def run(self):
 
-        stage01 = kalpaa.stages.stage01.Stage01Runner(self.config)
-        stage02 = kalpaa.stages.stage02.Stage02Runner(self.config)
-        stage03 = kalpaa.stages.stage03.Stage03Runner(self.config)
-        stage04 = kalpaa.stages.stage04.Stage04Runner(self.config)
+        stage01 = Completable(
+            kalpaa.stages.stage01.Stage01Runner(self.config), "stage01.complete"
+        )
+        stage02 = Completable(
+            kalpaa.stages.stage02.Stage02Runner(self.config), "stage02.complete"
+        )
+        stage03 = Completable(
+            kalpaa.stages.stage03.Stage03Runner(self.config), "stage03.complete"
+        )
+        stage04 = Completable(
+            kalpaa.stages.stage04.Stage04Runner(self.config), "stage04.complete"
+        )
+
         if self.config.general_config.skip_to_stage is not None:
 
             stages = [stage01, stage02, stage03, stage04]
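Runnable is a structural Protocol: anything with a config attribute and a run() method qualifies, so Completable can wrap more than the four stage runners. A sketch with a hypothetical extra stage:

class ExtraStage:
    # hypothetical; satisfies Runnable structurally, no inheritance needed
    def __init__(self, config: kalpaa.Config):
        self.config = config

    def run(self):
        _logger.info("doing extra work")


Completable(ExtraStage(config), "extra_stage.complete").run()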
@@ -44,20 +91,19 @@ class Runner:
         # standard run, can keep old
 
         _logger.info("*** Beginning Stage 01 ***")
-        stage01 = kalpaa.stages.stage01.Stage01Runner(self.config)
         stage01.run()
 
         _logger.info("*** Beginning Stage 02 ***")
-        stage02 = kalpaa.stages.stage02.Stage02Runner(self.config)
         stage02.run()
 
         _logger.info("*** Beginning Stage 03 ***")
-        stage03 = kalpaa.stages.stage03.Stage03Runner(self.config)
         stage03.run()
 
         _logger.info("*** Beginning Stage 04 ***")
-        stage04 = kalpaa.stages.stage04.Stage04Runner(self.config)
         stage04.run()
+        kalpaa.completions.set_completion_file(
+            self.config, kalpaa.completions.KALPAA_COMPLETE
+        )
 
 
 def parse_args():
@@ -129,40 +175,6 @@ def main():
     _logger.info(skip)
 
-    kalpaa.common.set_up_logging(
-        config,
-        log_stream=args.log_stream,
-        log_file=str(root / f"logs/kalpaa_{label}.log"),
-    )
-
-    _logger.info(
-        f"Root dir is {root}, copying over {config.general_config.indexes_json_name}, {config.general_config.dots_json_name} and {args.config_file}"
-    )
-    for file in [
-        config.general_config.indexes_json_name,
-        config.general_config.dots_json_name,
-        args.config_file,
-    ]:
-        _logger.info(f"Copying {file} to {root}")
-        (root / file).write_text((pathlib.Path.cwd() / file).read_text())
-
-    if config.generation_config.override_measurement_filesets is not None:
-        _logger.info(
-            f"Overriding measurements with {config.generation_config.override_measurement_filesets}"
-        )
-        override_directory = root / kalpaa.config.OVERRIDE_MEASUREMENT_DIR_NAME
-        override_directory.mkdir(exist_ok=True, parents=True)
-        for (
-            key,
-            files,
-        ) in config.generation_config.override_measurement_filesets.items():
-            _logger.info(f"Copying for {key=}, {files} to {override_directory}")
-            for file in files:
-                fileset_dir = override_directory / key
-                fileset_dir.mkdir(exist_ok=True, parents=True)
-                _logger.info(f"Copying {file} to {override_directory}")
-                (fileset_dir / file).write_text((pathlib.Path.cwd() / file).read_text())
-
     overridden_config = dataclasses.replace(
         config,
         general_config=dataclasses.replace(
@@ -170,6 +182,52 @@ def main():
         ),
     )
 
+    kalpaa.common.set_up_logging(
+        config,
+        log_stream=args.log_stream,
+        log_file=str(root / f"logs/kalpaa_{label}.log"),
+    )
+
+    completions_status = kalpaa.completions.check_initial_completions(
+        args.config_file, overridden_config
+    )
+    if completions_status == kalpaa.completions.CompletionsStatus.COMPLETE:
+        _logger.info("All stages complete, exiting")
+        return
+    elif completions_status == kalpaa.completions.CompletionsStatus.INVALID:
+        _logger.error("Invalid completion status, exiting")
+        raise ValueError("Invalid completion status")
+
+    # otherwise good to go
+
+    _logger.info(
+        f"Root dir is {root}, copying over {overridden_config.general_config.indexes_json_name}, {overridden_config.general_config.dots_json_name} and {args.config_file}"
+    )
+    for file in [
+        overridden_config.general_config.indexes_json_name,
+        overridden_config.general_config.dots_json_name,
+        args.config_file,
+    ]:
+        _logger.info(f"Copying {file} to {root}")
+        (root / file).write_text((pathlib.Path.cwd() / file).read_text())
+
+    if overridden_config.generation_config.override_measurement_filesets is not None:
+        _logger.info(
+            f"Overriding measurements with {overridden_config.generation_config.override_measurement_filesets}"
+        )
+        override_directory = root / kalpaa.config.OVERRIDE_MEASUREMENT_DIR_NAME
+        override_directory.mkdir(exist_ok=True, parents=True)
+        for (
+            key,
+            files,
+        ) in overridden_config.generation_config.override_measurement_filesets.items():
+            _logger.info(f"Copying for {key=}, {files} to {override_directory}")
+            for file in files:
+                fileset_dir = override_directory / key
+                fileset_dir.mkdir(exist_ok=True, parents=True)
+                _logger.info(f"Copying {file} to {override_directory}")
+                (fileset_dir / file).write_text((pathlib.Path.cwd() / file).read_text())
 
     _logger.info(f"Got {config=}")
     runner = Runner(overridden_config)
     runner.run()
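The guard against resuming with changed settings reduces to a byte-for-byte stdlib comparison. A self-contained illustration (file names hypothetical) of why editing a config between runs trips the INVALID case:

import filecmp
import pathlib

root = pathlib.Path("test_root")  # hypothetical root directory
# shallow=False compares contents byte by byte instead of trusting os.stat() metadata
if not filecmp.cmp(pathlib.Path.cwd() / "my_config.toml", root / "my_config.toml", shallow=False):
    raise ValueError("config changed since the original run; refusing to resume")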
@@ -17,6 +17,7 @@ import json
 
 import kalpaa
 import kalpaa.common
+import kalpaa.completions
 
 
 _logger = logging.getLogger(__name__)
@@ -101,6 +102,19 @@ class Stage02Runner:
         _logger.debug(f"Have {num_jobs=}")
         seed_index = 0
         for job_index in range(num_jobs):
 
+            _logger.debug(f"Working on {job_index=}")
+            completion_name = f"stage02.job_{job_index}.complete"
+            completion = kalpaa.completions.check_completion_file(
+                self.config, completion_name
+            )
+            if completion == kalpaa.completions.CompletionsStatus.COMPLETE:
+                _logger.info(f"Skipping {completion_name}")
+                continue
+            elif completion == kalpaa.completions.CompletionsStatus.INVALID:
+                _logger.error(f"Invalid completion status for {completion_name}")
+                raise ValueError(f"Invalid completion status for {completion_name}")
+
             for cost in self.config.deepdog_config.costs_to_try:
                 for dot in self.dots:
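The stage02.job_{job_index}.complete naming gives stage 2 job-level granularity: an interrupted run resumes at the first job whose sentinel is missing. The same pattern in isolation (do_job is a hypothetical stand-in for the per-job work):

for job_index in range(num_jobs):
    name = f"stage02.job_{job_index}.complete"
    status = kalpaa.completions.check_completion_file(config, name)
    if status == kalpaa.completions.CompletionsStatus.COMPLETE:
        continue  # finished in an earlier run
    do_job(job_index)  # hypothetical
    kalpaa.completions.set_completion_file(config, name)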
@@ -112,6 +126,7 @@ class Stage02Runner:
                     trial_name = (
                         f"{dot.label}-{combined_dot_name}-{cost}-{job_index}"
                     )
 
+                    _logger.info(f"Working on {trial_name=}")
                     _logger.debug(f"Have {seed_index=}")
                     self.single_run_in_subdir(
@@ -122,6 +137,7 @@ class Stage02Runner:
                         seed_index,
                         override_name=override_key,
                     )
+            kalpaa.completions.set_completion_file(self.config, completion_name)
 
     def single_run_in_subdir(
         self,
@@ -230,6 +246,8 @@ class Stage02Runner:
             write_successes_to_file=True,
             tag=trial_name,
             write_bayesrun_file=True,
+            bayesrun_file_timestamp=False,
+            skip_if_exists=True,  # Can't see why we wouldn't want this, maybe hook to check_completions later
         )
 
         _logger.info(f"{deepdog_config=}")
poetry.lock (generated, 6 lines changed)
@@ -174,13 +174,13 @@ dev = ["black", "coveralls", "mypy", "pre-commit", "pylint", "pytest (>=5)", "py
 
 [[package]]
 name = "deepdog"
-version = "1.5.0"
+version = "1.7.0"
 description = ""
 optional = false
 python-versions = "<3.10,>=3.8.1"
 files = [
-    {file = "deepdog-1.5.0-py3-none-any.whl", hash = "sha256:b645fdc32a1933e17b4a76f97b5399d77e698fb10c6386f3fbcdb1fe9c5caf08"},
-    {file = "deepdog-1.5.0.tar.gz", hash = "sha256:9012a9d375fce178fd222dd818a21c49ef4ce4127a65f5a3ad6ae16f5e96d1c5"},
+    {file = "deepdog-1.7.0-py3-none-any.whl", hash = "sha256:53944ec281abf0118ff94033e7b7d73e13805cf6ee15489859a43a250968d45e"},
+    {file = "deepdog-1.7.0.tar.gz", hash = "sha256:cb859f00da24117f49ddf544784dba4ff0df7a25fed83e2d9479fb55110a21d0"},
 ]
 
 [package.dependencies]
@@ -22,6 +22,7 @@
       'z_min': 5,
     }),
     'general_config': dict({
+      'check_completions': False,
       'dots_json_name': 'test_dots.json',
       'indexes_json_name': 'test_indexes.json',
       'log_pattern': '%(asctime)s | %(message)s',
@@ -103,6 +104,7 @@
       'z_min': 5,
     }),
     'general_config': dict({
+      'check_completions': False,
       'dots_json_name': 'dots.json',
       'indexes_json_name': 'indexes.json',
       'log_pattern': '%(asctime)s | %(process)d | %(levelname)-7s | %(name)s:%(lineno)d | %(message)s',
@@ -184,6 +186,7 @@
       'z_min': 0,
     }),
     'general_config': dict({
+      'check_completions': False,
       'dots_json_name': 'dots.json',
       'indexes_json_name': 'indexes.json',
       'log_pattern': '%(asctime)s | %(process)d | %(levelname)-7s | %(name)s:%(lineno)d | %(message)s',
@@ -263,6 +266,7 @@
       'z_min': 5,
     }),
     'general_config': dict({
+      'check_completions': False,
       'dots_json_name': 'test_dots.json',
       'indexes_json_name': 'test_indexes.json',
       'log_pattern': '%(asctime)s | %(process)d | %(levelname)-7s | %(name)s:%(lineno)d | %(message)s',
@@ -1,13 +1,13 @@
 # serializer version: 1
 # name: test_parse_config_all_fields_toml
-  Config(generation_config=GenerationConfig(counts=[1, 5, 10], orientations=[<Orientation.RANDOM: 'RANDOM'>, <Orientation.Z: 'Z'>, <Orientation.XY: 'XY'>], num_replicas=3, override_dipole_configs={'scenario1': [DipoleTO(p=array([3, 5, 7]), s=array([2, 4, 6]), w=10), DipoleTO(p=array([30, 50, 70]), s=array([20, 40, 60]), w=10.55)]}, override_measurement_filesets=None, tantri_configs=[TantriConfig(index_seed_starter=15151, num_seeds=5, delta_t=0.01, num_iterations=100), TantriConfig(index_seed_starter=1234, num_seeds=100, delta_t=1, num_iterations=200)], num_bin_time_series=25, bin_log_width=0.25), general_config=GeneralConfig(dots_json_name='test_dots.json', indexes_json_name='test_indexes.json', out_dir_name='out', log_pattern='%(asctime)s | %(message)s', measurement_type=<MeasurementTypeEnum.X_ELECTRIC_FIELD: 'x-electric-field'>, root_directory=PosixPath('test_root'), mega_merged_name='test_mega_merged.csv', mega_merged_inferenced_name='test_mega_merged_inferenced.csv', skip_to_stage=1), deepdog_config=DeepdogConfig(costs_to_try=[20, 2, 0.2], target_success=2000, max_monte_carlo_cycles_steps=20, use_log_noise=True), default_model_param_config=DefaultModelParamConfig(x_min=-20, x_max=20, y_min=-10, y_max=10, z_min=5, z_max=6.5, w_log_min=-5, w_log_max=1))
+  Config(generation_config=GenerationConfig(counts=[1, 5, 10], orientations=[<Orientation.RANDOM: 'RANDOM'>, <Orientation.Z: 'Z'>, <Orientation.XY: 'XY'>], num_replicas=3, override_dipole_configs={'scenario1': [DipoleTO(p=array([3, 5, 7]), s=array([2, 4, 6]), w=10), DipoleTO(p=array([30, 50, 70]), s=array([20, 40, 60]), w=10.55)]}, override_measurement_filesets=None, tantri_configs=[TantriConfig(index_seed_starter=15151, num_seeds=5, delta_t=0.01, num_iterations=100), TantriConfig(index_seed_starter=1234, num_seeds=100, delta_t=1, num_iterations=200)], num_bin_time_series=25, bin_log_width=0.25), general_config=GeneralConfig(dots_json_name='test_dots.json', indexes_json_name='test_indexes.json', out_dir_name='out', log_pattern='%(asctime)s | %(message)s', measurement_type=<MeasurementTypeEnum.X_ELECTRIC_FIELD: 'x-electric-field'>, root_directory=PosixPath('test_root'), mega_merged_name='test_mega_merged.csv', mega_merged_inferenced_name='test_mega_merged_inferenced.csv', skip_to_stage=1, check_completions=False), deepdog_config=DeepdogConfig(costs_to_try=[20, 2, 0.2], target_success=2000, max_monte_carlo_cycles_steps=20, use_log_noise=True), default_model_param_config=DefaultModelParamConfig(x_min=-20, x_max=20, y_min=-10, y_max=10, z_min=5, z_max=6.5, w_log_min=-5, w_log_max=1))
 # ---
 # name: test_parse_config_few_fields_toml
-  Config(generation_config=GenerationConfig(counts=[1, 5, 10], orientations=[<Orientation.RANDOM: 'RANDOM'>, <Orientation.Z: 'Z'>, <Orientation.XY: 'XY'>], num_replicas=2, override_dipole_configs={'scenario1': [DipoleTO(p=array([3, 5, 7]), s=array([2, 4, 6]), w=10), DipoleTO(p=array([30, 50, 70]), s=array([20, 40, 60]), w=10.55)]}, override_measurement_filesets=None, tantri_configs=[TantriConfig(index_seed_starter=15151, num_seeds=5, delta_t=0.01, num_iterations=100), TantriConfig(index_seed_starter=1234, num_seeds=100, delta_t=1, num_iterations=200)], num_bin_time_series=25, bin_log_width=0.25), general_config=GeneralConfig(dots_json_name='dots.json', indexes_json_name='indexes.json', out_dir_name='out', log_pattern='%(asctime)s | %(process)d | %(levelname)-7s | %(name)s:%(lineno)d | %(message)s', measurement_type=<MeasurementTypeEnum.POTENTIAL: 'electric-potential'>, root_directory=PosixPath('test_root1'), mega_merged_name='mega_merged_coalesced.csv', mega_merged_inferenced_name='mega_merged_coalesced_inferenced.csv', skip_to_stage=None), deepdog_config=DeepdogConfig(costs_to_try=[5, 2, 1, 0.5, 0.2], target_success=2000, max_monte_carlo_cycles_steps=20, use_log_noise=True), default_model_param_config=DefaultModelParamConfig(x_min=-20, x_max=20, y_min=-10, y_max=10, z_min=5, z_max=6.5, w_log_min=-5, w_log_max=1))
+  Config(generation_config=GenerationConfig(counts=[1, 5, 10], orientations=[<Orientation.RANDOM: 'RANDOM'>, <Orientation.Z: 'Z'>, <Orientation.XY: 'XY'>], num_replicas=2, override_dipole_configs={'scenario1': [DipoleTO(p=array([3, 5, 7]), s=array([2, 4, 6]), w=10), DipoleTO(p=array([30, 50, 70]), s=array([20, 40, 60]), w=10.55)]}, override_measurement_filesets=None, tantri_configs=[TantriConfig(index_seed_starter=15151, num_seeds=5, delta_t=0.01, num_iterations=100), TantriConfig(index_seed_starter=1234, num_seeds=100, delta_t=1, num_iterations=200)], num_bin_time_series=25, bin_log_width=0.25), general_config=GeneralConfig(dots_json_name='dots.json', indexes_json_name='indexes.json', out_dir_name='out', log_pattern='%(asctime)s | %(process)d | %(levelname)-7s | %(name)s:%(lineno)d | %(message)s', measurement_type=<MeasurementTypeEnum.POTENTIAL: 'electric-potential'>, root_directory=PosixPath('test_root1'), mega_merged_name='mega_merged_coalesced.csv', mega_merged_inferenced_name='mega_merged_coalesced_inferenced.csv', skip_to_stage=None, check_completions=False), deepdog_config=DeepdogConfig(costs_to_try=[5, 2, 1, 0.5, 0.2], target_success=2000, max_monte_carlo_cycles_steps=20, use_log_noise=True), default_model_param_config=DefaultModelParamConfig(x_min=-20, x_max=20, y_min=-10, y_max=10, z_min=5, z_max=6.5, w_log_min=-5, w_log_max=1))
 # ---
 # name: test_parse_config_geom_params_toml
-  Config(generation_config=GenerationConfig(counts=[1, 5, 10], orientations=[<Orientation.RANDOM: 'RANDOM'>, <Orientation.Z: 'Z'>, <Orientation.XY: 'XY'>], num_replicas=2, override_dipole_configs={'scenario1': [DipoleTO(p=array([3, 5, 7]), s=array([2, 4, 6]), w=10), DipoleTO(p=array([30, 50, 70]), s=array([20, 40, 60]), w=10.55)]}, override_measurement_filesets=None, tantri_configs=[TantriConfig(index_seed_starter=15151, num_seeds=5, delta_t=0.01, num_iterations=100), TantriConfig(index_seed_starter=1234, num_seeds=100, delta_t=1, num_iterations=200)], num_bin_time_series=25, bin_log_width=0.25), general_config=GeneralConfig(dots_json_name='dots.json', indexes_json_name='indexes.json', out_dir_name='out', log_pattern='%(asctime)s | %(process)d | %(levelname)-7s | %(name)s:%(lineno)d | %(message)s', measurement_type=<MeasurementTypeEnum.POTENTIAL: 'electric-potential'>, root_directory=PosixPath('test_root1'), mega_merged_name='mega_merged_coalesced.csv', mega_merged_inferenced_name='mega_merged_coalesced_inferenced.csv', skip_to_stage=None), deepdog_config=DeepdogConfig(costs_to_try=[5, 2, 1, 0.5, 0.2], target_success=2000, max_monte_carlo_cycles_steps=20, use_log_noise=True), default_model_param_config=DefaultModelParamConfig(x_min=-20, x_max=20, y_min=-10, y_max=10, z_min=0, z_max=2, w_log_min=-3, w_log_max=1.5))
+  Config(generation_config=GenerationConfig(counts=[1, 5, 10], orientations=[<Orientation.RANDOM: 'RANDOM'>, <Orientation.Z: 'Z'>, <Orientation.XY: 'XY'>], num_replicas=2, override_dipole_configs={'scenario1': [DipoleTO(p=array([3, 5, 7]), s=array([2, 4, 6]), w=10), DipoleTO(p=array([30, 50, 70]), s=array([20, 40, 60]), w=10.55)]}, override_measurement_filesets=None, tantri_configs=[TantriConfig(index_seed_starter=15151, num_seeds=5, delta_t=0.01, num_iterations=100), TantriConfig(index_seed_starter=1234, num_seeds=100, delta_t=1, num_iterations=200)], num_bin_time_series=25, bin_log_width=0.25), general_config=GeneralConfig(dots_json_name='dots.json', indexes_json_name='indexes.json', out_dir_name='out', log_pattern='%(asctime)s | %(process)d | %(levelname)-7s | %(name)s:%(lineno)d | %(message)s', measurement_type=<MeasurementTypeEnum.POTENTIAL: 'electric-potential'>, root_directory=PosixPath('test_root1'), mega_merged_name='mega_merged_coalesced.csv', mega_merged_inferenced_name='mega_merged_coalesced_inferenced.csv', skip_to_stage=None, check_completions=False), deepdog_config=DeepdogConfig(costs_to_try=[5, 2, 1, 0.5, 0.2], target_success=2000, max_monte_carlo_cycles_steps=20, use_log_noise=True), default_model_param_config=DefaultModelParamConfig(x_min=-20, x_max=20, y_min=-10, y_max=10, z_min=0, z_max=2, w_log_min=-3, w_log_max=1.5))
 # ---
 # name: test_parse_config_toml
-  Config(generation_config=GenerationConfig(counts=[1, 10], orientations=[<Orientation.RANDOM: 'RANDOM'>, <Orientation.Z: 'Z'>, <Orientation.XY: 'XY'>], num_replicas=3, override_dipole_configs=None, override_measurement_filesets=None, tantri_configs=[TantriConfig(index_seed_starter=31415, num_seeds=100, delta_t=0.05, num_iterations=100000)], num_bin_time_series=25, bin_log_width=0.25), general_config=GeneralConfig(dots_json_name='test_dots.json', indexes_json_name='test_indexes.json', out_dir_name='test_out', log_pattern='%(asctime)s | %(process)d | %(levelname)-7s | %(name)s:%(lineno)d | %(message)s', measurement_type=<MeasurementTypeEnum.X_ELECTRIC_FIELD: 'x-electric-field'>, root_directory=PosixPath('test_root'), mega_merged_name='test_mega_merged.csv', mega_merged_inferenced_name='test_mega_merged_inferenced.csv', skip_to_stage=1), deepdog_config=DeepdogConfig(costs_to_try=[10, 1, 0.1], target_success=1000, max_monte_carlo_cycles_steps=20, use_log_noise=False), default_model_param_config=DefaultModelParamConfig(x_min=-20, x_max=20, y_min=-10, y_max=10, z_min=5, z_max=6.5, w_log_min=-5, w_log_max=1))
+  Config(generation_config=GenerationConfig(counts=[1, 10], orientations=[<Orientation.RANDOM: 'RANDOM'>, <Orientation.Z: 'Z'>, <Orientation.XY: 'XY'>], num_replicas=3, override_dipole_configs=None, override_measurement_filesets=None, tantri_configs=[TantriConfig(index_seed_starter=31415, num_seeds=100, delta_t=0.05, num_iterations=100000)], num_bin_time_series=25, bin_log_width=0.25), general_config=GeneralConfig(dots_json_name='test_dots.json', indexes_json_name='test_indexes.json', out_dir_name='test_out', log_pattern='%(asctime)s | %(process)d | %(levelname)-7s | %(name)s:%(lineno)d | %(message)s', measurement_type=<MeasurementTypeEnum.X_ELECTRIC_FIELD: 'x-electric-field'>, root_directory=PosixPath('test_root'), mega_merged_name='test_mega_merged.csv', mega_merged_inferenced_name='test_mega_merged_inferenced.csv', skip_to_stage=1, check_completions=False), deepdog_config=DeepdogConfig(costs_to_try=[10, 1, 0.1], target_success=1000, max_monte_carlo_cycles_steps=20, use_log_noise=False), default_model_param_config=DefaultModelParamConfig(x_min=-20, x_max=20, y_min=-10, y_max=10, z_min=5, z_max=6.5, w_log_min=-5, w_log_max=1))
 # ---