Compare commits

6 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 72c76ec95d | |
| | 4d2d865e9d | |
| | c331bc057f | |
| | b685c63efe | |
| | f2f326dcfe | |
| | 96742769be | |
3 .gitignore (vendored)

@@ -155,3 +155,6 @@ out/
 /kalpa.toml
 /indexes.json
 /dots.json
+
+# nix
+result
CHANGELOG.md

@@ -2,6 +2,13 @@
 
 All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.
 
+## [1.2.0](https://gitea.deepak.science:2222/physics/kalpa/compare/1.1.0...1.2.0) (2025-03-03)
+
+
+### Features
+
+* adds ability to specify which dots are used manually, and better handles collating and coalescing results ([9674276](https://gitea.deepak.science:2222/physics/kalpa/commit/96742769bedad928890a27153e7c0952a6fc1cdb))
+
 ## [1.1.0](https://gitea.deepak.science:2222/physics/kalpa/compare/1.0.1...1.1.0) (2025-03-02)
 
 
71 flake.nix

@@ -13,28 +13,66 @@
       let
         pkgs = nixpkgs.legacyPackages.${system};
         poetry2nix = poetry2nixSrc.lib.mkPoetry2Nix { inherit pkgs; };
+        kalpaaApp = poetry2nix.mkPoetryApplication {
+          projectDir = self;
+          python = pkgs.python39;
+          preferWheels = true;
+        };
+        kalpaaEnv = poetry2nix.mkPoetryEnv {
+          projectDir = self;
+          python = pkgs.python39;
+          preferWheels = true;
+          overrides = poetry2nix.overrides.withDefaults (self: super: {
+          });
+        };
+        kalpaa-docker-image = pkgs.dockerTools.buildLayeredImage {
+          name = "kalpaa";
+          tag = "latest";
+
+          contents = [
+            # some stuff that dockertools provides?
+            # pkgs.dockerTools.usrBinEnv
+            # pkgs.dockerTools.binSh
+            # pkgs.dockerTools.caCertificates
+            # pkgs.dockerTools.fakeNss
+            pkgs.bash
+            pkgs.coreutils
+            # pkgs.cacert
+            # pkgs.gnutar
+            # pkgs.gzip
+            # pkgs.gnused
+            # pkgs.gnugrep
+            pkgs.uv
+            kalpaaApp
+          ];
+
+          config = {
+            Cmd = [ "/bin/bash" ];
+            Env = [
+              "PATH=/bin"
+            ];
+            WorkingDir = "/workspace";
+          };
+        };
       in {
         packages = {
-          kalpaApp = poetry2nix.mkPoetryApplication {
-            projectDir = self;
-            python = pkgs.python39;
-            preferWheels = true;
-          };
-          kalpaEnv = poetry2nix.mkPoetryEnv {
-            projectDir = self;
-            python = pkgs.python39;
-            preferWheels = true;
-            overrides = poetry2nix.overrides.withDefaults (self: super: {
-            });
-          };
-          default = self.packages.${system}.kalpaEnv;
+          inherit kalpaaEnv;
+          inherit kalpaaApp;
+          inherit kalpaa-docker-image;
+          default = self.packages.${system}.kalpaaEnv;
         };

         devShells.default = pkgs.mkShell {
-          inputsFrom = [ self.packages.${system}.kalpaEnv ];
+          inputsFrom = [ self.packages.${system}.kalpaaEnv ];
           buildInputs = [
             pkgs.poetry
-            self.packages.${system}.kalpaEnv
-            self.packages.${system}.kalpaApp
+            self.packages.${system}.kalpaaEnv
+            self.packages.${system}.kalpaaApp
             pkgs.just
             pkgs.nodejs
           ];
@@ -42,6 +80,7 @@
             export DO_NIX_CUSTOM=1
           '';
         };
+
       }
     );
}
21 justfile

@@ -70,3 +70,24 @@ release version="":
 
 # htmlcov:
 # 	poetry run pytest --cov-report=html
+
+# build docker image
+build-container:
+	#!/usr/bin/env bash
+	set -euxo pipefail
+	nix build .#kalpaa-docker-image
+
+# load the image into docker
+load-container:
+	#!/usr/bin/env bash
+	set -euxo pipefail
+	docker load < result
+
+# build and load in one step
+build-load-container: build-container load-container
+	echo "Image loaded successfully!"
+
+exec-container:
+	#!/usr/bin/env bash
+	set -euxo pipefail
+	docker run -it -v $(pwd)/kalpaa.toml:/workspace/kalpaa.toml -v $(pwd)/dots.json:/workspace/dots.json -v $(pwd)/indexes.json:/workspace/indexes.json kalpaa /bin/bash
@@ -154,6 +154,18 @@ class DeepdogConfig:
 	# Whether to use a log log cost function
 	use_log_noise: bool = False
 
+	# Manually specifying which dots to use.
+	# The outer layer is multiple configurations, within that is which dots to combine, and the inner layer distinguishes single dots and pairs.
+	# example:
+	# [
+	#   [ ["dot1"] ],            # first one is to use just dot1
+	#   [ ["dot1"], ["dot2"] ],  # second one is to use dot1 and dot2
+	#   [ ["dot1", "dot2"] ],    # third one is to use dot1 and dot2 as a pair
+	# ]
+	manual_dot_seeds: typing.Optional[
+		typing.Sequence[typing.Sequence[typing.Sequence[str]]]
+	] = None
+
 
 
 @dataclass(frozen=True)
 class Config:
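For illustration, a minimal sketch (plain Python, hypothetical values) of how one such `manual_dot_seeds` value is shaped, and how its entries split into singles and pairs according to the length-1/length-2 validation in `single_run_in_subdir` further down:

```python
# Hypothetical manual_dot_seeds value: three configurations, mirroring the
# example in the config comment above.
manual_dot_seeds = [
    [["dot1"]],            # use just dot1
    [["dot1"], ["dot2"]],  # use dot1 and dot2 as separate single dots
    [["dot1", "dot2"]],    # use dot1 and dot2 as a pair
]

for manual_config in manual_dot_seeds:
    # mirror the length-1 / length-2 split performed during validation
    singles = [entry[0] for entry in manual_config if len(entry) == 1]
    pairs = [(entry[0], entry[1]) for entry in manual_config if len(entry) == 2]
    print(singles, pairs)
# ['dot1'] []
# ['dot1', 'dot2'] []
# [] [('dot1', 'dot2')]
```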
@@ -153,7 +153,7 @@ class MeasurementGroup:
 			input_array,
 			meas_array,
 			stdev_array,
-			log_noise=use_log_noise,
+			log_noise=use_log_noise and not self._using_pairs,
 			use_pair_measurement=self._using_pairs,
 		)
 
@@ -83,7 +83,7 @@ class Stage02Runner:
 		)
 		_logger.info(f"Got dots {self.dots=}")
 
-	def _dots_to_include(self, current_dot: str) -> typing.Sequence[str]:
+	def _dots_to_include(self, current_dot: str) -> typing.List[str]:
 		if current_dot == "dot1":
 			return ["dot1"]
 		if current_dot == "dot2":
@@ -116,27 +116,60 @@ class Stage02Runner:
 				raise ValueError(f"Invalid completion status for {completion_name}")
 
 		for cost in self.config.deepdog_config.costs_to_try:
-			for dot in self.dots:
-
-				seed_index += 1
-
-				combined_dot_name = ",".join(
-					[d for d in self._dots_to_include(dot.label)]
-				)
-				trial_name = (
-					f"{dot.label}-{combined_dot_name}-{cost}-{job_index}"
-				)
-
-				_logger.info(f"Working on {trial_name=}")
-				_logger.debug(f"Have {seed_index=}")
-				self.single_run_in_subdir(
-					job_index,
-					cost,
-					dot.label,
-					trial_name,
-					seed_index,
-					override_name=override_key,
-				)
+			if self.config.deepdog_config.manual_dot_seeds is not None:
+				for config_i, manual_config in enumerate(
+					self.config.deepdog_config.manual_dot_seeds
+				):
+
+					seed_index += 1
+					# validate config
+
+					dot_label = str(config_i) + str(manual_config).translate(
+						str.maketrans("", "", "[]\",' ")
+					)
+					dot_set = set()
+					for dot_entry in manual_config:
+						for dot_name in dot_entry:
+							dot_set.add(dot_name)
+					_logger.info(f"Dot set {dot_set=}")
+					dot_included = ",".join([d for d in sorted(dot_set)])
+					trial_name = (
+						f"{dot_label}-{dot_included}-{cost}-{job_index}"
+					)
+
+					_logger.info(f"Working on {trial_name=}")
+					_logger.debug(f"Have {seed_index=}")
+					self.single_run_in_subdir(
+						job_index,
+						cost,
+						dot_label,
+						trial_name,
+						seed_index,
+						override_name=override_key,
+						dot_spec=manual_config,
+					)
+			else:
+				for dot in self.dots:
+
+					seed_index += 1
+
+					combined_dot_name = ",".join(
+						[d for d in self._dots_to_include(dot.label)]
+					)
+					trial_name = (
+						f"{dot.label}-{combined_dot_name}-{cost}-{job_index}"
+					)
+
+					_logger.info(f"Working on {trial_name=}")
+					_logger.debug(f"Have {seed_index=}")
+					self.single_run_in_subdir(
+						job_index,
+						cost,
+						dot.label,
+						trial_name,
+						seed_index,
+						override_name=override_key,
+					)
 		kalpaa.completions.set_completion_file(self.config, completion_name)
 
 	def single_run_in_subdir(
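As a worked example of the `dot_label` construction in the loop above: `str.maketrans("", "", "[]\",' ")` builds a table that deletes brackets, quotes, commas, and spaces, so the stringified spec collapses to a compact label prefixed by the configuration index (values here are hypothetical):

```python
# Worked example of the dot_label construction (hypothetical spec).
config_i = 0
manual_config = [["dot1"], ["dot2"]]
dot_label = str(config_i) + str(manual_config).translate(
    str.maketrans("", "", "[]\",' ")
)
# str(manual_config) == "[['dot1'], ['dot2']]"; deleting brackets, quotes,
# commas, and spaces leaves a compact label:
print(dot_label)  # 0dot1dot2
```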
@@ -147,6 +180,7 @@ class Stage02Runner:
 		trial_name: str,
 		seed_index: int,
 		override_name: typing.Optional[str] = None,
+		dot_spec: typing.Optional[typing.Sequence[typing.Sequence[str]]] = None,
 	):
 		# _logger.info(f"Got job index {job_index}")
 		# NOTE This guy runs inside subdirs, obviously. In something like <kalpa>/out/z-10-2/dipoles
@@ -193,8 +227,26 @@ class Stage02Runner:
 			for tantri_index in range(num_tantri_configs)
 		]
 
-		dot_names = self._dots_to_include(dot_name)
-		_logger.debug(f"Got dot names {dot_names}")
+		single_dot_names: typing.List[str] = []
+		pair_dot_names: typing.List[typing.Tuple[str, str]] = []
+		if dot_spec is not None:
+			_logger.info(f"Received dot_spec {dot_spec}, validating")
+			for dot_entry in dot_spec:
+				_logger.debug(f"Working on {dot_entry=}")
+				if len(dot_entry) not in (1, 2):
+					raise ValueError(
+						f"Invalid dot spec {dot_spec}, {dot_entry} has wrong length"
+					)
+
+				if len(dot_entry) == 1:
+					_logger.debug(f"Adding {dot_entry[0]} to single_dot_names")
+					single_dot_names.append(dot_entry[0])
+				else:
+					pair_dot_names.append((dot_entry[0], dot_entry[1]))
+		else:
+			single_dot_names = self._dots_to_include(dot_name)
+			pair_dot_names = []
+		_logger.debug(f"Got dot names {single_dot_names=}, {pair_dot_names=}")
 
 		models = []
 
@@ -252,12 +304,25 @@
 
 		_logger.info(f"{deepdog_config=}")
 
-		stdev_cost_function_filters = [
-			b.stdev_cost_function_filter(
-				dot_names, cost, self.config.deepdog_config.use_log_noise
-			)
-			for b in binned_datas
-		]
+		stdev_cost_function_filters = []
+
+		if len(pair_dot_names):
+			pair_stdev_cost_function_filters = [
+				b.stdev_cost_function_filter(
+					pair_dot_names, cost, self.config.deepdog_config.use_log_noise
+				)
+				for b in binned_datas
+			]
+			stdev_cost_function_filters.extend(pair_stdev_cost_function_filters)
+
+		if len(single_dot_names):
+			single_stdev_cost_function_filters = [
+				b.stdev_cost_function_filter(
+					single_dot_names, cost, self.config.deepdog_config.use_log_noise
+				)
+				for b in binned_datas
+			]
+			stdev_cost_function_filters.extend(single_stdev_cost_function_filters)
 
 		_logger.debug(f"{stdev_cost_function_filters=}")
 		combining_filter = deepdog.direct_monte_carlo.compose_filter.ComposedDMCFilter(
@@ -24,6 +24,7 @@ import tantri.dipoles.types
 # folder in curr dir
 import kalpaa
 import kalpaa.common
+import kalpaa.completions
 
 
 _logger = logging.getLogger(__name__)
@@ -48,17 +49,24 @@ OUT_FIELDNAMES = [
 ]
 
 
-def coalesced_filename(dot_name, target_cost) -> str:
-	return f"coalesced-{dot_name}-{target_cost}.csv"
+def coalesced_filename(subdir_name: str) -> str:
+	return f"coalesced-{subdir_name}.csv"
 
 
-def read_coalesced_csv(parent_path: pathlib.Path, dot_name: str, target_cost):
+def read_coalesced_csv(parent_path: pathlib.Path, subdir_name: str):
 	# csv_name = f"coalesced-{dot_name}-{target_cost}.csv"
-	csv_path = parent_path / coalesced_filename(dot_name, target_cost)
+	csv_path = parent_path / coalesced_filename(subdir_name)
 	_logger.debug(f"{csv_path=}")
 	with csv_path.open("r", newline="") as csvfile:
 		reader = csv.DictReader(csvfile)
 		out_list = []
+
+		subdir_split = subdir_name.rsplit("-", 1)
+
+		dot_name = subdir_split[0]
+		target_cost = subdir_split[1]
+		_logger.debug(f"{dot_name=}, {target_cost=} for subdir_name {subdir_name=}")
 		for row in reader:
 			row["dot_name"] = dot_name
 			row["target_cost"] = target_cost
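`read_coalesced_csv` recovers the dot name and target cost by splitting the subdirectory name on its last hyphen only, so labels that themselves contain hyphens or commas stay intact. A small illustration (names hypothetical, assuming the `<label>-<cost>` naming from stage 2):

```python
# Splitting on the last hyphen only: the cost is always the final segment.
for subdir_name in ["dot1-2.5", "0dot1dot2-0.5"]:
    dot_name, target_cost = subdir_name.rsplit("-", 1)
    print(f"{dot_name=}, {target_cost=}")
# dot_name='dot1', target_cost='2.5'
# dot_name='0dot1dot2', target_cost='0.5'
```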
@@ -86,12 +94,15 @@ class Stage03Runner:
 		with out_path.open("w", newline="") as outfile:
 			writer = csv.DictWriter(outfile, OUT_FIELDNAMES)
 			writer.writeheader()
-			for dot in self.dots:
-				for cost in self.config.deepdog_config.costs_to_try:
-					_logger.info(f"Reading {dot=} {cost=}")
-					rows = read_coalesced_csv(sorted_dir, dot, cost)
-					for row in rows:
-						writer.writerow(row)
+			for subdir in sorted_dir.iterdir():
+				if not subdir.is_dir():
+					_logger.info(f"That's not a dir {subdir=}")
+					continue
+				subdir_name = subdir.name
+				_logger.info(f"Reading for {subdir_name=}")
+				rows = read_coalesced_csv(sorted_dir, subdir_name)
+				for row in rows:
+					writer.writerow(row)
 
 	def run_in_subdir(self, subdir: pathlib.Path):
 		"""
@@ -101,33 +112,49 @@ class Stage03Runner:
 
 		_logger.debug(f"Running inside {subdir=}")
 
-		kalpaa.stages.stage03_1.move_all_in_dipoles(subdir / "dipoles")
+		subdir_name = subdir.name
+		completion_name = f"stage03_1.job_{subdir_name}.complete"
+		completion = kalpaa.completions.check_completion_file(
+			self.config, completion_name
+		)
+		if completion == kalpaa.completions.CompletionsStatus.COMPLETE:
+			_logger.info(f"Skipping {completion_name}")
+			# continue
+		elif completion == kalpaa.completions.CompletionsStatus.INVALID:
+			_logger.error(f"Invalid completion status for {completion_name}")
+			raise ValueError(f"Invalid completion status for {completion_name}")
+		else:
+			_logger.info(f"Moving dipoles for {subdir=}")
+			kalpaa.stages.stage03_1.move_all_in_dipoles(subdir / "dipoles")
+			kalpaa.completions.set_completion_file(self.config, completion_name)
 
 		seed_index = 0
 
 		sorted_dir = pathlib.Path(kalpaa.common.sorted_bayesruns_name())
 		_logger.info(f"{sorted_dir.resolve()}")
 
-		for cost in self.config.deepdog_config.costs_to_try:
-			for dot in self.dots:
+		for sorted_subdir in sorted_dir.iterdir():
+			if not subdir.is_dir():
+				_logger.info(f"That's not a dir {subdir=}")
+				continue
 
-				seed_index += 1
-				# TODO pull out
-				sorted_subdir = sorted_dir / f"{dot}-{cost}"
+			seed_index += 1
+			# TODO pull out
+			# sorted_subdir = sorted_dir / f"{dot}-{cost}"
 
-				# TODO need to refactor deepdog probs method so I don't have to dump into args like this
-				probs_args = argparse.Namespace()
-				probs_args.bayesrun_directory = sorted_subdir
-				probs_args.indexify_json = self.config.absify(
-					self.config.general_config.indexes_json_name
-				)
-				probs_args.coalesced_keys = ""
-				probs_args.uncoalesced_outfile = None
-				probs_args.coalesced_outfile = sorted_dir / coalesced_filename(
-					dot, cost
-				)
+			# TODO need to refactor deepdog probs method so I don't have to dump into args like this
+			probs_args = argparse.Namespace()
+			probs_args.bayesrun_directory = sorted_subdir
+			probs_args.indexify_json = self.config.absify(
+				self.config.general_config.indexes_json_name
+			)
+			probs_args.coalesced_keys = ""
+			probs_args.uncoalesced_outfile = None
+			probs_args.coalesced_outfile = sorted_dir / coalesced_filename(
+				sorted_subdir.name
+			)
 
-				deepdog.cli.probs.main.main(probs_args)
+			deepdog.cli.probs.main.main(probs_args)
 
 		self.merge_coalesceds(sorted_dir)
pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "kalpaa"
-version = "1.1.0"
+version = "1.2.0"
 description = "Groups up and runs full run."
 authors = ["Deepak Mallubhotla <dmallubhotla+github@gmail.com>"]
 readme = "README.md"
@@ -14,7 +14,7 @@ if [ -z "$(git status --porcelain)" ]; then
 release_needed=false
 if \
 	{ git log "$( git describe --tags --abbrev=0 )..HEAD" --format='%s' | cut -d: -f1 | sort -u | sed -e 's/([^)]*)//' | grep -q -i -E '^feat|fix|perf|refactor|revert$' ; } || \
-	{ git log "$( git describe --tags --abbrev=0 )..HEAD" --format='%s' | cut -d: -f1 | sort -u | sed -e 's/([^)]*)//' | grep -q -E '\!$' ; } || \
+	{ git log "$( git describe --tags --abbrev=0 )..HEAD" --format='%s' | cut -d: -f1 | sort -u | sed -e 's/([^)]*)//' | grep -q -E '!$' ; } || \
 	{ git log "$( git describe --tags --abbrev=0 )..HEAD" --format='%b' | grep -q -E '^BREAKING CHANGE:' ; }
 then
 	release_needed=true
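The escaping fix matters because in a POSIX extended regular expression `\!` is an undefined escape (newer grep versions warn about it), while a bare `!` already matches the literal character. A rough Python sketch, an assumption rather than the script itself, of what the subject-line check computes:

```python
# Rough Python equivalent (assumption) of the release-needed check: a release
# is warranted for conventional-commit types feat/fix/perf/refactor/revert,
# or for any type marked breaking with a trailing "!".
import re

subjects = ["feat(stage02): manual dot seeds", "chore: tidy justfile"]  # hypothetical
types = {re.sub(r"\([^)]*\)", "", s.split(":", 1)[0]) for s in subjects}
release_needed = any(
    t in {"feat", "fix", "perf", "refactor", "revert"} or t.endswith("!")
    for t in types
)
print(release_needed)  # True
```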
@@ -7,6 +7,7 @@
       2,
       0.2,
     ]),
+    'manual_dot_seeds': None,
     'max_monte_carlo_cycles_steps': 20,
     'target_success': 2000,
     'use_log_noise': True,
@@ -89,6 +90,7 @@
       0.5,
       0.2,
     ]),
+    'manual_dot_seeds': None,
     'max_monte_carlo_cycles_steps': 20,
     'target_success': 2000,
     'use_log_noise': True,
@@ -171,6 +173,7 @@
       0.5,
       0.2,
     ]),
+    'manual_dot_seeds': None,
     'max_monte_carlo_cycles_steps': 20,
     'target_success': 2000,
     'use_log_noise': True,
@@ -251,6 +254,7 @@
       1,
       0.1,
     ]),
+    'manual_dot_seeds': None,
     'max_monte_carlo_cycles_steps': 20,
     'target_success': 1000,
     'use_log_noise': False,
@@ -1,13 +1,13 @@
 # serializer version: 1
 # name: test_parse_config_all_fields_toml
-  Config(generation_config=GenerationConfig(counts=[1, 5, 10], orientations=[<Orientation.RANDOM: 'RANDOM'>, <Orientation.Z: 'Z'>, <Orientation.XY: 'XY'>], num_replicas=3, override_dipole_configs={'scenario1': [DipoleTO(p=array([3, 5, 7]), s=array([2, 4, 6]), w=10), DipoleTO(p=array([30, 50, 70]), s=array([20, 40, 60]), w=10.55)]}, override_measurement_filesets=None, tantri_configs=[TantriConfig(index_seed_starter=15151, num_seeds=5, delta_t=0.01, num_iterations=100), TantriConfig(index_seed_starter=1234, num_seeds=100, delta_t=1, num_iterations=200)], num_bin_time_series=25, bin_log_width=0.25), general_config=GeneralConfig(dots_json_name='test_dots.json', indexes_json_name='test_indexes.json', out_dir_name='out', log_pattern='%(asctime)s | %(message)s', measurement_type=<MeasurementTypeEnum.X_ELECTRIC_FIELD: 'x-electric-field'>, root_directory=PosixPath('test_root'), mega_merged_name='test_mega_merged.csv', mega_merged_inferenced_name='test_mega_merged_inferenced.csv', skip_to_stage=1, check_completions=False), deepdog_config=DeepdogConfig(costs_to_try=[20, 2, 0.2], target_success=2000, max_monte_carlo_cycles_steps=20, use_log_noise=True), default_model_param_config=DefaultModelParamConfig(x_min=-20, x_max=20, y_min=-10, y_max=10, z_min=5, z_max=6.5, w_log_min=-5, w_log_max=1))
+  Config(generation_config=GenerationConfig(counts=[1, 5, 10], orientations=[<Orientation.RANDOM: 'RANDOM'>, <Orientation.Z: 'Z'>, <Orientation.XY: 'XY'>], num_replicas=3, override_dipole_configs={'scenario1': [DipoleTO(p=array([3, 5, 7]), s=array([2, 4, 6]), w=10), DipoleTO(p=array([30, 50, 70]), s=array([20, 40, 60]), w=10.55)]}, override_measurement_filesets=None, tantri_configs=[TantriConfig(index_seed_starter=15151, num_seeds=5, delta_t=0.01, num_iterations=100), TantriConfig(index_seed_starter=1234, num_seeds=100, delta_t=1, num_iterations=200)], num_bin_time_series=25, bin_log_width=0.25), general_config=GeneralConfig(dots_json_name='test_dots.json', indexes_json_name='test_indexes.json', out_dir_name='out', log_pattern='%(asctime)s | %(message)s', measurement_type=<MeasurementTypeEnum.X_ELECTRIC_FIELD: 'x-electric-field'>, root_directory=PosixPath('test_root'), mega_merged_name='test_mega_merged.csv', mega_merged_inferenced_name='test_mega_merged_inferenced.csv', skip_to_stage=1, check_completions=False), deepdog_config=DeepdogConfig(costs_to_try=[20, 2, 0.2], target_success=2000, max_monte_carlo_cycles_steps=20, use_log_noise=True, manual_dot_seeds=None), default_model_param_config=DefaultModelParamConfig(x_min=-20, x_max=20, y_min=-10, y_max=10, z_min=5, z_max=6.5, w_log_min=-5, w_log_max=1))
 # ---
 # name: test_parse_config_few_fields_toml
-  Config(generation_config=GenerationConfig(counts=[1, 5, 10], orientations=[<Orientation.RANDOM: 'RANDOM'>, <Orientation.Z: 'Z'>, <Orientation.XY: 'XY'>], num_replicas=2, override_dipole_configs={'scenario1': [DipoleTO(p=array([3, 5, 7]), s=array([2, 4, 6]), w=10), DipoleTO(p=array([30, 50, 70]), s=array([20, 40, 60]), w=10.55)]}, override_measurement_filesets=None, tantri_configs=[TantriConfig(index_seed_starter=15151, num_seeds=5, delta_t=0.01, num_iterations=100), TantriConfig(index_seed_starter=1234, num_seeds=100, delta_t=1, num_iterations=200)], num_bin_time_series=25, bin_log_width=0.25), general_config=GeneralConfig(dots_json_name='dots.json', indexes_json_name='indexes.json', out_dir_name='out', log_pattern='%(asctime)s | %(process)d | %(levelname)-7s | %(name)s:%(lineno)d | %(message)s', measurement_type=<MeasurementTypeEnum.POTENTIAL: 'electric-potential'>, root_directory=PosixPath('test_root1'), mega_merged_name='mega_merged_coalesced.csv', mega_merged_inferenced_name='mega_merged_coalesced_inferenced.csv', skip_to_stage=None, check_completions=False), deepdog_config=DeepdogConfig(costs_to_try=[5, 2, 1, 0.5, 0.2], target_success=2000, max_monte_carlo_cycles_steps=20, use_log_noise=True), default_model_param_config=DefaultModelParamConfig(x_min=-20, x_max=20, y_min=-10, y_max=10, z_min=5, z_max=6.5, w_log_min=-5, w_log_max=1))
+  Config(generation_config=GenerationConfig(counts=[1, 5, 10], orientations=[<Orientation.RANDOM: 'RANDOM'>, <Orientation.Z: 'Z'>, <Orientation.XY: 'XY'>], num_replicas=2, override_dipole_configs={'scenario1': [DipoleTO(p=array([3, 5, 7]), s=array([2, 4, 6]), w=10), DipoleTO(p=array([30, 50, 70]), s=array([20, 40, 60]), w=10.55)]}, override_measurement_filesets=None, tantri_configs=[TantriConfig(index_seed_starter=15151, num_seeds=5, delta_t=0.01, num_iterations=100), TantriConfig(index_seed_starter=1234, num_seeds=100, delta_t=1, num_iterations=200)], num_bin_time_series=25, bin_log_width=0.25), general_config=GeneralConfig(dots_json_name='dots.json', indexes_json_name='indexes.json', out_dir_name='out', log_pattern='%(asctime)s | %(process)d | %(levelname)-7s | %(name)s:%(lineno)d | %(message)s', measurement_type=<MeasurementTypeEnum.POTENTIAL: 'electric-potential'>, root_directory=PosixPath('test_root1'), mega_merged_name='mega_merged_coalesced.csv', mega_merged_inferenced_name='mega_merged_coalesced_inferenced.csv', skip_to_stage=None, check_completions=False), deepdog_config=DeepdogConfig(costs_to_try=[5, 2, 1, 0.5, 0.2], target_success=2000, max_monte_carlo_cycles_steps=20, use_log_noise=True, manual_dot_seeds=None), default_model_param_config=DefaultModelParamConfig(x_min=-20, x_max=20, y_min=-10, y_max=10, z_min=5, z_max=6.5, w_log_min=-5, w_log_max=1))
 # ---
 # name: test_parse_config_geom_params_toml
-  Config(generation_config=GenerationConfig(counts=[1, 5, 10], orientations=[<Orientation.RANDOM: 'RANDOM'>, <Orientation.Z: 'Z'>, <Orientation.XY: 'XY'>], num_replicas=2, override_dipole_configs={'scenario1': [DipoleTO(p=array([3, 5, 7]), s=array([2, 4, 6]), w=10), DipoleTO(p=array([30, 50, 70]), s=array([20, 40, 60]), w=10.55)]}, override_measurement_filesets=None, tantri_configs=[TantriConfig(index_seed_starter=15151, num_seeds=5, delta_t=0.01, num_iterations=100), TantriConfig(index_seed_starter=1234, num_seeds=100, delta_t=1, num_iterations=200)], num_bin_time_series=25, bin_log_width=0.25), general_config=GeneralConfig(dots_json_name='dots.json', indexes_json_name='indexes.json', out_dir_name='out', log_pattern='%(asctime)s | %(process)d | %(levelname)-7s | %(name)s:%(lineno)d | %(message)s', measurement_type=<MeasurementTypeEnum.POTENTIAL: 'electric-potential'>, root_directory=PosixPath('test_root1'), mega_merged_name='mega_merged_coalesced.csv', mega_merged_inferenced_name='mega_merged_coalesced_inferenced.csv', skip_to_stage=None, check_completions=False), deepdog_config=DeepdogConfig(costs_to_try=[5, 2, 1, 0.5, 0.2], target_success=2000, max_monte_carlo_cycles_steps=20, use_log_noise=True), default_model_param_config=DefaultModelParamConfig(x_min=-20, x_max=20, y_min=-10, y_max=10, z_min=0, z_max=2, w_log_min=-3, w_log_max=1.5))
+  Config(generation_config=GenerationConfig(counts=[1, 5, 10], orientations=[<Orientation.RANDOM: 'RANDOM'>, <Orientation.Z: 'Z'>, <Orientation.XY: 'XY'>], num_replicas=2, override_dipole_configs={'scenario1': [DipoleTO(p=array([3, 5, 7]), s=array([2, 4, 6]), w=10), DipoleTO(p=array([30, 50, 70]), s=array([20, 40, 60]), w=10.55)]}, override_measurement_filesets=None, tantri_configs=[TantriConfig(index_seed_starter=15151, num_seeds=5, delta_t=0.01, num_iterations=100), TantriConfig(index_seed_starter=1234, num_seeds=100, delta_t=1, num_iterations=200)], num_bin_time_series=25, bin_log_width=0.25), general_config=GeneralConfig(dots_json_name='dots.json', indexes_json_name='indexes.json', out_dir_name='out', log_pattern='%(asctime)s | %(process)d | %(levelname)-7s | %(name)s:%(lineno)d | %(message)s', measurement_type=<MeasurementTypeEnum.POTENTIAL: 'electric-potential'>, root_directory=PosixPath('test_root1'), mega_merged_name='mega_merged_coalesced.csv', mega_merged_inferenced_name='mega_merged_coalesced_inferenced.csv', skip_to_stage=None, check_completions=False), deepdog_config=DeepdogConfig(costs_to_try=[5, 2, 1, 0.5, 0.2], target_success=2000, max_monte_carlo_cycles_steps=20, use_log_noise=True, manual_dot_seeds=None), default_model_param_config=DefaultModelParamConfig(x_min=-20, x_max=20, y_min=-10, y_max=10, z_min=0, z_max=2, w_log_min=-3, w_log_max=1.5))
 # ---
 # name: test_parse_config_toml
-  Config(generation_config=GenerationConfig(counts=[1, 10], orientations=[<Orientation.RANDOM: 'RANDOM'>, <Orientation.Z: 'Z'>, <Orientation.XY: 'XY'>], num_replicas=3, override_dipole_configs=None, override_measurement_filesets=None, tantri_configs=[TantriConfig(index_seed_starter=31415, num_seeds=100, delta_t=0.05, num_iterations=100000)], num_bin_time_series=25, bin_log_width=0.25), general_config=GeneralConfig(dots_json_name='test_dots.json', indexes_json_name='test_indexes.json', out_dir_name='test_out', log_pattern='%(asctime)s | %(process)d | %(levelname)-7s | %(name)s:%(lineno)d | %(message)s', measurement_type=<MeasurementTypeEnum.X_ELECTRIC_FIELD: 'x-electric-field'>, root_directory=PosixPath('test_root'), mega_merged_name='test_mega_merged.csv', mega_merged_inferenced_name='test_mega_merged_inferenced.csv', skip_to_stage=1, check_completions=False), deepdog_config=DeepdogConfig(costs_to_try=[10, 1, 0.1], target_success=1000, max_monte_carlo_cycles_steps=20, use_log_noise=False), default_model_param_config=DefaultModelParamConfig(x_min=-20, x_max=20, y_min=-10, y_max=10, z_min=5, z_max=6.5, w_log_min=-5, w_log_max=1))
+  Config(generation_config=GenerationConfig(counts=[1, 10], orientations=[<Orientation.RANDOM: 'RANDOM'>, <Orientation.Z: 'Z'>, <Orientation.XY: 'XY'>], num_replicas=3, override_dipole_configs=None, override_measurement_filesets=None, tantri_configs=[TantriConfig(index_seed_starter=31415, num_seeds=100, delta_t=0.05, num_iterations=100000)], num_bin_time_series=25, bin_log_width=0.25), general_config=GeneralConfig(dots_json_name='test_dots.json', indexes_json_name='test_indexes.json', out_dir_name='test_out', log_pattern='%(asctime)s | %(process)d | %(levelname)-7s | %(name)s:%(lineno)d | %(message)s', measurement_type=<MeasurementTypeEnum.X_ELECTRIC_FIELD: 'x-electric-field'>, root_directory=PosixPath('test_root'), mega_merged_name='test_mega_merged.csv', mega_merged_inferenced_name='test_mega_merged_inferenced.csv', skip_to_stage=1, check_completions=False), deepdog_config=DeepdogConfig(costs_to_try=[10, 1, 0.1], target_success=1000, max_monte_carlo_cycles_steps=20, use_log_noise=False, manual_dot_seeds=None), default_model_param_config=DefaultModelParamConfig(x_min=-20, x_max=20, y_min=-10, y_max=10, z_min=5, z_max=6.5, w_log_min=-5, w_log_max=1))
 # ---