Compare commits
8 Commits
a47342ce7e...18c99acdd4
| Author | SHA1 | Date |
|---|---|---|
| | 18c99acdd4 | |
| | 700f32ea58 | |
| | 3737252c4b | |
| | 6f79a49e59 | |
| | d962ecb11e | |
| | 7beca501bf | |
| | 5425ce1362 | |
| | 6a5c5931d4 | |
CHANGELOG.md (+14)
@@ -2,6 +2,20 @@
 
 All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.
 
+## [1.4.0](https://gitea.deepak.science:2222/physics/deepdog/compare/1.3.0...1.4.0) (2024-09-04)
+
+
+### Features
+
+* add subset sim probs command for bayes for subset simulation results ([c881da2](https://gitea.deepak.science:2222/physics/deepdog/commit/c881da28370a1e51d062e1a7edaa62af6eb98d0a))
+* allows some betetr matching for single_dipole runs ([5425ce1](https://gitea.deepak.science:2222/physics/deepdog/commit/5425ce1362919af4cc4dbd5813df3be8d877b198))
+* indexifier now has len ([d962ecb](https://gitea.deepak.science:2222/physics/deepdog/commit/d962ecb11e929de1d9aa458b5d8e82270eff0039))
+
+
+### Bug Fixes
+
+* update log file arg names in cli scripts ([6a5c593](https://gitea.deepak.science:2222/physics/deepdog/commit/6a5c5931d4fc849d0d6a0f2b971523a0f039d559))
+
 ## [1.3.0](https://gitea.deepak.science:2222/physics/deepdog/compare/1.2.1...1.3.0) (2024-05-20)
@@ -13,7 +13,7 @@ def parse_args() -> argparse.Namespace:
         "probs", description="Calculating probability from finished bayesrun"
     )
     parser.add_argument(
-        "--log_file",
+        "--log-file",
        type=str,
        help="A filename for logging to, if not provided will only log to stderr",
        default=None,
@@ -14,7 +14,7 @@ def parse_args() -> argparse.Namespace:
         description="Calculating probability from finished subset sim run",
     )
     parser.add_argument(
-        "--log_file",
+        "--log-file",
        type=str,
        help="A filename for logging to, if not provided will only log to stderr",
        default=None,
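The rename only changes the flag's spelling on the command line; argparse still exposes the value as `args.log_file`, because dashes in optional flags are converted to underscores in the namespace. A minimal sketch of the new invocation, with the surrounding subparser setup assumed rather than taken from the repository:

```python
import argparse

# Minimal sketch of the renamed flag; the real scripts build this inside parse_args().
parser = argparse.ArgumentParser(
    description="Calculating probability from finished bayesrun"
)
parser.add_argument(
    "--log-file",
    type=str,
    help="A filename for logging to, if not provided will only log to stderr",
    default=None,
)

# argparse maps "--log-file" to the attribute "log_file" on the namespace.
args = parser.parse_args(["--log-file", "probs.log"])
assert args.log_file == "probs.log"
```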
@@ -36,8 +36,8 @@ class DirectMonteCarloConfig:
     tag: str = ""
     cap_core_count: int = 0  # 0 means cap at num cores - 1
     chunk_size: int = 50
-    write_bayesrun_file = True
-    bayesrun_file_timestamp = True
+    write_bayesrun_file: bool = True
+    bayesrun_file_timestamp: bool = True
     # chunk size of some kind
 
 
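The added `: bool` annotations matter because `DirectMonteCarloConfig` appears to be a dataclass (the surrounding fields are all annotated with defaults): without an annotation, `write_bayesrun_file = True` is a plain class attribute rather than a generated field, so it cannot be set through the constructor. A minimal sketch of the distinction, using a stand-in class rather than the real config:

```python
from dataclasses import dataclass, fields

@dataclass
class SketchConfig:
    chunk_size: int = 50
    write_bayesrun_file: bool = True      # annotated: a real dataclass field
    bayesrun_file_timestamp: bool = True  # annotated: a real dataclass field

# Both booleans are now constructor arguments and show up in fields().
cfg = SketchConfig(write_bayesrun_file=False)
print([f.name for f in fields(SketchConfig)])
# ['chunk_size', 'write_bayesrun_file', 'bayesrun_file_timestamp']
print(cfg.write_bayesrun_file)  # False
```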
@@ -145,15 +145,21 @@ class DirectMonteCarloRun:
         single run wrapped up for multiprocessing call.
 
         takes in a tuple of arguments corresponding to
-        (model_name_pair, seed)
+        (model_name_pair, seed, return_configs)
+
+        return_configs is a boolean, if true then will return tuple of (count, [matching configs])
+        if false, return (count, [])
         """
         # here's where we do our work
 
-        model_name_pair, seed = args
+        model_name_pair, seed, return_configs = args
         cycle_success_configs = self._single_run(model_name_pair, seed)
         cycle_success_count = len(cycle_success_configs)
 
-        return cycle_success_count
+        if return_configs:
+            return (cycle_success_count, cycle_success_configs)
+        else:
+            return (cycle_success_count, [])
 
     def execute_no_multiprocessing(self) -> Sequence[DirectMonteCarloResult]:
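The wrapper's argument tuple grows from `(model_name_pair, seed)` to `(model_name_pair, seed, return_configs)`, and the return value changes from a bare count to a `(count, configs)` pair. A hypothetical stand-alone sketch of that calling convention (the real method also runs the Monte Carlo cycle via `self._single_run`, omitted here):

```python
from typing import Any, List, Tuple

def wrapped_single_run(args: Tuple[Any, Any, bool]) -> Tuple[int, List[Any]]:
    # Unpack the enlarged argument tuple.
    model_name_pair, seed, return_configs = args
    cycle_success_configs: List[Any] = []  # stand-in for the real cycle's successes
    cycle_success_count = len(cycle_success_configs)
    if return_configs:
        return (cycle_success_count, cycle_success_configs)
    return (cycle_success_count, [])

# Callers always receive a (count, configs) pair now, so count-only consumers take [0].
count, configs = wrapped_single_run((("model", "a-model"), 1234, False))
```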
@@ -198,9 +204,11 @@ class DirectMonteCarloRun:
             )
             dipole_count = numpy.array(cycle_success_configs).shape[1]
             for n in range(dipole_count):
+                number_dipoles_to_write = self.config.target_success * 5
+                _logger.info(f"Limiting to {number_dipoles_to_write=}")
                 numpy.savetxt(
                     f"{self.config.tag}_{step_count}_{cycle_i}_dipole_{n}.csv",
-                    sorted_by_freq[:, n],
+                    sorted_by_freq[:number_dipoles_to_write, n],
                     delimiter=",",
                 )
         total_success += cycle_success_count
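The new slice caps how many success rows get written per dipole at `target_success * 5` rather than dumping every success. A minimal sketch of the slicing on a dummy array (the names and shape here are illustrative, not the real run data):

```python
import numpy

target_success = 1000
number_dipoles_to_write = target_success * 5  # cap on rows written per dipole

# Dummy stand-in for sorted_by_freq: (n_successes, n_dipoles, n_params).
sorted_by_freq = numpy.zeros((12_000, 2, 7))

# The new slice keeps only the first number_dipoles_to_write successes for dipole n.
n = 0
limited = sorted_by_freq[:number_dipoles_to_write, n]
print(limited.shape)  # (5000, 7)
```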
@@ -259,13 +267,55 @@ class DirectMonteCarloRun:
 
                 seeds = seed_sequence.spawn(self.config.monte_carlo_cycles)
 
-                pool_results = sum(
+                raw_pool_results = list(
                     pool.imap_unordered(
                         self._wrapped_single_run,
-                        [(model_name_pair, seed) for seed in seeds],
+                        [
+                            (
+                                model_name_pair,
+                                seed,
+                                self.config.write_successes_to_file,
+                            )
+                            for seed in seeds
+                        ],
                         self.config.chunk_size,
                     )
                 )
 
+                pool_results = sum(result[0] for result in raw_pool_results)
+
+                if self.config.write_successes_to_file:
+                    cycle_success_configs = numpy.concatenate(
+                        [result[1] for result in raw_pool_results]
+                    )
+                    if len(cycle_success_configs):
+
+                        sorted_by_freq = numpy.array(
+                            [
+                                pdme.subspace_simulation.sort_array_of_dipoles_by_frequency(
+                                    dipole_config
+                                )
+                                for dipole_config in cycle_success_configs
+                            ]
+                        )
+                        dipole_count = numpy.array(cycle_success_configs).shape[1]
+
+                        number_dipoles_to_write = self.config.target_success * 5
+                        _logger.info(
+                            f"Limiting to {number_dipoles_to_write=}, have {dipole_count}"
+                        )
+
+                        for n in range(dipole_count):
+                            numpy.savetxt(
+                                f"{self.config.tag}_{step_count}_dipole_{n}.csv",
+                                sorted_by_freq[::number_dipoles_to_write, n],
+                                delimiter=",",
+                            )
+                    else:
+                        _logger.debug(
+                            "Instructed to write results, but none obtained"
+                        )
+
                 _logger.debug(f"Pool results: {pool_results}")
 
                 total_success += pool_results
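With the wrapper now returning `(count, configs)` pairs, the pool output can no longer be summed directly: the new code materialises the iterator with `list(...)`, sums the first elements for the success count, and concatenates the second elements when `write_successes_to_file` is set. A minimal sketch of just that reduction over plain tuples (no real pool, and no pdme sorting call, only the bookkeeping):

```python
import numpy

# Stand-in for list(pool.imap_unordered(...)): each entry is (count, configs).
raw_pool_results = [
    (2, numpy.ones((2, 1, 7))),
    (0, numpy.empty((0, 1, 7))),
    (1, numpy.ones((1, 1, 7))),
]

# Total successes across all cycles.
pool_results = sum(result[0] for result in raw_pool_results)

write_successes_to_file = True
if write_successes_to_file:
    # Stack every cycle's matching configs into one array for sorting and writing.
    cycle_success_configs = numpy.concatenate(
        [result[1] for result in raw_pool_results]
    )
    print(pool_results, cycle_success_configs.shape)  # 3 (3, 1, 7)
```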
@@ -36,6 +36,10 @@ class Indexifier:
     def indexify(self, n: int) -> typing.Dict[str, typing.Any]:
         return self.product_dict[n]
 
+    def __len__(self) -> int:
+        weights = [len(v) for v in self.dict.values()]
+        return math.prod(weights)
+
     def _indexify_indices(self, n: int) -> typing.Sequence[int]:
         """
         legacy indexify from old scripts, copypast.
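The new `__len__` is the product of the value-list lengths in the underlying dict, i.e. the number of combinations the cartesian-product indexify can address. A minimal sketch of the same computation on a plain dict, assuming an Indexifier-style weight dict as in the tests below:

```python
import math

# Stand-in for the dict the Indexifier wraps: parameter name -> list of values.
weight_dict = {"key_1": [1, 2, 3], "key_2": ["a", "b", "c"]}

weights = [len(v) for v in weight_dict.values()]
print(math.prod(weights))  # 9, matching the new assert len(indexifier) == 9
```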
@@ -8,6 +8,7 @@ FILE_SLUG_REGEXES = [
        r"(?P<tag>\w+)-(?P<job_index>\d+)",
        r"mock_tarucha-(?P<job_index>\d+)",
        r"(?:(?P<mock>mock)_)?tarucha(?:_(?P<tarucha_run_id>\d+))?-(?P<job_index>\d+)",
+       r"(?P<tag>\w+)-(?P<included_dots>[\w,]+)-(?P<target_cost>\d*\.?\d+)-(?P<job_index>\d+)",
     ]
 ]
 
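The added slug pattern captures a tag, a comma-separated list of included dots, a possibly fractional target cost, and a job index. A minimal sketch of what it matches, applying the same pattern with Python's `re` to a hypothetical filename slug:

```python
import re

pattern = re.compile(
    r"(?P<tag>\w+)-(?P<included_dots>[\w,]+)-(?P<target_cost>\d*\.?\d+)-(?P<job_index>\d+)"
)

# Hypothetical slug in the new format: tag, dot list, target cost, job index.
match = pattern.fullmatch("myrun-dot1,dot2-0.5-17")
assert match is not None
print(match.groupdict())
# {'tag': 'myrun', 'included_dots': 'dot1,dot2', 'target_cost': '0.5', 'job_index': '17'}
```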
@@ -1,13 +1,13 @@
 [tool.poetry]
 name = "deepdog"
-version = "1.3.0"
+version = "1.4.0"
 description = ""
 authors = ["Deepak Mallubhotla <dmallubhotla+github@gmail.com>"]
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<3.10"
 pdme = "^1.5.0"
-numpy = "2.1.1"
+numpy = "2.1.2"
 scipy = "1.10"
 tqdm = "^4.66.2"
 
@@ -10,3 +10,12 @@ def test_indexifier():
     _logger.debug(f"setting up indexifier {indexifier}")
     assert indexifier.indexify(0) == {"key_1": 1, "key_2": "a"}
     assert indexifier.indexify(5) == {"key_1": 2, "key_2": "c"}
+    assert len(indexifier) == 9
+
+
+def test_indexifier_length_short():
+    weight_dict = {"key_1": [1, 2, 3], "key_2": ["b", "c"]}
+    indexifier = deepdog.indexify.Indexifier(weight_dict)
+    _logger.debug(f"setting up indexifier {indexifier}")
+
+    assert len(indexifier) == 6