fmt: formatting changes
parent c881da2837
commit e76c619c8b
@@ -42,11 +42,11 @@ def parse_args() -> argparse.Namespace:
     confirm_outfile_overwrite_group.add_argument(
         "--never-overwrite-outfile",
         action="store_true",
-        help="If a duplicate outfile is detected, skip confirmation and automatically exit early"
+        help="If a duplicate outfile is detected, skip confirmation and automatically exit early",
     )
     confirm_outfile_overwrite_group.add_argument(
         "--force-overwrite-outfile",
         action="store_true",
-        help="Skips checking for duplicate outfiles and overwrites"
+        help="Skips checking for duplicate outfiles and overwrites",
     )
     return parser.parse_args()
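The hunk above only touches the two add_argument calls; the parser and the mutually exclusive group they belong to are created earlier in the file and are not shown here. A minimal sketch, assuming a plain argparse.ArgumentParser with add_mutually_exclusive_group (the parser construction below is illustrative, not taken from the commit):

import argparse

def parse_args() -> argparse.Namespace:
    # Illustrative parser; the real one in deepdog carries its own name and description.
    parser = argparse.ArgumentParser(description="illustrative probs-style CLI")
    # Only one of the two overwrite flags may be passed at a time.
    confirm_outfile_overwrite_group = parser.add_mutually_exclusive_group()
    confirm_outfile_overwrite_group.add_argument(
        "--never-overwrite-outfile",
        action="store_true",
        help="If a duplicate outfile is detected, skip confirmation and automatically exit early",
    )
    confirm_outfile_overwrite_group.add_argument(
        "--force-overwrite-outfile",
        action="store_true",
        help="Skips checking for duplicate outfiles and overwrites",
    )
    return parser.parse_args()

With this wiring, passing both flags at once makes argparse exit with a usage error instead of letting the two options contradict each other.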
@@ -34,9 +34,13 @@ def build_model_dict(
             calculation_dict[calculation_key] = {
                 "_model_key_dict": model_result.parsed_model_keys,
                 "_calculation_key_dict": out.data,
-                "num_finished_runs": int(model_result.result_dict["num_finished_runs"]),
+                "num_finished_runs": int(
+                    model_result.result_dict["num_finished_runs"]
+                ),
                 "num_runs": int(model_result.result_dict["num_runs"]),
-                "estimated_likelihood": float(model_result.result_dict["estimated_likelihood"]),
+                "estimated_likelihood": float(
+                    model_result.result_dict["estimated_likelihood"]
+                ),
             }
         else:
             raise ValueError(
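The int(...) and float(...) casts above suggest that model_result.result_dict holds string values (for example, a row read back from a CSV file). A small stand-alone sketch of that coercion pattern, with made-up data:

# Made-up result row; in deepdog these values come from model_result.result_dict.
result_dict = {
    "num_finished_runs": "190",
    "num_runs": "200",
    "estimated_likelihood": "0.000123",
}

calculation_entry = {
    "num_finished_runs": int(result_dict["num_finished_runs"]),
    "num_runs": int(result_dict["num_runs"]),
    "estimated_likelihood": float(result_dict["estimated_likelihood"]),
}
print(calculation_entry)  # values are now numeric, ready for arithmetic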
@@ -46,7 +50,6 @@ def build_model_dict(
     return model_dict


-
 def coalesced_dict(
     uncoalesced_model_dict: typing.Dict[
         typing.Tuple, typing.Dict[typing.Tuple, typing.Dict["str", typing.Any]]
@@ -77,7 +80,6 @@ def coalesced_dict(
             _logger.error(f"We shouldn't be here! Double key for {model_key=}")
             raise ValueError()

-
     # second pass do probability calculation

     prior = 1 / num_keys
@@ -114,7 +116,9 @@ def write_coalesced_dict(
     _logger.info(f"Detected model field names {model_field_names}")

     collected_fieldnames = list(model_field_names)
-    collected_fieldnames.extend(["calculations_coalesced", "num_finished_runs", "num_runs", "prob"])
+    collected_fieldnames.extend(
+        ["calculations_coalesced", "num_finished_runs", "num_runs", "prob"]
+    )
     with open(coalesced_output_filename, "w", newline="") as coalesced_output_file:
         writer = csv.DictWriter(coalesced_output_file, fieldnames=collected_fieldnames)
         writer.writeheader()
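write_coalesced_dict builds its CSV header from the detected model field names plus the four fixed columns extended above. A minimal sketch of that csv.DictWriter pattern, assuming a single model field and an invented row (the real field names are detected from the parsed results):

import csv

# Assumed model field for illustration only.
model_field_names = ["model_name"]
collected_fieldnames = list(model_field_names)
collected_fieldnames.extend(
    ["calculations_coalesced", "num_finished_runs", "num_runs", "prob"]
)

with open("coalesced_example.csv", "w", newline="") as coalesced_output_file:
    writer = csv.DictWriter(coalesced_output_file, fieldnames=collected_fieldnames)
    writer.writeheader()
    # One invented row, just to show the shape the writer expects.
    writer.writerow(
        {
            "model_name": "example-model",
            "calculations_coalesced": 2,
            "num_finished_runs": 190,
            "num_runs": 200,
            "prob": 0.5,
        }
    )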
@@ -45,20 +45,25 @@ def main(args: argparse.Namespace):
     if "outfile" in args and args.outfile:
         if os.path.exists(args.outfile):
             if args.never_overwrite_outfile:
-                _logger.warning(f"Filename {args.outfile} already exists, and never want overwrite, so aborting.")
+                _logger.warning(
+                    f"Filename {args.outfile} already exists, and never want overwrite, so aborting."
+                )
                 return
             elif args.force_overwrite_outfile:
                 _logger.warning(f"Forcing overwrite of {args.outfile}")
             else:
                 # need to confirm
-                confirm_overwrite = deepdog.cli.util.confirm_prompt(f"Filename {args.outfile} exists, overwrite?")
+                confirm_overwrite = deepdog.cli.util.confirm_prompt(
+                    f"Filename {args.outfile} exists, overwrite?"
+                )
                 if not confirm_overwrite:
-                    _logger.warning(f"Filename {args.outfile} already exists and do not want overwrite, aborting.")
+                    _logger.warning(
+                        f"Filename {args.outfile} already exists and do not want overwrite, aborting."
+                    )
                     return
                 else:
                     _logger.warning(f"Overwriting file {args.outfile}")

-
     indexifier = None
     if args.indexify_json:
         with open(args.indexify_json, "r") as indexify_json_file:
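The branch above implements a three-way policy: --never-overwrite-outfile aborts without asking, --force-overwrite-outfile proceeds without asking, and otherwise the user is prompted. A hedged sketch of that decision factored into a standalone helper (not deepdog's API; confirm_prompt is passed in to stand for deepdog.cli.util.confirm_prompt):

import os
import typing

def should_write_outfile(
    outfile: str,
    never_overwrite: bool,
    force_overwrite: bool,
    confirm_prompt: typing.Callable[[str], bool],
) -> bool:
    # No existing file: nothing to protect, go ahead.
    if not os.path.exists(outfile):
        return True
    # --never-overwrite-outfile: skip confirmation and bail out early.
    if never_overwrite:
        return False
    # --force-overwrite-outfile: skip confirmation and overwrite.
    if force_overwrite:
        return True
    # Default: ask interactively.
    return confirm_prompt(f"Filename {outfile} exists, overwrite?")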
@@ -73,7 +78,9 @@ def main(args: argparse.Namespace):
         indexifier = deepdog.indexify.Indexifier(indexify_data)

     results_dir = pathlib.Path(args.results_directory)
-    out_files = [f for f in results_dir.iterdir() if f.name.endswith("subsetsim.csv")]
+    out_files = [
+        f for f in results_dir.iterdir() if f.name.endswith("subsetsim.csv")
+    ]
     _logger.info(
         f"Reading {len(out_files)} subsetsim.csv files in directory {args.results_directory}"
     )
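The list comprehension keeps every entry of the results directory whose name ends in subsetsim.csv. An equivalent selection using pathlib's glob is shown below purely for comparison; it is an alternative, not what this commit does:

import pathlib

results_dir = pathlib.Path(".")  # stand-in for args.results_directory
# "*subsetsim.csv" matches the same trailing pattern the comprehension checks for.
out_files = sorted(results_dir.glob("*subsetsim.csv"))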
@@ -1,3 +1,3 @@
 from deepdog.cli.util.confirm import confirm_prompt

 __all__ = ["confirm_prompt"]
@@ -8,12 +8,12 @@ _RESPONSE_MAP = {
     "n": False,
     "nope": False,
     "true": True,
-    "false": False
+    "false": False,
 }

+
 def confirm_prompt(question: str) -> bool:
-    """ Prompt with the question and returns yes or no based on response.
-    """
+    """Prompt with the question and returns yes or no based on response."""
     prompt = question + " [y/n]: "

     while True:
@@ -22,4 +22,4 @@ def confirm_prompt(question: str) -> bool:
         if choice in _RESPONSE_MAP:
             return _RESPONSE_MAP[choice]
         else:
-            print(f"Respond with \"yes\" or \"no\"")
+            print(f'Respond with "yes" or "no"')
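A short usage sketch for confirm_prompt as exported from deepdog.cli.util (the question text here is invented). As the two hunks above show, it keeps prompting until the reply matches one of the _RESPONSE_MAP keys, such as "n", "nope", "true", or "false":

from deepdog.cli.util import confirm_prompt

if confirm_prompt("Overwrite the example outfile?"):
    print("overwriting")
else:
    print("aborting")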