Compare commits
143 Commits
Author | SHA1 | Date | |
---|---|---|---|
57cd746e5c
|
|||
878e16286b
|
|||
4726ccfb8c
|
|||
598dad1e6d
|
|||
01c0d7e49b
|
|||
a170a3ce01
|
|||
9bb8fc50fe
|
|||
f775ed34c6
|
|||
7d0c2b22cc | |||
d6e6876a79
|
|||
fccf50eb27
|
|||
33cab9ab41
|
|||
ad521ba472
|
|||
266d6dd583
|
|||
c573f8806d | |||
a015daf5ff
|
|||
a089951bbe
|
|||
7568aef842
|
|||
c4b6cbbb6f | |||
1cf4454153
|
|||
bf15f4a7b7
|
|||
12903b2540
|
|||
959b9af378
|
|||
8fd1b75e13
|
|||
17ae84879d
|
|||
fc2880ba2f
|
|||
589c16f25c
|
|||
743c3e22ae
|
|||
b3e2acd79c
|
|||
de1ec3e700
|
|||
f4964a19ea
|
|||
08d73c73e9 | |||
7ea1d715f6
|
|||
ed102799d1 | |||
0b8d14ef48 | |||
a5d0d257d7 | |||
6ee995e561 | |||
a217ad2c75
|
|||
039f68ee97
|
|||
e9dd21f69b
|
|||
8303fc7860 | |||
2418e3a263 | |||
73465203b2 | |||
01ba4af229 | |||
2c5c122820
|
|||
0a1a27759b
|
|||
558a4df643 | |||
6f141af0fe | |||
2c99fcf687
|
|||
ad0ace4da3
|
|||
3f1265e3ec
|
|||
969f01e9c5
|
|||
b282ffa800 | |||
91e9e5198e | |||
d7e0f13ca5
|
|||
74de2b0433
|
|||
c036028902
|
|||
690ad9e288 | |||
bd56f24774
|
|||
362388363f
|
|||
252b4a4414 | |||
bb21355f5e
|
|||
df8977655d
|
|||
5d0a7a4be0
|
|||
67a9721c31
|
|||
b5e0ecb528
|
|||
feeb03b27c
|
|||
b7da3d61cc
|
|||
9afa209864
|
|||
ae8977bb1e
|
|||
0caad05e3c
|
|||
eec926aaac
|
|||
23b202beb8
|
|||
6e29f7a702
|
|||
31070b5342
|
|||
101569d749
|
|||
874d876c9d
|
|||
3dca288177
|
|||
bd0b375751 | |||
0fabd8f7fb | |||
3ea3d1dc56 | |||
edf0ba6532
|
|||
a487309549 | |||
42829c0327
|
|||
349341b405
|
|||
50dbc4835e
|
|||
0954429e2d
|
|||
4c06b3912c
|
|||
5684af783e
|
|||
f00b29391c
|
|||
492a5e6681
|
|||
e9277c3da7
|
|||
1e2657adad
|
|||
f168666045
|
|||
604916a829 | |||
941313a14c
|
|||
cb64c0b7b6
|
|||
ec7b4cac39
|
|||
31e6cfaf51
|
|||
c1c711f47b
|
|||
6463b135ef
|
|||
a283cbd670 | |||
0b45172ca0 | |||
b6383d0a47 | |||
450d8e0ec9 | |||
f81904a898
|
|||
88d961313c
|
|||
fa82caa752
|
|||
0784cd53d7
|
|||
fb4b012491
|
|||
8617e4d274
|
|||
fe2af1644e
|
|||
e6d8d33c27
|
|||
e00dc95f02
|
|||
527be26fb2
|
|||
456c81bca5
|
|||
7284dbeb34
|
|||
d078004773
|
|||
0441cde421
|
|||
a9e91779bc
|
|||
751bc66704 | |||
413ff16acc | |||
5118173f09
|
|||
|
6dfc26104a
|
||
3a6be738b1
|
|||
bd240900b4
|
|||
0e1fbec043
|
|||
3d3b1a83f6
|
|||
63cecba824
|
|||
344998835d
|
|||
838aeb0cf3
|
|||
e715d329fd
|
|||
521b49f14c
|
|||
6d65e8dec5
|
|||
36354c2f2c
|
|||
3534593557
|
|||
6ada52f82c
|
|||
34e124b18f
|
|||
c1591c9ce9
|
|||
10358287d9
|
|||
d4f6e2f99e
|
|||
d64939f346
|
|||
0c6490dad7
|
4
.gitignore
vendored
4
.gitignore
vendored
@@ -114,6 +114,10 @@ ENV/
|
|||||||
env.bak/
|
env.bak/
|
||||||
venv.bak/
|
venv.bak/
|
||||||
|
|
||||||
|
# direnv
|
||||||
|
.envrc
|
||||||
|
.direnv
|
||||||
|
|
||||||
# Spyder project settings
|
# Spyder project settings
|
||||||
.spyderproject
|
.spyderproject
|
||||||
.spyproject
|
.spyproject
|
||||||
|
10
.versionrc
Normal file
10
.versionrc
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
{
|
||||||
|
"bumpFiles": [
|
||||||
|
{
|
||||||
|
"filename": "pyproject.toml",
|
||||||
|
"updater": "scripts/standard-version/pyproject-updater.js"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"sign": true,
|
||||||
|
"tag-prefix": ""
|
||||||
|
}
|
202
CHANGELOG.md
Normal file
202
CHANGELOG.md
Normal file
@@ -0,0 +1,202 @@
|
|||||||
|
# Changelog
|
||||||
|
|
||||||
|
All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines.
|
||||||
|
|
||||||
|
### [0.7.3](https://gitea.deepak.science:2222/physics/deepdog/compare/0.7.2...0.7.3) (2023-07-27)
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* adds utility options and avoids memory leak ([598dad1](https://gitea.deepak.science:2222/physics/deepdog/commit/598dad1e6dc8fc0b7a5b4a90c8e17bf744e8d98c))
|
||||||
|
|
||||||
|
### [0.7.2](https://gitea.deepak.science:2222/physics/deepdog/compare/0.7.1...0.7.2) (2023-07-24)
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* clamps results now ([9bb8fc5](https://gitea.deepak.science:2222/physics/deepdog/commit/9bb8fc50fe1bd1a285a333c5a396bfb6ac3176cf))
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* fixes clamping format etc. ([a170a3c](https://gitea.deepak.science:2222/physics/deepdog/commit/a170a3ce01adcec356e5aaab9abcc0ec4accd64b))
|
||||||
|
|
||||||
|
### [0.7.1](https://gitea.deepak.science:2222/physics/deepdog/compare/0.7.0...0.7.1) (2023-07-24)
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* adds subset simulation stuff ([33cab9a](https://gitea.deepak.science:2222/physics/deepdog/commit/33cab9ab4179cec13ae9e591a8ffc32df4dda989))
|
||||||
|
|
||||||
|
## [0.7.0](https://gitea.deepak.science:2222/physics/deepdog/compare/0.6.7...0.7.0) (2023-05-01)
|
||||||
|
|
||||||
|
|
||||||
|
### ⚠ BREAKING CHANGES
|
||||||
|
|
||||||
|
* removes fastfilter parameter because it should never be needed
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* adds pair capability to real spectrum run hopefully ([a089951](https://gitea.deepak.science:2222/physics/deepdog/commit/a089951bbefcd8a0b2efeb49b7a8090412cbb23d))
|
||||||
|
* removes fastfilter parameter because it should never be needed ([a015daf](https://gitea.deepak.science:2222/physics/deepdog/commit/a015daf5ff6fa5f6155c8d7e02981b588840a5b0))
|
||||||
|
|
||||||
|
### [0.6.7](https://gitea.deepak.science:2222/physics/deepdog/compare/0.6.6...0.6.7) (2023-04-14)
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* adds option to cap core count for real spectrum run ([bf15f4a](https://gitea.deepak.science:2222/physics/deepdog/commit/bf15f4a7b7f59504983624e7d512ed7474372032))
|
||||||
|
* adds option to cap core count for temp aware run ([12903b2](https://gitea.deepak.science:2222/physics/deepdog/commit/12903b2540cefb040174d230bc0d04719a6dc1b7))
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* avoids redefinition of core count in loop ([1cf4454](https://gitea.deepak.science:2222/physics/deepdog/commit/1cf44541531541088198bd4599d467df3e1acbcf))
|
||||||
|
|
||||||
|
### [0.6.6](https://gitea.deepak.science:2222/physics/deepdog/compare/0.6.5...0.6.6) (2023-04-09)
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* removes bad logging in multiprocessing function ([8fd1b75](https://gitea.deepak.science:2222/physics/deepdog/commit/8fd1b75e1378301210bfa8f14dd09174bbd21414))
|
||||||
|
|
||||||
|
### [0.6.5](https://gitea.deepak.science:2222/physics/deepdog/compare/0.6.4...0.6.5) (2023-04-09)
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* adds temp aware guy using new pdme temp-flexible feature for bundling temp models ([de1ec3e](https://gitea.deepak.science:2222/physics/deepdog/commit/de1ec3e70062d418e0d4c89716905cc9313d2e26))
|
||||||
|
|
||||||
|
### [0.6.4](https://gitea.deepak.science:2222/physics/deepdog/compare/0.6.3...0.6.4) (2022-08-13)
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* Prints model names while running ([7ea1d71](https://gitea.deepak.science:2222/physics/deepdog/commit/7ea1d715f67e81c9fa841c5a62f1cc700ff7363d))
|
||||||
|
|
||||||
|
### [0.6.3](https://gitea.deepak.science:2222/physics/deepdog/compare/0.6.2...0.6.3) (2022-06-12)
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* adds fast filter variant ([2c5c122](https://gitea.deepak.science:2222/physics/deepdog/commit/2c5c1228209e51d17253f07470e2f1e6dc6872d7))
|
||||||
|
* adds tester for fast filter real spectrum ([0a1a277](https://gitea.deepak.science:2222/physics/deepdog/commit/0a1a27759b0d4ab01da214b76ab14bf2b1fe00e3))
|
||||||
|
|
||||||
|
### [0.6.2](https://gitea.deepak.science:2222/physics/deepdog/compare/0.6.1...0.6.2) (2022-05-26)
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* adds better import api for real data run ([d7e0f13](https://gitea.deepak.science:2222/physics/deepdog/commit/d7e0f13ca55197b24cb534c80f321ee76b9c4a40))
|
||||||
|
|
||||||
|
### [0.6.1](https://gitea.deepak.science:2222/physics/deepdog/compare/0.6.0...0.6.1) (2022-05-22)
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* adds new runner for real spectra ([bd56f24](https://gitea.deepak.science:2222/physics/deepdog/commit/bd56f247748babb2ee1f2a1182d25aa968bff5a5))
|
||||||
|
|
||||||
|
## [0.6.0](https://gitea.deepak.science:2222/physics/deepdog/compare/0.5.0...0.6.0) (2022-05-22)
|
||||||
|
|
||||||
|
|
||||||
|
### ⚠ BREAKING CHANGES
|
||||||
|
|
||||||
|
* bayes run now handles multidipoles with changes to output file format etc.
|
||||||
|
* logs multiple dipoles better maybe
|
||||||
|
* switches over to pdme new stuff, uses models and scraps discretisations entirely
|
||||||
|
* removes alt_bayes bayes distinction, which was superfluous when only alt worked
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* adds pdme 0.7.0 for multiprocessing ([874d876](https://gitea.deepak.science:2222/physics/deepdog/commit/874d876c9d774433b034d47c4cc0cdac41e6f2c7))
|
||||||
|
* bayes run now handles multidipoles with changes to output file format etc. ([5d0a7a4](https://gitea.deepak.science:2222/physics/deepdog/commit/5d0a7a4be09c58f8f8f859384f01d7912a98b8b9))
|
||||||
|
* logs multiple dipoles better maybe ([ae8977b](https://gitea.deepak.science:2222/physics/deepdog/commit/ae8977bb1e4d6cd71e88ea0876da8f4318e030b6))
|
||||||
|
* removes alt_bayes bayes distinction, which was superfluous when only alt worked ([101569d](https://gitea.deepak.science:2222/physics/deepdog/commit/101569d749e4f3f1842886aa2fd3321b8132278b))
|
||||||
|
* switches over to pdme new stuff, uses models and scraps discretisations entirely ([6e29f7a](https://gitea.deepak.science:2222/physics/deepdog/commit/6e29f7a702b578c266a42bba23ac973d155ada10))
|
||||||
|
* Uses multidipole for bayes run, with more verbose output ([df89776](https://gitea.deepak.science:2222/physics/deepdog/commit/df8977655de977fd3c4f7383dd9571e551eb1382))
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* another bug fix for csv generation ([b7da3d6](https://gitea.deepak.science:2222/physics/deepdog/commit/b7da3d61cc5c128cba1d2fcb3770b71b7f6fc4b8))
|
||||||
|
* fixes crash when dipole count is smaller than expected max during file write ([b5e0ecb](https://gitea.deepak.science:2222/physics/deepdog/commit/b5e0ecb52886b32d9055302eacfabb69338026b4))
|
||||||
|
* fixes format string in csv output for headers ([9afa209](https://gitea.deepak.science:2222/physics/deepdog/commit/9afa209864cdb9255988778e987fe05952848fd4))
|
||||||
|
* fixes random issue ([eec926a](https://gitea.deepak.science:2222/physics/deepdog/commit/eec926aaac654f78942b4c6b612e4d1cdcbf81dc))
|
||||||
|
* moves logging successes to after they've actually happened ([0caad05](https://gitea.deepak.science:2222/physics/deepdog/commit/0caad05e3cc6a9adba8bf937c3d2f944e1b096a3))
|
||||||
|
* now doesn't double randomise frequency ([23b202b](https://gitea.deepak.science:2222/physics/deepdog/commit/23b202beb81cb89f7f20b691e83116fa53764902))
|
||||||
|
* whoops deleted word multiprocessing ([31070b5](https://gitea.deepak.science:2222/physics/deepdog/commit/31070b5342c265d930b4c51402f42a3ee2415066))
|
||||||
|
|
||||||
|
## [0.5.0](https://gitea.deepak.science:2222/physics/deepdog/compare/0.4.0...0.5.0) (2022-04-30)
|
||||||
|
|
||||||
|
|
||||||
|
### ⚠ BREAKING CHANGES
|
||||||
|
|
||||||
|
* simulpairs now uses different rng calculator
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* adds simulpairs run ([e9277c3](https://gitea.deepak.science:2222/physics/deepdog/commit/e9277c3da777359feb352c0b19f3bb029248ba2f))
|
||||||
|
* has better parallelisation ([edf0ba6](https://gitea.deepak.science:2222/physics/deepdog/commit/edf0ba6532c0588fce32341709cdb70e384b83f4))
|
||||||
|
* simulpairs now uses different rng calculator ([50dbc48](https://gitea.deepak.science:2222/physics/deepdog/commit/50dbc4835e60bace9e9b4ba37415f073a3c9e479))
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* better parallelisation hopefully ([42829c0](https://gitea.deepak.science:2222/physics/deepdog/commit/42829c0327e080e18be2fb75e746f6ac0d7c2f6d))
|
||||||
|
* Makes altbayessimulpairs available in package ([492a5e6](https://gitea.deepak.science:2222/physics/deepdog/commit/492a5e6681c85f95840e28cfd5d4ce4ca1d54eba))
|
||||||
|
* stronger names ([0954429](https://gitea.deepak.science:2222/physics/deepdog/commit/0954429e2d015a105ff16dfbb9e7a352bf53e5e9))
|
||||||
|
* Uses correct filename arg for passed in rng ([349341b](https://gitea.deepak.science:2222/physics/deepdog/commit/349341b405375a43b933f1fd7db4ee9fc501def3))
|
||||||
|
* uses correct filename for pairs guy ([4c06b39](https://gitea.deepak.science:2222/physics/deepdog/commit/4c06b3912c811c93c310b1d9e4c153f2014c4f8b))
|
||||||
|
|
||||||
|
## [0.4.0](https://gitea.deepak.science:2222/physics/deepdog/compare/0.3.5...0.4.0) (2022-04-10)
|
||||||
|
|
||||||
|
|
||||||
|
### ⚠ BREAKING CHANGES
|
||||||
|
|
||||||
|
* Adds pair calculations, with changing api format
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* Adds dynamic cycle count increases to help reach minimum success count ([ec7b4ca](https://gitea.deepak.science:2222/physics/deepdog/commit/ec7b4cac393c15e94c513215c4f1ba32be2ae87a))
|
||||||
|
* Adds pair calculations, with changing api format ([6463b13](https://gitea.deepak.science:2222/physics/deepdog/commit/6463b135ef2d212b565864b5ac1b655e014d2194))
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* uses bigfix from pdme for negatives ([c1c711f](https://gitea.deepak.science:2222/physics/deepdog/commit/c1c711f47b574d3a9b8a24dbcbdd7f50b9be8ea9))
|
||||||
|
|
||||||
|
### [0.3.5](https://gitea.deepak.science:2222/physics/deepdog/compare/0.3.4...0.3.5) (2022-03-07)
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* makes chunksize configurable ([88d9613](https://gitea.deepak.science:2222/physics/deepdog/commit/88d961313c1db0d49fd96939aa725a8706fa0412))
|
||||||
|
|
||||||
|
### [0.3.4](https://gitea.deepak.science:2222/physics/deepdog/compare/0.3.3...0.3.4) (2022-03-06)
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* Changes chunksize for multiprocessing ([0784cd5](https://gitea.deepak.science:2222/physics/deepdog/commit/0784cd53d79e00684506604f094b5d820b3994d4))
|
||||||
|
|
||||||
|
### [0.3.3](https://gitea.deepak.science:2222/physics/deepdog/compare/0.3.2...0.3.3) (2022-03-06)
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* Fixes count to use cycles as well ([8617e4d](https://gitea.deepak.science:2222/physics/deepdog/commit/8617e4d2742b112cc824068150682ce3b2cdd879))
|
||||||
|
|
||||||
|
### [0.3.2](https://gitea.deepak.science:2222/physics/deepdog/compare/0.3.1...0.3.2) (2022-03-06)
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* Adds monte carlo cycles to trade off space and cpu ([e6d8d33](https://gitea.deepak.science:2222/physics/deepdog/commit/e6d8d33c27e7922581e91c10de4f5faff2a51f8b))
|
||||||
|
|
||||||
|
### [0.3.1](https://gitea.deepak.science:2222/physics/deepdog/compare/v0.3.0...v0.3.1) (2022-03-06)
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* Adds alt bayes solver with monte carlo sampler ([7284dbe](https://gitea.deepak.science:2222/physics/deepdog/commit/7284dbeb34ef46189d81fb719252dfa74b8e9fa8))
|
||||||
|
* Updates to pdme version for faster bayes resolution ([d078004](https://gitea.deepak.science:2222/physics/deepdog/commit/d078004773d9d9dccd0a9a52ca96aa57690f9b7e))
|
20
Jenkinsfile
vendored
20
Jenkinsfile
vendored
@@ -4,7 +4,7 @@ pipeline {
|
|||||||
label 'deepdog' // all your pods will be named with this prefix, followed by a unique id
|
label 'deepdog' // all your pods will be named with this prefix, followed by a unique id
|
||||||
idleMinutes 5 // how long the pod will live after no jobs have run on it
|
idleMinutes 5 // how long the pod will live after no jobs have run on it
|
||||||
yamlFile 'jenkins/ci-agent-pod.yaml' // path to the pod definition relative to the root of our project
|
yamlFile 'jenkins/ci-agent-pod.yaml' // path to the pod definition relative to the root of our project
|
||||||
defaultContainer 'python' // define a default container if more than a few stages use it, will default to jnlp container
|
defaultContainer 'poetry' // define a default container if more than a few stages use it, will default to jnlp container
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -12,36 +12,30 @@ pipeline {
|
|||||||
parallelsAlwaysFailFast()
|
parallelsAlwaysFailFast()
|
||||||
}
|
}
|
||||||
|
|
||||||
environment {
|
|
||||||
POETRY_HOME="/opt/poetry"
|
|
||||||
POETRY_VERSION="1.1.12"
|
|
||||||
}
|
|
||||||
|
|
||||||
stages {
|
stages {
|
||||||
stage('Build') {
|
stage('Build') {
|
||||||
steps {
|
steps {
|
||||||
echo 'Building...'
|
echo 'Building...'
|
||||||
sh 'python --version'
|
sh 'python --version'
|
||||||
sh 'curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python'
|
sh 'poetry --version'
|
||||||
sh '${POETRY_HOME}/bin/poetry --version'
|
sh 'poetry install'
|
||||||
sh '${POETRY_HOME}/bin/poetry install'
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
stage('Test') {
|
stage('Test') {
|
||||||
parallel{
|
parallel{
|
||||||
stage('pytest') {
|
stage('pytest') {
|
||||||
steps {
|
steps {
|
||||||
sh '${POETRY_HOME}/bin/poetry run pytest'
|
sh 'poetry run pytest'
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
stage('lint') {
|
stage('lint') {
|
||||||
steps {
|
steps {
|
||||||
sh '${POETRY_HOME}/bin/poetry run flake8 deepdog tests'
|
sh 'poetry run flake8 deepdog tests'
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
stage('mypy') {
|
stage('mypy') {
|
||||||
steps {
|
steps {
|
||||||
sh '${POETRY_HOME}/bin/poetry run mypy deepdog'
|
sh 'poetry run mypy deepdog'
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -57,7 +51,7 @@ pipeline {
|
|||||||
}
|
}
|
||||||
steps {
|
steps {
|
||||||
echo 'Deploying...'
|
echo 'Deploying...'
|
||||||
sh '${POETRY_HOME}/bin/poetry publish -u ${PYPI_USR} -p ${PYPI_PSW} --build'
|
sh 'poetry publish -u ${PYPI_USR} -p ${PYPI_PSW} --build'
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
17
README.md
17
README.md
@@ -1,3 +1,18 @@
|
|||||||
# deepdog
|
# deepdog
|
||||||
|
|
||||||
The dipole diagnostic tool.
|
[](https://conventionalcommits.org)
|
||||||
|
[](https://pypi.org/project/deepdog/)
|
||||||
|
[](https://jenkins.deepak.science/job/gitea-physics/job/deepdog/job/master/)
|
||||||
|

|
||||||
|

|
||||||
|

|
||||||
|
|
||||||
|
The DiPole DiaGnostic tool.
|
||||||
|
|
||||||
|
## Getting started
|
||||||
|
|
||||||
|
`poetry install` to start locally
|
||||||
|
|
||||||
|
Commit using [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/), and when commits are on master, release with `doo release`.
|
||||||
|
|
||||||
|
|
||||||
|
@@ -1,13 +1,24 @@
|
|||||||
import logging
|
import logging
|
||||||
from deepdog.meta import __version__
|
from deepdog.meta import __version__
|
||||||
from deepdog.bayes_run import BayesRun
|
from deepdog.bayes_run import BayesRun
|
||||||
|
from deepdog.bayes_run_simulpairs import BayesRunSimulPairs
|
||||||
|
from deepdog.real_spectrum_run import RealSpectrumRun
|
||||||
|
from deepdog.temp_aware_real_spectrum_run import TempAwareRealSpectrumRun
|
||||||
|
from deepdog.bayes_run_with_ss import BayesRunWithSubspaceSimulation
|
||||||
|
|
||||||
|
|
||||||
def get_version():
|
def get_version():
|
||||||
return __version__
|
return __version__
|
||||||
|
|
||||||
|
|
||||||
__all__ = ["get_version", "BayesRun"]
|
__all__ = [
|
||||||
|
"get_version",
|
||||||
|
"BayesRun",
|
||||||
|
"BayesRunSimulPairs",
|
||||||
|
"RealSpectrumRun",
|
||||||
|
"TempAwareRealSpectrumRun",
|
||||||
|
"BayesRunWithSubspaceSimulation",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
||||||
|
@@ -1,17 +1,19 @@
|
|||||||
|
import pdme.inputs
|
||||||
import pdme.model
|
import pdme.model
|
||||||
|
import pdme.measurement.input_types
|
||||||
|
import pdme.measurement.oscillating_dipole
|
||||||
|
import pdme.util.fast_v_calc
|
||||||
|
import pdme.util.fast_nonlocal_spectrum
|
||||||
from typing import Sequence, Tuple, List
|
from typing import Sequence, Tuple, List
|
||||||
import datetime
|
import datetime
|
||||||
import itertools
|
|
||||||
import csv
|
import csv
|
||||||
|
import multiprocessing
|
||||||
import logging
|
import logging
|
||||||
import numpy
|
import numpy
|
||||||
import scipy.optimize
|
|
||||||
import multiprocessing
|
|
||||||
|
|
||||||
|
|
||||||
# TODO: remove hardcode
|
# TODO: remove hardcode
|
||||||
COST_THRESHOLD = 1e-10
|
CHUNKSIZE = 50
|
||||||
|
|
||||||
|
|
||||||
# TODO: It's garbage to have this here duplicated from pdme.
|
# TODO: It's garbage to have this here duplicated from pdme.
|
||||||
DotInput = Tuple[numpy.typing.ArrayLike, float]
|
DotInput = Tuple[numpy.typing.ArrayLike, float]
|
||||||
@@ -20,44 +22,137 @@ DotInput = Tuple[numpy.typing.ArrayLike, float]
|
|||||||
_logger = logging.getLogger(__name__)
|
_logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
def get_a_result(discretisation, dots, index) -> Tuple[Tuple[int, ...], scipy.optimize.OptimizeResult]:
|
def get_a_result(input) -> int:
|
||||||
return (index, discretisation.solve_for_index(dots, index))
|
model, dot_inputs, lows, highs, monte_carlo_count, max_frequency, seed = input
|
||||||
|
|
||||||
|
rng = numpy.random.default_rng(seed)
|
||||||
|
sample_dipoles = model.get_monte_carlo_dipole_inputs(
|
||||||
|
monte_carlo_count, max_frequency, rng_to_use=rng
|
||||||
|
)
|
||||||
|
vals = pdme.util.fast_v_calc.fast_vs_for_dipoleses(dot_inputs, sample_dipoles)
|
||||||
|
return numpy.count_nonzero(pdme.util.fast_v_calc.between(vals, lows, highs))
|
||||||
|
|
||||||
|
|
||||||
class BayesRun():
|
def get_a_result_using_pairs(input) -> int:
|
||||||
'''
|
(
|
||||||
|
model,
|
||||||
|
dot_inputs,
|
||||||
|
pair_inputs,
|
||||||
|
local_lows,
|
||||||
|
local_highs,
|
||||||
|
nonlocal_lows,
|
||||||
|
nonlocal_highs,
|
||||||
|
monte_carlo_count,
|
||||||
|
max_frequency,
|
||||||
|
) = input
|
||||||
|
sample_dipoles = model.get_n_single_dipoles(monte_carlo_count, max_frequency)
|
||||||
|
local_vals = pdme.util.fast_v_calc.fast_vs_for_dipoles(dot_inputs, sample_dipoles)
|
||||||
|
local_matches = pdme.util.fast_v_calc.between(local_vals, local_lows, local_highs)
|
||||||
|
nonlocal_vals = pdme.util.fast_nonlocal_spectrum.fast_s_nonlocal(
|
||||||
|
pair_inputs, sample_dipoles
|
||||||
|
)
|
||||||
|
nonlocal_matches = pdme.util.fast_v_calc.between(
|
||||||
|
nonlocal_vals, nonlocal_lows, nonlocal_highs
|
||||||
|
)
|
||||||
|
combined_matches = numpy.logical_and(local_matches, nonlocal_matches)
|
||||||
|
return numpy.count_nonzero(combined_matches)
|
||||||
|
|
||||||
|
|
||||||
|
class BayesRun:
|
||||||
|
"""
|
||||||
A single Bayes run for a given set of dots.
|
A single Bayes run for a given set of dots.
|
||||||
|
|
||||||
Parameters
|
Parameters
|
||||||
----------
|
----------
|
||||||
dot_inputs : Sequence[DotInput]
|
dot_inputs : Sequence[DotInput]
|
||||||
The dot inputs for this bayes run.
|
The dot inputs for this bayes run.
|
||||||
discretisations_with_names : Sequence[Tuple(str, pdme.model.Model)]
|
|
||||||
The models to evaluate.
|
|
||||||
actual_model_discretisation : pdme.model.Discretisation
|
|
||||||
The discretisation for the model which is actually correct.
|
|
||||||
filename_slug : str
|
|
||||||
The filename slug to include.
|
|
||||||
run_count: int
|
|
||||||
The number of runs to do.
|
|
||||||
'''
|
|
||||||
def __init__(self, dot_inputs: Sequence[DotInput], discretisations_with_names: Sequence[Tuple[str, pdme.model.Discretisation]], actual_model: pdme.model.Model, filename_slug: str, run_count: int) -> None:
|
|
||||||
self.dot_inputs = dot_inputs
|
|
||||||
self.discretisations = [disc for (_, disc) in discretisations_with_names]
|
|
||||||
self.model_names = [name for (name, _) in discretisations_with_names]
|
|
||||||
self.actual_model = actual_model
|
|
||||||
self.model_count = len(self.discretisations)
|
|
||||||
self.run_count = run_count
|
|
||||||
self.csv_fields = ["dipole_moment", "dipole_location", "dipole_frequency"]
|
|
||||||
self.compensate_zeros = True
|
|
||||||
|
|
||||||
|
models_with_names : Sequence[Tuple(str, pdme.model.DipoleModel)]
|
||||||
|
The models to evaluate.
|
||||||
|
|
||||||
|
actual_model : pdme.model.DipoleModel
|
||||||
|
The model which is actually correct.
|
||||||
|
|
||||||
|
filename_slug : str
|
||||||
|
The filename slug to include.
|
||||||
|
|
||||||
|
run_count: int
|
||||||
|
The number of runs to do.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
dot_positions: Sequence[numpy.typing.ArrayLike],
|
||||||
|
frequency_range: Sequence[float],
|
||||||
|
models_with_names: Sequence[Tuple[str, pdme.model.DipoleModel]],
|
||||||
|
actual_model: pdme.model.DipoleModel,
|
||||||
|
filename_slug: str,
|
||||||
|
run_count: int = 100,
|
||||||
|
low_error: float = 0.9,
|
||||||
|
high_error: float = 1.1,
|
||||||
|
monte_carlo_count: int = 10000,
|
||||||
|
monte_carlo_cycles: int = 10,
|
||||||
|
target_success: int = 100,
|
||||||
|
max_monte_carlo_cycles_steps: int = 10,
|
||||||
|
max_frequency: float = 20,
|
||||||
|
end_threshold: float = None,
|
||||||
|
chunksize: int = CHUNKSIZE,
|
||||||
|
) -> None:
|
||||||
|
self.dot_inputs = pdme.inputs.inputs_with_frequency_range(
|
||||||
|
dot_positions, frequency_range
|
||||||
|
)
|
||||||
|
self.dot_inputs_array = pdme.measurement.input_types.dot_inputs_to_array(
|
||||||
|
self.dot_inputs
|
||||||
|
)
|
||||||
|
|
||||||
|
self.models = [model for (_, model) in models_with_names]
|
||||||
|
self.model_names = [name for (name, _) in models_with_names]
|
||||||
|
self.actual_model = actual_model
|
||||||
|
|
||||||
|
self.n: int
|
||||||
|
try:
|
||||||
|
self.n = self.actual_model.n # type: ignore
|
||||||
|
except AttributeError:
|
||||||
|
self.n = 1
|
||||||
|
|
||||||
|
self.model_count = len(self.models)
|
||||||
|
self.monte_carlo_count = monte_carlo_count
|
||||||
|
self.monte_carlo_cycles = monte_carlo_cycles
|
||||||
|
self.target_success = target_success
|
||||||
|
self.max_monte_carlo_cycles_steps = max_monte_carlo_cycles_steps
|
||||||
|
self.run_count = run_count
|
||||||
|
self.low_error = low_error
|
||||||
|
self.high_error = high_error
|
||||||
|
|
||||||
|
self.csv_fields = []
|
||||||
|
for i in range(self.n):
|
||||||
|
self.csv_fields.extend(
|
||||||
|
[
|
||||||
|
f"dipole_moment_{i+1}",
|
||||||
|
f"dipole_location_{i+1}",
|
||||||
|
f"dipole_frequency_{i+1}",
|
||||||
|
]
|
||||||
|
)
|
||||||
|
self.compensate_zeros = True
|
||||||
|
self.chunksize = chunksize
|
||||||
for name in self.model_names:
|
for name in self.model_names:
|
||||||
self.csv_fields.extend([f"{name}_success", f"{name}_count", f"{name}_prob"])
|
self.csv_fields.extend([f"{name}_success", f"{name}_count", f"{name}_prob"])
|
||||||
|
|
||||||
self.probabilities = [1 / self.model_count] * self.model_count
|
self.probabilities = [1 / self.model_count] * self.model_count
|
||||||
|
|
||||||
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
|
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
|
||||||
self.filename = f"{timestamp}-{filename_slug}.csv"
|
self.filename = f"{timestamp}-{filename_slug}.bayesrun.csv"
|
||||||
|
self.max_frequency = max_frequency
|
||||||
|
|
||||||
|
if end_threshold is not None:
|
||||||
|
if 0 < end_threshold < 1:
|
||||||
|
self.end_threshold: float = end_threshold
|
||||||
|
self.use_end_threshold = True
|
||||||
|
_logger.info(f"Will abort early, at {self.end_threshold}.")
|
||||||
|
else:
|
||||||
|
raise ValueError(
|
||||||
|
f"end_threshold should be between 0 and 1, but is actually {end_threshold}"
|
||||||
|
)
|
||||||
|
|
||||||
def go(self) -> None:
|
def go(self) -> None:
|
||||||
with open(self.filename, "a", newline="") as outfile:
|
with open(self.filename, "a", newline="") as outfile:
|
||||||
@@ -65,44 +160,122 @@ class BayesRun():
|
|||||||
writer.writeheader()
|
writer.writeheader()
|
||||||
|
|
||||||
for run in range(1, self.run_count + 1):
|
for run in range(1, self.run_count + 1):
|
||||||
dipoles = self.actual_model.get_dipoles(run)
|
|
||||||
|
|
||||||
dots = dipoles.get_dot_measurements(self.dot_inputs)
|
# Generate the actual dipoles
|
||||||
_logger.info(f"Going to work on dipole at {dipoles.dipoles}")
|
actual_dipoles = self.actual_model.get_dipoles(self.max_frequency)
|
||||||
|
|
||||||
|
dots = actual_dipoles.get_percent_range_dot_measurements(
|
||||||
|
self.dot_inputs, self.low_error, self.high_error
|
||||||
|
)
|
||||||
|
(
|
||||||
|
lows,
|
||||||
|
highs,
|
||||||
|
) = pdme.measurement.input_types.dot_range_measurements_low_high_arrays(
|
||||||
|
dots
|
||||||
|
)
|
||||||
|
|
||||||
|
_logger.info(f"Going to work on dipole at {actual_dipoles.dipoles}")
|
||||||
|
|
||||||
|
# define a new seed sequence for each run
|
||||||
|
seed_sequence = numpy.random.SeedSequence(run)
|
||||||
|
|
||||||
results = []
|
results = []
|
||||||
_logger.debug("Going to iterate over discretisations now")
|
_logger.debug("Going to iterate over models now")
|
||||||
for disc_count, discretisation in enumerate(self.discretisations):
|
for model_count, model in enumerate(self.models):
|
||||||
_logger.debug(f"Doing discretisation #{disc_count}")
|
_logger.debug(f"Doing model #{model_count}")
|
||||||
with multiprocessing.Pool(multiprocessing.cpu_count() - 1 or 1) as pool:
|
core_count = multiprocessing.cpu_count() - 1 or 1
|
||||||
results.append(pool.starmap(get_a_result, zip(itertools.repeat(discretisation), itertools.repeat(dots), discretisation.all_indices())))
|
with multiprocessing.Pool(core_count) as pool:
|
||||||
|
cycle_count = 0
|
||||||
|
cycle_success = 0
|
||||||
|
cycles = 0
|
||||||
|
while (cycles < self.max_monte_carlo_cycles_steps) and (
|
||||||
|
cycle_success <= self.target_success
|
||||||
|
):
|
||||||
|
_logger.debug(f"Starting cycle {cycles}")
|
||||||
|
cycles += 1
|
||||||
|
current_success = 0
|
||||||
|
cycle_count += self.monte_carlo_count * self.monte_carlo_cycles
|
||||||
|
|
||||||
|
# generate a seed from the sequence for each core.
|
||||||
|
# note this needs to be inside the loop for monte carlo cycle steps!
|
||||||
|
# that way we get more stuff.
|
||||||
|
seeds = seed_sequence.spawn(self.monte_carlo_cycles)
|
||||||
|
|
||||||
|
current_success = sum(
|
||||||
|
pool.imap_unordered(
|
||||||
|
get_a_result,
|
||||||
|
[
|
||||||
|
(
|
||||||
|
model,
|
||||||
|
self.dot_inputs_array,
|
||||||
|
lows,
|
||||||
|
highs,
|
||||||
|
self.monte_carlo_count,
|
||||||
|
self.max_frequency,
|
||||||
|
seed,
|
||||||
|
)
|
||||||
|
for seed in seeds
|
||||||
|
],
|
||||||
|
self.chunksize,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
cycle_success += current_success
|
||||||
|
_logger.debug(f"current running successes: {cycle_success}")
|
||||||
|
results.append((cycle_count, cycle_success))
|
||||||
|
|
||||||
_logger.debug("Done, constructing output now")
|
_logger.debug("Done, constructing output now")
|
||||||
row = {
|
row = {
|
||||||
"dipole_moment": dipoles.dipoles[0].p,
|
"dipole_moment_1": actual_dipoles.dipoles[0].p,
|
||||||
"dipole_location": dipoles.dipoles[0].s,
|
"dipole_location_1": actual_dipoles.dipoles[0].s,
|
||||||
"dipole_frequency": dipoles.dipoles[0].w
|
"dipole_frequency_1": actual_dipoles.dipoles[0].w,
|
||||||
}
|
}
|
||||||
successes: List[int] = []
|
for i in range(1, self.n):
|
||||||
for model_index, (name, result) in enumerate(zip(self.model_names, results)):
|
try:
|
||||||
count = 0
|
current_dipoles = actual_dipoles.dipoles[i]
|
||||||
success = 0
|
row[f"dipole_moment_{i+1}"] = current_dipoles.p
|
||||||
for idx, val in result:
|
row[f"dipole_location_{i+1}"] = current_dipoles.s
|
||||||
count += 1
|
row[f"dipole_frequency_{i+1}"] = current_dipoles.w
|
||||||
if val.success and val.cost <= COST_THRESHOLD:
|
except IndexError:
|
||||||
success += 1
|
_logger.info(f"Not writing anymore, saw end after {i}")
|
||||||
|
break
|
||||||
|
|
||||||
row[f"{name}_success"] = success
|
successes: List[float] = []
|
||||||
|
counts: List[int] = []
|
||||||
|
for model_index, (name, (count, result)) in enumerate(
|
||||||
|
zip(self.model_names, results)
|
||||||
|
):
|
||||||
|
|
||||||
|
row[f"{name}_success"] = result
|
||||||
row[f"{name}_count"] = count
|
row[f"{name}_count"] = count
|
||||||
successes.append(max(success, 1))
|
successes.append(max(result, 0.5))
|
||||||
|
counts.append(count)
|
||||||
|
|
||||||
success_weight = sum([succ * prob for succ, prob in zip(successes, self.probabilities)])
|
success_weight = sum(
|
||||||
new_probabilities = [succ * old_prob / success_weight for succ, old_prob in zip(successes, self.probabilities)]
|
[
|
||||||
|
(succ / count) * prob
|
||||||
|
for succ, count, prob in zip(successes, counts, self.probabilities)
|
||||||
|
]
|
||||||
|
)
|
||||||
|
new_probabilities = [
|
||||||
|
(succ / count) * old_prob / success_weight
|
||||||
|
for succ, count, old_prob in zip(successes, counts, self.probabilities)
|
||||||
|
]
|
||||||
self.probabilities = new_probabilities
|
self.probabilities = new_probabilities
|
||||||
for name, probability in zip(self.model_names, self.probabilities):
|
for name, probability in zip(self.model_names, self.probabilities):
|
||||||
row[f"{name}_prob"] = probability
|
row[f"{name}_prob"] = probability
|
||||||
_logger.info(row)
|
_logger.info(row)
|
||||||
|
|
||||||
with open(self.filename, "a", newline="") as outfile:
|
with open(self.filename, "a", newline="") as outfile:
|
||||||
writer = csv.DictWriter(outfile, fieldnames=self.csv_fields, dialect="unix")
|
writer = csv.DictWriter(
|
||||||
|
outfile, fieldnames=self.csv_fields, dialect="unix"
|
||||||
|
)
|
||||||
writer.writerow(row)
|
writer.writerow(row)
|
||||||
|
|
||||||
|
if self.use_end_threshold:
|
||||||
|
max_prob = max(self.probabilities)
|
||||||
|
if max_prob > self.end_threshold:
|
||||||
|
_logger.info(
|
||||||
|
f"Aborting early, because {max_prob} is greater than {self.end_threshold}"
|
||||||
|
)
|
||||||
|
break
|
||||||
|
382
deepdog/bayes_run_simulpairs.py
Normal file
382
deepdog/bayes_run_simulpairs.py
Normal file
@@ -0,0 +1,382 @@
|
|||||||
|
import pdme.inputs
|
||||||
|
import pdme.model
|
||||||
|
import pdme.measurement.input_types
|
||||||
|
import pdme.measurement.oscillating_dipole
|
||||||
|
import pdme.util.fast_v_calc
|
||||||
|
import pdme.util.fast_nonlocal_spectrum
|
||||||
|
from typing import Sequence, Tuple, List
|
||||||
|
import datetime
|
||||||
|
import csv
|
||||||
|
import multiprocessing
|
||||||
|
import logging
|
||||||
|
import numpy
|
||||||
|
import numpy.random
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: remove hardcode
|
||||||
|
CHUNKSIZE = 50
|
||||||
|
|
||||||
|
# TODO: It's garbage to have this here duplicated from pdme.
|
||||||
|
DotInput = Tuple[numpy.typing.ArrayLike, float]
|
||||||
|
|
||||||
|
|
||||||
|
_logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def get_a_simul_result_using_pairs(input) -> numpy.ndarray:
|
||||||
|
(
|
||||||
|
model,
|
||||||
|
dot_inputs,
|
||||||
|
pair_inputs,
|
||||||
|
local_lows,
|
||||||
|
local_highs,
|
||||||
|
nonlocal_lows,
|
||||||
|
nonlocal_highs,
|
||||||
|
monte_carlo_count,
|
||||||
|
monte_carlo_cycles,
|
||||||
|
max_frequency,
|
||||||
|
seed,
|
||||||
|
) = input
|
||||||
|
|
||||||
|
rng = numpy.random.default_rng(seed)
|
||||||
|
local_total = 0
|
||||||
|
combined_total = 0
|
||||||
|
|
||||||
|
sample_dipoles = model.get_monte_carlo_dipole_inputs(
|
||||||
|
monte_carlo_count, max_frequency, rng_to_use=rng
|
||||||
|
)
|
||||||
|
local_vals = pdme.util.fast_v_calc.fast_vs_for_dipoleses(dot_inputs, sample_dipoles)
|
||||||
|
local_matches = pdme.util.fast_v_calc.between(local_vals, local_lows, local_highs)
|
||||||
|
nonlocal_vals = pdme.util.fast_nonlocal_spectrum.fast_s_nonlocal_dipoleses(
|
||||||
|
pair_inputs, sample_dipoles
|
||||||
|
)
|
||||||
|
nonlocal_matches = pdme.util.fast_v_calc.between(
|
||||||
|
nonlocal_vals, nonlocal_lows, nonlocal_highs
|
||||||
|
)
|
||||||
|
combined_matches = numpy.logical_and(local_matches, nonlocal_matches)
|
||||||
|
|
||||||
|
local_total += numpy.count_nonzero(local_matches)
|
||||||
|
combined_total += numpy.count_nonzero(combined_matches)
|
||||||
|
return numpy.array([local_total, combined_total])
|
||||||
|
|
||||||
|
|
||||||
|
class BayesRunSimulPairs:
|
||||||
|
"""
|
||||||
|
A dual pairs-nonpairs Bayes run for a given set of dots.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
dot_inputs : Sequence[DotInput]
|
||||||
|
The dot inputs for this bayes run.
|
||||||
|
|
||||||
|
models_with_names : Sequence[Tuple(str, pdme.model.DipoleModel)]
|
||||||
|
The models to evaluate.
|
||||||
|
|
||||||
|
actual_model : pdme.model.DipoleModel
|
||||||
|
The modoel for the model which is actually correct.
|
||||||
|
|
||||||
|
filename_slug : str
|
||||||
|
The filename slug to include.
|
||||||
|
|
||||||
|
run_count: int
|
||||||
|
The number of runs to do.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
dot_positions: Sequence[numpy.typing.ArrayLike],
|
||||||
|
frequency_range: Sequence[float],
|
||||||
|
models_with_names: Sequence[Tuple[str, pdme.model.DipoleModel]],
|
||||||
|
actual_model: pdme.model.DipoleModel,
|
||||||
|
filename_slug: str,
|
||||||
|
run_count: int = 100,
|
||||||
|
low_error: float = 0.9,
|
||||||
|
high_error: float = 1.1,
|
||||||
|
pairs_high_error=None,
|
||||||
|
pairs_low_error=None,
|
||||||
|
monte_carlo_count: int = 10000,
|
||||||
|
monte_carlo_cycles: int = 10,
|
||||||
|
target_success: int = 100,
|
||||||
|
max_monte_carlo_cycles_steps: int = 10,
|
||||||
|
max_frequency: float = 20,
|
||||||
|
end_threshold: float = None,
|
||||||
|
chunksize: int = CHUNKSIZE,
|
||||||
|
) -> None:
|
||||||
|
self.dot_inputs = pdme.inputs.inputs_with_frequency_range(
|
||||||
|
dot_positions, frequency_range
|
||||||
|
)
|
||||||
|
self.dot_inputs_array = pdme.measurement.input_types.dot_inputs_to_array(
|
||||||
|
self.dot_inputs
|
||||||
|
)
|
||||||
|
|
||||||
|
self.dot_pair_inputs = pdme.inputs.input_pairs_with_frequency_range(
|
||||||
|
dot_positions, frequency_range
|
||||||
|
)
|
||||||
|
self.dot_pair_inputs_array = (
|
||||||
|
pdme.measurement.input_types.dot_pair_inputs_to_array(self.dot_pair_inputs)
|
||||||
|
)
|
||||||
|
|
||||||
|
self.models = [mod for (_, mod) in models_with_names]
|
||||||
|
self.model_names = [name for (name, _) in models_with_names]
|
||||||
|
self.actual_model = actual_model
|
||||||
|
|
||||||
|
self.n: int
|
||||||
|
try:
|
||||||
|
self.n = self.actual_model.n # type: ignore
|
||||||
|
except AttributeError:
|
||||||
|
self.n = 1
|
||||||
|
|
||||||
|
self.model_count = len(self.models)
|
||||||
|
self.monte_carlo_count = monte_carlo_count
|
||||||
|
self.monte_carlo_cycles = monte_carlo_cycles
|
||||||
|
self.target_success = target_success
|
||||||
|
self.max_monte_carlo_cycles_steps = max_monte_carlo_cycles_steps
|
||||||
|
self.run_count = run_count
|
||||||
|
self.low_error = low_error
|
||||||
|
self.high_error = high_error
|
||||||
|
if pairs_low_error is None:
|
||||||
|
self.pairs_low_error = self.low_error
|
||||||
|
else:
|
||||||
|
self.pairs_low_error = pairs_low_error
|
||||||
|
if pairs_high_error is None:
|
||||||
|
self.pairs_high_error = self.high_error
|
||||||
|
else:
|
||||||
|
self.pairs_high_error = pairs_high_error
|
||||||
|
|
||||||
|
self.csv_fields = []
|
||||||
|
for i in range(self.n):
|
||||||
|
self.csv_fields.extend(
|
||||||
|
[
|
||||||
|
f"dipole_moment_{i+1}",
|
||||||
|
f"dipole_location_{i+1}",
|
||||||
|
f"dipole_frequency_{i+1}",
|
||||||
|
]
|
||||||
|
)
|
||||||
|
self.compensate_zeros = True
|
||||||
|
self.chunksize = chunksize
|
||||||
|
for name in self.model_names:
|
||||||
|
self.csv_fields.extend([f"{name}_success", f"{name}_count", f"{name}_prob"])
|
||||||
|
|
||||||
|
self.probabilities_no_pairs = [1 / self.model_count] * self.model_count
|
||||||
|
self.probabilities_pairs = [1 / self.model_count] * self.model_count
|
||||||
|
|
||||||
|
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
|
||||||
|
self.filename_pairs = f"{timestamp}-{filename_slug}.simulpairs.yespairs.csv"
|
||||||
|
self.filename_no_pairs = f"{timestamp}-{filename_slug}.simulpairs.noopairs.csv"
|
||||||
|
|
||||||
|
self.max_frequency = max_frequency
|
||||||
|
|
||||||
|
if end_threshold is not None:
|
||||||
|
if 0 < end_threshold < 1:
|
||||||
|
self.end_threshold: float = end_threshold
|
||||||
|
self.use_end_threshold = True
|
||||||
|
_logger.info(f"Will abort early, at {self.end_threshold}.")
|
||||||
|
else:
|
||||||
|
raise ValueError(
|
||||||
|
f"end_threshold should be between 0 and 1, but is actually {end_threshold}"
|
||||||
|
)
|
||||||
|
|
||||||
|
def go(self) -> None:
|
||||||
|
with open(self.filename_pairs, "a", newline="") as outfile:
|
||||||
|
writer = csv.DictWriter(outfile, fieldnames=self.csv_fields, dialect="unix")
|
||||||
|
writer.writeheader()
|
||||||
|
with open(self.filename_no_pairs, "a", newline="") as outfile:
|
||||||
|
writer = csv.DictWriter(outfile, fieldnames=self.csv_fields, dialect="unix")
|
||||||
|
writer.writeheader()
|
||||||
|
|
||||||
|
for run in range(1, self.run_count + 1):
|
||||||
|
|
||||||
|
# Generate the actual dipoles
|
||||||
|
actual_dipoles = self.actual_model.get_dipoles(self.max_frequency)
|
||||||
|
|
||||||
|
dots = actual_dipoles.get_percent_range_dot_measurements(
|
||||||
|
self.dot_inputs, self.low_error, self.high_error
|
||||||
|
)
|
||||||
|
(
|
||||||
|
lows,
|
||||||
|
highs,
|
||||||
|
) = pdme.measurement.input_types.dot_range_measurements_low_high_arrays(
|
||||||
|
dots
|
||||||
|
)
|
||||||
|
|
||||||
|
pair_lows, pair_highs = (None, None)
|
||||||
|
pair_measurements = actual_dipoles.get_percent_range_dot_pair_measurements(
|
||||||
|
self.dot_pair_inputs, self.pairs_low_error, self.pairs_high_error
|
||||||
|
)
|
||||||
|
(
|
||||||
|
pair_lows,
|
||||||
|
pair_highs,
|
||||||
|
) = pdme.measurement.input_types.dot_range_measurements_low_high_arrays(
|
||||||
|
pair_measurements
|
||||||
|
)
|
||||||
|
|
||||||
|
_logger.info(f"Going to work on dipole at {actual_dipoles.dipoles}")
|
||||||
|
|
||||||
|
# define a new seed sequence for each run
|
||||||
|
seed_sequence = numpy.random.SeedSequence(run)
|
||||||
|
|
||||||
|
results_pairs = []
|
||||||
|
results_no_pairs = []
|
||||||
|
_logger.debug("Going to iterate over models now")
|
||||||
|
for model_count, model in enumerate(self.models):
|
||||||
|
_logger.debug(f"Doing model #{model_count}")
|
||||||
|
|
||||||
|
core_count = multiprocessing.cpu_count() - 1 or 1
|
||||||
|
with multiprocessing.Pool(core_count) as pool:
|
||||||
|
cycle_count = 0
|
||||||
|
cycle_success_pairs = 0
|
||||||
|
cycle_success_no_pairs = 0
|
||||||
|
cycles = 0
|
||||||
|
while (cycles < self.max_monte_carlo_cycles_steps) and (
|
||||||
|
min(cycle_success_pairs, cycle_success_no_pairs)
|
||||||
|
<= self.target_success
|
||||||
|
):
|
||||||
|
_logger.debug(f"Starting cycle {cycles}")
|
||||||
|
|
||||||
|
cycles += 1
|
||||||
|
current_success_pairs = 0
|
||||||
|
current_success_no_pairs = 0
|
||||||
|
cycle_count += self.monte_carlo_count * self.monte_carlo_cycles
|
||||||
|
|
||||||
|
# generate a seed from the sequence for each core.
|
||||||
|
# note this needs to be inside the loop for monte carlo cycle steps!
|
||||||
|
# that way we get more stuff.
|
||||||
|
|
||||||
|
seeds = seed_sequence.spawn(self.monte_carlo_cycles)
|
||||||
|
_logger.debug(f"Creating {self.monte_carlo_cycles} seeds")
|
||||||
|
current_success_both = numpy.array(
|
||||||
|
sum(
|
||||||
|
pool.imap_unordered(
|
||||||
|
get_a_simul_result_using_pairs,
|
||||||
|
[
|
||||||
|
(
|
||||||
|
model,
|
||||||
|
self.dot_inputs_array,
|
||||||
|
self.dot_pair_inputs_array,
|
||||||
|
lows,
|
||||||
|
highs,
|
||||||
|
pair_lows,
|
||||||
|
pair_highs,
|
||||||
|
self.monte_carlo_count,
|
||||||
|
self.monte_carlo_cycles,
|
||||||
|
self.max_frequency,
|
||||||
|
seed,
|
||||||
|
)
|
||||||
|
for seed in seeds
|
||||||
|
],
|
||||||
|
self.chunksize,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
)
|
||||||
|
current_success_no_pairs = current_success_both[0]
|
||||||
|
current_success_pairs = current_success_both[1]
|
||||||
|
|
||||||
|
cycle_success_no_pairs += current_success_no_pairs
|
||||||
|
cycle_success_pairs += current_success_pairs
|
||||||
|
_logger.debug(
|
||||||
|
f"(pair, no_pair) successes are {(cycle_success_pairs, cycle_success_no_pairs)}"
|
||||||
|
)
|
||||||
|
results_pairs.append((cycle_count, cycle_success_pairs))
|
||||||
|
results_no_pairs.append((cycle_count, cycle_success_no_pairs))
|
||||||
|
|
||||||
|
_logger.debug("Done, constructing output now")
|
||||||
|
row_pairs = {
|
||||||
|
"dipole_moment_1": actual_dipoles.dipoles[0].p,
|
||||||
|
"dipole_location_1": actual_dipoles.dipoles[0].s,
|
||||||
|
"dipole_frequency_1": actual_dipoles.dipoles[0].w,
|
||||||
|
}
|
||||||
|
row_no_pairs = {
|
||||||
|
"dipole_moment_1": actual_dipoles.dipoles[0].p,
|
||||||
|
"dipole_location_1": actual_dipoles.dipoles[0].s,
|
||||||
|
"dipole_frequency_1": actual_dipoles.dipoles[0].w,
|
||||||
|
}
|
||||||
|
for i in range(1, self.n):
|
||||||
|
try:
|
||||||
|
current_dipoles = actual_dipoles.dipoles[i]
|
||||||
|
row_pairs[f"dipole_moment_{i+1}"] = current_dipoles.p
|
||||||
|
row_pairs[f"dipole_location_{i+1}"] = current_dipoles.s
|
||||||
|
row_pairs[f"dipole_frequency_{i+1}"] = current_dipoles.w
|
||||||
|
row_no_pairs[f"dipole_moment_{i+1}"] = current_dipoles.p
|
||||||
|
row_no_pairs[f"dipole_location_{i+1}"] = current_dipoles.s
|
||||||
|
row_no_pairs[f"dipole_frequency_{i+1}"] = current_dipoles.w
|
||||||
|
except IndexError:
|
||||||
|
_logger.info(f"Not writing anymore, saw end after {i}")
|
||||||
|
break
|
||||||
|
|
||||||
|
successes_pairs: List[float] = []
|
||||||
|
successes_no_pairs: List[float] = []
|
||||||
|
counts: List[int] = []
|
||||||
|
for model_index, (
|
||||||
|
name,
|
||||||
|
(count_pair, result_pair),
|
||||||
|
(count_no_pair, result_no_pair),
|
||||||
|
) in enumerate(zip(self.model_names, results_pairs, results_no_pairs)):
|
||||||
|
|
||||||
|
row_pairs[f"{name}_success"] = result_pair
|
||||||
|
row_pairs[f"{name}_count"] = count_pair
|
||||||
|
successes_pairs.append(max(result_pair, 0.5))
|
||||||
|
|
||||||
|
row_no_pairs[f"{name}_success"] = result_no_pair
|
||||||
|
row_no_pairs[f"{name}_count"] = count_no_pair
|
||||||
|
successes_no_pairs.append(max(result_no_pair, 0.5))
|
||||||
|
|
||||||
|
counts.append(count_pair)
|
||||||
|
|
||||||
|
success_weight_pair = sum(
|
||||||
|
[
|
||||||
|
(succ / count) * prob
|
||||||
|
for succ, count, prob in zip(
|
||||||
|
successes_pairs, counts, self.probabilities_pairs
|
||||||
|
)
|
||||||
|
]
|
||||||
|
)
|
||||||
|
success_weight_no_pair = sum(
|
||||||
|
[
|
||||||
|
(succ / count) * prob
|
||||||
|
for succ, count, prob in zip(
|
||||||
|
successes_no_pairs, counts, self.probabilities_no_pairs
|
||||||
|
)
|
||||||
|
]
|
||||||
|
)
|
||||||
|
new_probabilities_pair = [
|
||||||
|
(succ / count) * old_prob / success_weight_pair
|
||||||
|
for succ, count, old_prob in zip(
|
||||||
|
successes_pairs, counts, self.probabilities_pairs
|
||||||
|
)
|
||||||
|
]
|
||||||
|
new_probabilities_no_pair = [
|
||||||
|
(succ / count) * old_prob / success_weight_no_pair
|
||||||
|
for succ, count, old_prob in zip(
|
||||||
|
successes_no_pairs, counts, self.probabilities_no_pairs
|
||||||
|
)
|
||||||
|
]
|
||||||
|
self.probabilities_pairs = new_probabilities_pair
|
||||||
|
self.probabilities_no_pairs = new_probabilities_no_pair
|
||||||
|
for name, probability_pair, probability_no_pair in zip(
|
||||||
|
self.model_names, self.probabilities_pairs, self.probabilities_no_pairs
|
||||||
|
):
|
||||||
|
row_pairs[f"{name}_prob"] = probability_pair
|
||||||
|
row_no_pairs[f"{name}_prob"] = probability_no_pair
|
||||||
|
_logger.debug(row_pairs)
|
||||||
|
_logger.debug(row_no_pairs)
|
||||||
|
|
||||||
|
with open(self.filename_pairs, "a", newline="") as outfile:
|
||||||
|
writer = csv.DictWriter(
|
||||||
|
outfile, fieldnames=self.csv_fields, dialect="unix"
|
||||||
|
)
|
||||||
|
writer.writerow(row_pairs)
|
||||||
|
with open(self.filename_no_pairs, "a", newline="") as outfile:
|
||||||
|
writer = csv.DictWriter(
|
||||||
|
outfile, fieldnames=self.csv_fields, dialect="unix"
|
||||||
|
)
|
||||||
|
writer.writerow(row_no_pairs)
|
||||||
|
|
||||||
|
if self.use_end_threshold:
|
||||||
|
max_prob = min(
|
||||||
|
max(self.probabilities_pairs), max(self.probabilities_no_pairs)
|
||||||
|
)
|
||||||
|
if max_prob > self.end_threshold:
|
||||||
|
_logger.info(
|
||||||
|
f"Aborting early, because {max_prob} is greater than {self.end_threshold}"
|
||||||
|
)
|
||||||
|
break
|
238
deepdog/bayes_run_with_ss.py
Normal file
238
deepdog/bayes_run_with_ss.py
Normal file
@@ -0,0 +1,238 @@
|
|||||||
|
import deepdog.subset_simulation
|
||||||
|
import pdme.inputs
|
||||||
|
import pdme.model
|
||||||
|
import pdme.measurement.input_types
|
||||||
|
import pdme.measurement.oscillating_dipole
|
||||||
|
import pdme.util.fast_v_calc
|
||||||
|
import pdme.util.fast_nonlocal_spectrum
|
||||||
|
from typing import Sequence, Tuple, List, Optional
|
||||||
|
import datetime
|
||||||
|
import csv
|
||||||
|
import logging
|
||||||
|
import numpy
|
||||||
|
import numpy.typing
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: remove hardcode
|
||||||
|
CHUNKSIZE = 50
|
||||||
|
|
||||||
|
# TODO: It's garbage to have this here duplicated from pdme.
|
||||||
|
DotInput = Tuple[numpy.typing.ArrayLike, float]
|
||||||
|
|
||||||
|
|
||||||
|
CLAMPING_FACTOR = 10
|
||||||
|
|
||||||
|
_logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class BayesRunWithSubspaceSimulation:
|
||||||
|
"""
|
||||||
|
A single Bayes run for a given set of dots.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
dot_inputs : Sequence[DotInput]
|
||||||
|
The dot inputs for this bayes run.
|
||||||
|
|
||||||
|
models_with_names : Sequence[Tuple(str, pdme.model.DipoleModel)]
|
||||||
|
The models to evaluate.
|
||||||
|
|
||||||
|
actual_model : pdme.model.DipoleModel
|
||||||
|
The model which is actually correct.
|
||||||
|
|
||||||
|
filename_slug : str
|
||||||
|
The filename slug to include.
|
||||||
|
|
||||||
|
run_count: int
|
||||||
|
The number of runs to do.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
dot_positions: Sequence[numpy.typing.ArrayLike],
|
||||||
|
frequency_range: Sequence[float],
|
||||||
|
models_with_names: Sequence[Tuple[str, pdme.model.DipoleModel]],
|
||||||
|
actual_model: pdme.model.DipoleModel,
|
||||||
|
filename_slug: str,
|
||||||
|
max_frequency: float = 20,
|
||||||
|
end_threshold: float = None,
|
||||||
|
run_count=100,
|
||||||
|
chunksize: int = CHUNKSIZE,
|
||||||
|
ss_n_c: int = 500,
|
||||||
|
ss_n_s: int = 100,
|
||||||
|
ss_m_max: int = 15,
|
||||||
|
ss_target_cost: Optional[float] = None,
|
||||||
|
ss_level_0_seed: int = 200,
|
||||||
|
ss_mcmc_seed: int = 20,
|
||||||
|
ss_use_adaptive_steps=True,
|
||||||
|
ss_default_phi_step=0.01,
|
||||||
|
ss_default_theta_step=0.01,
|
||||||
|
ss_default_r_step=0.01,
|
||||||
|
ss_default_w_log_step=0.01,
|
||||||
|
ss_default_upper_w_log_step=4,
|
||||||
|
ss_dump_last_generation=False,
|
||||||
|
) -> None:
|
||||||
|
self.dot_inputs = pdme.inputs.inputs_with_frequency_range(
|
||||||
|
dot_positions, frequency_range
|
||||||
|
)
|
||||||
|
self.dot_inputs_array = pdme.measurement.input_types.dot_inputs_to_array(
|
||||||
|
self.dot_inputs
|
||||||
|
)
|
||||||
|
|
||||||
|
self.models_with_names = models_with_names
|
||||||
|
self.models = [model for (_, model) in models_with_names]
|
||||||
|
self.model_names = [name for (name, _) in models_with_names]
|
||||||
|
self.actual_model = actual_model
|
||||||
|
|
||||||
|
self.n: int
|
||||||
|
try:
|
||||||
|
self.n = self.actual_model.n # type: ignore
|
||||||
|
except AttributeError:
|
||||||
|
self.n = 1
|
||||||
|
|
||||||
|
self.model_count = len(self.models)
|
||||||
|
|
||||||
|
self.csv_fields = []
|
||||||
|
for i in range(self.n):
|
||||||
|
self.csv_fields.extend(
|
||||||
|
[
|
||||||
|
f"dipole_moment_{i+1}",
|
||||||
|
f"dipole_location_{i+1}",
|
||||||
|
f"dipole_frequency_{i+1}",
|
||||||
|
]
|
||||||
|
)
|
||||||
|
self.compensate_zeros = True
|
||||||
|
self.chunksize = chunksize
|
||||||
|
for name in self.model_names:
|
||||||
|
self.csv_fields.extend([f"{name}_likelihood", f"{name}_prob"])
|
||||||
|
|
||||||
|
self.probabilities = [1 / self.model_count] * self.model_count
|
||||||
|
|
||||||
|
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
|
||||||
|
self.filename = f"{timestamp}-{filename_slug}.bayesrunwithss.csv"
|
||||||
|
self.max_frequency = max_frequency
|
||||||
|
|
||||||
|
if end_threshold is not None:
|
||||||
|
if 0 < end_threshold < 1:
|
||||||
|
self.end_threshold: float = end_threshold
|
||||||
|
self.use_end_threshold = True
|
||||||
|
_logger.info(f"Will abort early, at {self.end_threshold}.")
|
||||||
|
else:
|
||||||
|
raise ValueError(
|
||||||
|
f"end_threshold should be between 0 and 1, but is actually {end_threshold}"
|
||||||
|
)
|
||||||
|
|
||||||
|
self.ss_n_c = ss_n_c
|
||||||
|
self.ss_n_s = ss_n_s
|
||||||
|
self.ss_m_max = ss_m_max
|
||||||
|
self.ss_target_cost = ss_target_cost
|
||||||
|
self.ss_level_0_seed = ss_level_0_seed
|
||||||
|
self.ss_mcmc_seed = ss_mcmc_seed
|
||||||
|
self.ss_use_adaptive_steps = ss_use_adaptive_steps
|
||||||
|
self.ss_default_phi_step = ss_default_phi_step
|
||||||
|
self.ss_default_theta_step = ss_default_theta_step
|
||||||
|
self.ss_default_r_step = ss_default_r_step
|
||||||
|
self.ss_default_w_log_step = ss_default_w_log_step
|
||||||
|
self.ss_default_upper_w_log_step = ss_default_upper_w_log_step
|
||||||
|
self.ss_dump_last_generation = ss_dump_last_generation
|
||||||
|
|
||||||
|
self.run_count = run_count
|
||||||
|
|
||||||
|
def go(self) -> None:
|
||||||
|
with open(self.filename, "a", newline="") as outfile:
|
||||||
|
writer = csv.DictWriter(outfile, fieldnames=self.csv_fields, dialect="unix")
|
||||||
|
writer.writeheader()
|
||||||
|
|
||||||
|
for run in range(1, self.run_count + 1):
|
||||||
|
|
||||||
|
# Generate the actual dipoles
|
||||||
|
actual_dipoles = self.actual_model.get_dipoles(self.max_frequency)
|
||||||
|
|
||||||
|
measurements = actual_dipoles.get_dot_measurements(self.dot_inputs)
|
||||||
|
|
||||||
|
_logger.info(f"Going to work on dipole at {actual_dipoles.dipoles}")
|
||||||
|
|
||||||
|
# define a new seed sequence for each run
|
||||||
|
|
||||||
|
results = []
|
||||||
|
_logger.debug("Going to iterate over models now")
|
||||||
|
for model_count, model in enumerate(self.models_with_names):
|
||||||
|
_logger.debug(f"Doing model #{model_count}, {model[0]}")
|
||||||
|
subset_run = deepdog.subset_simulation.SubsetSimulation(
|
||||||
|
model,
|
||||||
|
self.dot_inputs,
|
||||||
|
measurements,
|
||||||
|
self.ss_n_c,
|
||||||
|
self.ss_n_s,
|
||||||
|
self.ss_m_max,
|
||||||
|
self.ss_target_cost,
|
||||||
|
self.ss_level_0_seed,
|
||||||
|
self.ss_mcmc_seed,
|
||||||
|
self.ss_use_adaptive_steps,
|
||||||
|
self.ss_default_phi_step,
|
||||||
|
self.ss_default_theta_step,
|
||||||
|
self.ss_default_r_step,
|
||||||
|
self.ss_default_w_log_step,
|
||||||
|
self.ss_default_upper_w_log_step,
|
||||||
|
keep_probs_list=False,
|
||||||
|
dump_last_generation_to_file=self.ss_dump_last_generation,
|
||||||
|
)
|
||||||
|
results.append(subset_run.execute())
|
||||||
|
|
||||||
|
_logger.debug("Done, constructing output now")
|
||||||
|
row = {
|
||||||
|
"dipole_moment_1": actual_dipoles.dipoles[0].p,
|
||||||
|
"dipole_location_1": actual_dipoles.dipoles[0].s,
|
||||||
|
"dipole_frequency_1": actual_dipoles.dipoles[0].w,
|
||||||
|
}
|
||||||
|
for i in range(1, self.n):
|
||||||
|
try:
|
||||||
|
current_dipoles = actual_dipoles.dipoles[i]
|
||||||
|
row[f"dipole_moment_{i+1}"] = current_dipoles.p
|
||||||
|
row[f"dipole_location_{i+1}"] = current_dipoles.s
|
||||||
|
row[f"dipole_frequency_{i+1}"] = current_dipoles.w
|
||||||
|
except IndexError:
|
||||||
|
_logger.info(f"Not writing anymore, saw end after {i}")
|
||||||
|
break
|
||||||
|
|
||||||
|
likelihoods: List[float] = []
|
||||||
|
|
||||||
|
for (name, result) in zip(self.model_names, results):
|
||||||
|
if result.over_target_likelihood is None:
|
||||||
|
clamped_likelihood = result.probs_list[-1][0] / CLAMPING_FACTOR
|
||||||
|
_logger.warning(
|
||||||
|
f"got a none result, clamping to {clamped_likelihood}"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
clamped_likelihood = result.over_target_likelihood
|
||||||
|
likelihoods.append(clamped_likelihood)
|
||||||
|
row[f"{name}_likelihood"] = clamped_likelihood
|
||||||
|
|
||||||
|
success_weight = sum(
|
||||||
|
[
|
||||||
|
likelihood * prob
|
||||||
|
for likelihood, prob in zip(likelihoods, self.probabilities)
|
||||||
|
]
|
||||||
|
)
|
||||||
|
new_probabilities = [
|
||||||
|
likelihood * old_prob / success_weight
|
||||||
|
for likelihood, old_prob in zip(likelihoods, self.probabilities)
|
||||||
|
]
|
||||||
|
self.probabilities = new_probabilities
|
||||||
|
for name, probability in zip(self.model_names, self.probabilities):
|
||||||
|
row[f"{name}_prob"] = probability
|
||||||
|
_logger.info(row)
|
||||||
|
|
||||||
|
with open(self.filename, "a", newline="") as outfile:
|
||||||
|
writer = csv.DictWriter(
|
||||||
|
outfile, fieldnames=self.csv_fields, dialect="unix"
|
||||||
|
)
|
||||||
|
writer.writerow(row)
|
||||||
|
|
||||||
|
if self.use_end_threshold:
|
||||||
|
max_prob = max(self.probabilities)
|
||||||
|
if max_prob > self.end_threshold:
|
||||||
|
_logger.info(
|
||||||
|
f"Aborting early, because {max_prob} is greater than {self.end_threshold}"
|
||||||
|
)
|
||||||
|
break
|
@@ -1,80 +0,0 @@
|
|||||||
from pdme.measurement import OscillatingDipole, OscillatingDipoleArrangement
|
|
||||||
import pdme
|
|
||||||
from deepdog.bayes_run import DotInput
|
|
||||||
import datetime
|
|
||||||
import numpy
|
|
||||||
import logging
|
|
||||||
from typing import Sequence, Tuple
|
|
||||||
import csv
|
|
||||||
import itertools
|
|
||||||
import multiprocessing
|
|
||||||
|
|
||||||
_logger = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
def get_a_result(discretisation, dots, index):
|
|
||||||
return (index, discretisation.solve_for_index(dots, index))
|
|
||||||
|
|
||||||
|
|
||||||
class Diagnostic():
|
|
||||||
'''
|
|
||||||
Represents a diagnostic for a single dipole moment given a set of discretisations.
|
|
||||||
|
|
||||||
Parameters
|
|
||||||
----------
|
|
||||||
dot_inputs : Sequence[DotInput]
|
|
||||||
The dot inputs for this diagnostic.
|
|
||||||
discretisations_with_names : Sequence[Tuple(str, pdme.model.Model)]
|
|
||||||
The models to evaluate.
|
|
||||||
actual_model_discretisation : pdme.model.Discretisation
|
|
||||||
The discretisation for the model which is actually correct.
|
|
||||||
filename_slug : str
|
|
||||||
The filename slug to include.
|
|
||||||
run_count: int
|
|
||||||
The number of runs to do.
|
|
||||||
'''
|
|
||||||
def __init__(self, actual_dipole_moment: numpy.ndarray, actual_dipole_position: numpy.ndarray, actual_dipole_frequency: float, dot_inputs: Sequence[DotInput], discretisations_with_names: Sequence[Tuple[str, pdme.model.Discretisation]], filename_slug: str) -> None:
|
|
||||||
self.dipoles = OscillatingDipoleArrangement([OscillatingDipole(actual_dipole_moment, actual_dipole_position, actual_dipole_frequency)])
|
|
||||||
self.dots = self.dipoles.get_dot_measurements(dot_inputs)
|
|
||||||
|
|
||||||
self.discretisations_with_names = discretisations_with_names
|
|
||||||
self.model_count = len(self.discretisations_with_names)
|
|
||||||
|
|
||||||
self.csv_fields = ["model", "index", "bounds", "actual_dipole_moment", "actual_dipole_position", "actual_dipole_freq", "success", "result"]
|
|
||||||
|
|
||||||
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
|
|
||||||
self.filename = f"{timestamp}-{filename_slug}.csv"
|
|
||||||
|
|
||||||
def go(self):
|
|
||||||
with open(self.filename, "a", newline="") as outfile:
|
|
||||||
# csv fields
|
|
||||||
writer = csv.DictWriter(outfile, fieldnames=self.csv_fields, dialect='unix')
|
|
||||||
writer.writeheader()
|
|
||||||
|
|
||||||
for (name, discretisation) in self.discretisations_with_names:
|
|
||||||
_logger.info(f"Working on discretisation {name}")
|
|
||||||
|
|
||||||
results = []
|
|
||||||
with multiprocessing.Pool(multiprocessing.cpu_count() - 1 or 1) as pool:
|
|
||||||
results = pool.starmap(get_a_result, zip(itertools.repeat(discretisation), itertools.repeat(self.dots), discretisation.all_indices()))
|
|
||||||
|
|
||||||
with open(self.filename, "a", newline='') as outfile:
|
|
||||||
writer = csv.DictWriter(outfile, fieldnames=self.csv_fields, dialect='unix')
|
|
||||||
|
|
||||||
for idx, result in results:
|
|
||||||
|
|
||||||
bounds = discretisation.bounds(idx)
|
|
||||||
|
|
||||||
actual_success = result.success and result.cost <= 1e-10
|
|
||||||
row = {
|
|
||||||
"model": name,
|
|
||||||
"index": idx,
|
|
||||||
"bounds_px": bounds,
|
|
||||||
"actual_dipole_moment": self.dipoles.dipoles[0].p,
|
|
||||||
"actual_dipole_position": self.dipoles.dipoles[0].s,
|
|
||||||
"actual_dipole_freq": self.dipoles.dipoles[0].w,
|
|
||||||
"success": actual_success,
|
|
||||||
"result": result.normalised_x if actual_success else None,
|
|
||||||
}
|
|
||||||
_logger.debug(f"Writing result {row}")
|
|
||||||
writer.writerow(row)
|
|
@@ -1,3 +1,3 @@
|
|||||||
from importlib.metadata import version
|
from importlib.metadata import version
|
||||||
|
|
||||||
__version__ = version('deepdog')
|
__version__ = version("deepdog")
|
||||||
|
307
deepdog/real_spectrum_run.py
Normal file
307
deepdog/real_spectrum_run.py
Normal file
@@ -0,0 +1,307 @@
|
|||||||
|
import pdme.inputs
|
||||||
|
import pdme.model
|
||||||
|
import pdme.measurement
|
||||||
|
import pdme.measurement.input_types
|
||||||
|
import pdme.measurement.oscillating_dipole
|
||||||
|
import pdme.util.fast_v_calc
|
||||||
|
import pdme.util.fast_nonlocal_spectrum
|
||||||
|
from typing import Sequence, Tuple, List, Dict, Union, Optional
|
||||||
|
import datetime
|
||||||
|
import csv
|
||||||
|
import multiprocessing
|
||||||
|
import logging
|
||||||
|
import numpy
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: remove hardcode
|
||||||
|
CHUNKSIZE = 50
|
||||||
|
|
||||||
|
|
||||||
|
_logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def get_a_result_fast_filter_pairs(input) -> int:
|
||||||
|
(
|
||||||
|
model,
|
||||||
|
dot_inputs,
|
||||||
|
lows,
|
||||||
|
highs,
|
||||||
|
pair_inputs,
|
||||||
|
pair_lows,
|
||||||
|
pair_highs,
|
||||||
|
monte_carlo_count,
|
||||||
|
seed,
|
||||||
|
) = input
|
||||||
|
|
||||||
|
rng = numpy.random.default_rng(seed)
|
||||||
|
# TODO: A long term refactor is to pull the frequency stuff out from here. The None stands for max_frequency, which is unneeded in the actually useful models.
|
||||||
|
sample_dipoles = model.get_monte_carlo_dipole_inputs(
|
||||||
|
monte_carlo_count, None, rng_to_use=rng
|
||||||
|
)
|
||||||
|
|
||||||
|
current_sample = sample_dipoles
|
||||||
|
for di, low, high in zip(dot_inputs, lows, highs):
|
||||||
|
|
||||||
|
if len(current_sample) < 1:
|
||||||
|
break
|
||||||
|
vals = pdme.util.fast_v_calc.fast_vs_for_dipoleses(
|
||||||
|
numpy.array([di]), current_sample
|
||||||
|
)
|
||||||
|
|
||||||
|
current_sample = current_sample[numpy.all((vals > low) & (vals < high), axis=1)]
|
||||||
|
|
||||||
|
for pi, plow, phigh in zip(pair_inputs, pair_lows, pair_highs):
|
||||||
|
if len(current_sample) < 1:
|
||||||
|
break
|
||||||
|
vals = pdme.util.fast_nonlocal_spectrum.fast_s_nonlocal_dipoleses(
|
||||||
|
numpy.array([pi]), current_sample
|
||||||
|
)
|
||||||
|
|
||||||
|
current_sample = current_sample[
|
||||||
|
numpy.all(
|
||||||
|
((vals > plow) & (vals < phigh)) | ((vals < plow) & (vals > phigh)),
|
||||||
|
axis=1,
|
||||||
|
)
|
||||||
|
]
|
||||||
|
return len(current_sample)
|
||||||
|
|
||||||
|
|
||||||
|
def get_a_result_fast_filter(input) -> int:
|
||||||
|
model, dot_inputs, lows, highs, monte_carlo_count, seed = input
|
||||||
|
|
||||||
|
rng = numpy.random.default_rng(seed)
|
||||||
|
# TODO: A long term refactor is to pull the frequency stuff out from here. The None stands for max_frequency, which is unneeded in the actually useful models.
|
||||||
|
sample_dipoles = model.get_monte_carlo_dipole_inputs(
|
||||||
|
monte_carlo_count, None, rng_to_use=rng
|
||||||
|
)
|
||||||
|
|
||||||
|
current_sample = sample_dipoles
|
||||||
|
for di, low, high in zip(dot_inputs, lows, highs):
|
||||||
|
|
||||||
|
if len(current_sample) < 1:
|
||||||
|
break
|
||||||
|
vals = pdme.util.fast_v_calc.fast_vs_for_dipoleses(
|
||||||
|
numpy.array([di]), current_sample
|
||||||
|
)
|
||||||
|
|
||||||
|
current_sample = current_sample[numpy.all((vals > low) & (vals < high), axis=1)]
|
||||||
|
return len(current_sample)
|
||||||
|
|
||||||
|
|
||||||
|
class RealSpectrumRun:
	"""
	A bayes run given some real data.

	Parameters
	----------
	measurements : Sequence[pdme.measurement.DotRangeMeasurement]
		The dot inputs for this bayes run.

	models_with_names : Sequence[Tuple[str, pdme.model.DipoleModel]]
		The models to evaluate, as (name, model) pairs.

	filename_slug : str
		The filename slug to include in the output CSV name.

	monte_carlo_count : int
		Number of Monte Carlo samples drawn per worker task.

	monte_carlo_cycles : int
		Number of worker tasks (and spawned seeds) per cycle step.

	target_success : int
		Stop cycling for a model once this many successes have accumulated.

	max_monte_carlo_cycles_steps : int
		Hard cap on the number of cycle steps per model.

	chunksize : int
		Chunk size passed to ``Pool.imap_unordered``.

	initial_seed : int
		Seed for the ``numpy.random.SeedSequence`` used to derive worker seeds.

	cap_core_count : int
		If >= 1, caps the size of the multiprocessing pool.

	pair_measurements : Optional[Sequence[pdme.measurement.DotPairRangeMeasurement]]
		Optional pair measurements; when provided, the pair-filtering worker is used.
	"""

	def __init__(
		self,
		measurements: Sequence[pdme.measurement.DotRangeMeasurement],
		models_with_names: Sequence[Tuple[str, pdme.model.DipoleModel]],
		filename_slug: str,
		monte_carlo_count: int = 10000,
		monte_carlo_cycles: int = 10,
		target_success: int = 100,
		max_monte_carlo_cycles_steps: int = 10,
		chunksize: int = CHUNKSIZE,
		initial_seed: int = 12345,
		cap_core_count: int = 0,
		pair_measurements: Optional[
			Sequence[pdme.measurement.DotPairRangeMeasurement]
		] = None,
	) -> None:
		self.measurements = measurements
		# (position, frequency) pairs extracted from each measurement.
		self.dot_inputs = [(measure.r, measure.f) for measure in self.measurements]

		self.dot_inputs_array = pdme.measurement.input_types.dot_inputs_to_array(
			self.dot_inputs
		)

		if pair_measurements is not None:
			self.pair_measurements = pair_measurements
			self.use_pair_measurements = True
			# (r1, r2, frequency) triples for the pair-correlation worker.
			self.dot_pair_inputs = [
				(measure.r1, measure.r2, measure.f)
				for measure in self.pair_measurements
			]
			self.dot_pair_inputs_array = (
				pdme.measurement.input_types.dot_pair_inputs_to_array(
					self.dot_pair_inputs
				)
			)
		else:
			self.use_pair_measurements = False

		self.models = [model for (_, model) in models_with_names]
		self.model_names = [name for (name, _) in models_with_names]
		self.model_count = len(self.models)

		self.monte_carlo_count = monte_carlo_count
		self.monte_carlo_cycles = monte_carlo_cycles
		self.target_success = target_success
		self.max_monte_carlo_cycles_steps = max_monte_carlo_cycles_steps

		self.csv_fields = []

		# NOTE(review): compensate_zeros is set but not read in this class as
		# visible here; the 0.5 clamp in go() appears to implement the idea.
		self.compensate_zeros = True
		self.chunksize = chunksize
		# Three CSV columns per model: raw successes, sample count, posterior probability.
		for name in self.model_names:
			self.csv_fields.extend([f"{name}_success", f"{name}_count", f"{name}_prob"])

		# for now initialise priors as uniform.
		self.probabilities = [1 / self.model_count] * self.model_count

		timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")

		ff_string = "fast_filter"

		self.filename = f"{timestamp}-{filename_slug}.realdata.{ff_string}.bayesrun.csv"
		self.initial_seed = initial_seed

		self.cap_core_count = cap_core_count

	def go(self) -> None:
		"""
		Run the full bayes comparison: for each model, accumulate Monte Carlo
		successes in parallel, then update model probabilities and append one
		CSV row to ``self.filename``.
		"""
		# Write the header first; the file is opened again at the end for the data row.
		with open(self.filename, "a", newline="") as outfile:
			writer = csv.DictWriter(outfile, fieldnames=self.csv_fields, dialect="unix")
			writer.writeheader()

		(
			lows,
			highs,
		) = pdme.measurement.input_types.dot_range_measurements_low_high_arrays(
			self.measurements
		)

		pair_lows = None
		pair_highs = None
		if self.use_pair_measurements:
			(
				pair_lows,
				pair_highs,
			) = pdme.measurement.input_types.dot_range_measurements_low_high_arrays(
				self.pair_measurements
			)

		# define a new seed sequence for each run
		seed_sequence = numpy.random.SeedSequence(self.initial_seed)

		results = []
		_logger.debug("Going to iterate over models now")
		# Leave one core free; fall back to 1 on a single-core machine.
		core_count = multiprocessing.cpu_count() - 1 or 1
		if (self.cap_core_count >= 1) and (self.cap_core_count < core_count):
			core_count = self.cap_core_count
		_logger.info(f"Using {core_count} cores")
		for model_count, (model, model_name) in enumerate(
			zip(self.models, self.model_names)
		):
			_logger.debug(f"Doing model #{model_count}: {model_name}")
			with multiprocessing.Pool(core_count) as pool:
				cycle_count = 0
				cycle_success = 0
				cycles = 0
				# Keep cycling until we hit the success target or the step cap.
				while (cycles < self.max_monte_carlo_cycles_steps) and (
					cycle_success <= self.target_success
				):
					_logger.debug(f"Starting cycle {cycles}")
					cycles += 1
					current_success = 0
					cycle_count += self.monte_carlo_count * self.monte_carlo_cycles

					# generate a seed from the sequence for each core.
					# note this needs to be inside the loop for monte carlo cycle steps!
					# that way we get more stuff.
					seeds = seed_sequence.spawn(self.monte_carlo_cycles)

					if self.use_pair_measurements:
						current_success = sum(
							pool.imap_unordered(
								get_a_result_fast_filter_pairs,
								[
									(
										model,
										self.dot_inputs_array,
										lows,
										highs,
										self.dot_pair_inputs_array,
										pair_lows,
										pair_highs,
										self.monte_carlo_count,
										seed,
									)
									for seed in seeds
								],
								self.chunksize,
							)
						)
					else:

						current_success = sum(
							pool.imap_unordered(
								get_a_result_fast_filter,
								[
									(
										model,
										self.dot_inputs_array,
										lows,
										highs,
										self.monte_carlo_count,
										seed,
									)
									for seed in seeds
								],
								self.chunksize,
							)
						)

					cycle_success += current_success
					_logger.debug(f"current running successes: {cycle_success}")
			results.append((cycle_count, cycle_success))

		_logger.debug("Done, constructing output now")
		row: Dict[str, Union[int, float, str]] = {}

		successes: List[float] = []
		counts: List[int] = []
		for model_index, (name, (count, result)) in enumerate(
			zip(self.model_names, results)
		):

			row[f"{name}_success"] = result
			row[f"{name}_count"] = count
			# Clamp zero successes to 0.5 so a model's posterior never collapses to 0.
			successes.append(max(result, 0.5))
			counts.append(count)

		# Bayesian update: weight each model's success rate by its prior, normalise.
		success_weight = sum(
			[
				(succ / count) * prob
				for succ, count, prob in zip(successes, counts, self.probabilities)
			]
		)
		new_probabilities = [
			(succ / count) * old_prob / success_weight
			for succ, count, old_prob in zip(successes, counts, self.probabilities)
		]
		self.probabilities = new_probabilities
		for name, probability in zip(self.model_names, self.probabilities):
			row[f"{name}_prob"] = probability
		_logger.info(row)

		with open(self.filename, "a", newline="") as outfile:
			writer = csv.DictWriter(outfile, fieldnames=self.csv_fields, dialect="unix")
			writer.writerow(row)
|
3
deepdog/subset_simulation/__init__.py
Normal file
3
deepdog/subset_simulation/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# Public package interface: re-export the implementation's entry point so
# callers can write `from deepdog.subset_simulation import SubsetSimulation`.
from deepdog.subset_simulation.subset_simulation_impl import SubsetSimulation

__all__ = ["SubsetSimulation"]
|
337
deepdog/subset_simulation/subset_simulation_impl.py
Normal file
337
deepdog/subset_simulation/subset_simulation_impl.py
Normal file
@@ -0,0 +1,337 @@
|
|||||||
|
import logging
|
||||||
|
import numpy
|
||||||
|
import pdme.measurement
|
||||||
|
import pdme.measurement.input_types
|
||||||
|
import pdme.subspace_simulation
|
||||||
|
from typing import Sequence, Tuple, Optional
|
||||||
|
|
||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
_logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class SubsetSimulationResult:
	"""
	Result of a subset-simulation run.

	When the run leaves early on reaching the target cost, the over/under
	fields bracket the target; on a full run they are None and only
	lowest_likelihood is set (see SubsetSimulation.execute).
	"""

	# (likelihood, cost, level) tuples accumulated across levels.
	probs_list: Sequence[Tuple]
	# Cost just above the target (None if the run did not exit early).
	over_target_cost: Optional[float]
	# Likelihood at that over-target cost (None if the run did not exit early).
	over_target_likelihood: Optional[float]
	# Cost just below the target (None if the run did not exit early).
	under_target_cost: Optional[float]
	# Likelihood at that under-target cost (None if the run did not exit early).
	under_target_likelihood: Optional[float]
	# Smallest likelihood reached by the run.
	lowest_likelihood: Optional[float]
|
||||||
|
|
||||||
|
|
||||||
|
class SubsetSimulation:
	"""
	Subset simulation for estimating the likelihood of low-cost dipole
	configurations for a single model.

	Level 0 draws n_c * n_s Monte Carlo samples; each subsequent level seeds
	MCMC chains from the n_c best (lowest-cost) samples of the previous level
	and keeps chains below the current threshold cost. Runs for at most m_max
	levels, or leaves early once the threshold drops below target_cost.
	"""

	def __init__(
		self,
		model_name_pair,
		dot_inputs,
		actual_measurements: Sequence[pdme.measurement.DotMeasurement],
		n_c: int,
		n_s: int,
		m_max: int,
		target_cost: Optional[float] = None,
		level_0_seed: int = 200,
		mcmc_seed: int = 20,
		use_adaptive_steps=True,
		default_phi_step=0.01,
		default_theta_step=0.01,
		default_r_step=0.01,
		default_w_log_step=0.01,
		default_upper_w_log_step=4,
		keep_probs_list=True,
		dump_last_generation_to_file=False,
	):
		# model_name_pair is a (name, model) tuple, matching the bayes-run convention.
		name, model = model_name_pair
		self.model_name = name
		self.model = model
		_logger.info(f"got model {self.model_name}")

		self.dot_inputs_array = pdme.measurement.input_types.dot_inputs_to_array(
			dot_inputs
		)
		# _logger.debug(f"actual measurements: {actual_measurements}")
		self.actual_measurement_array = numpy.array([m.v for m in actual_measurements])

		# Closure capturing the fixed inputs/measurements so levels only pass dipoles.
		def cost_function_to_use(dipoles_to_test):
			return pdme.subspace_simulation.proportional_costs_vs_actual_measurement(
				self.dot_inputs_array, self.actual_measurement_array, dipoles_to_test
			)

		self.cost_function_to_use = cost_function_to_use

		# n_c: chains kept per level; n_s: samples per chain; m_max: max levels.
		self.n_c = n_c
		self.n_s = n_s
		self.m_max = m_max

		self.level_0_seed = level_0_seed
		self.mcmc_seed = mcmc_seed

		self.use_adaptive_steps = use_adaptive_steps
		self.default_phi_step = default_phi_step
		self.default_theta_step = default_theta_step
		self.default_r_step = default_r_step
		self.default_w_log_step = default_w_log_step
		self.default_upper_w_log_step = default_upper_w_log_step

		_logger.info("using params:")
		_logger.info(f"\tn_c: {self.n_c}")
		_logger.info(f"\tn_s: {self.n_s}")
		_logger.info(f"\tm: {self.m_max}")
		_logger.info("let's do level 0...")

		self.target_cost = target_cost
		_logger.info(f"will stop at target cost {target_cost}")

		self.keep_probs_list = keep_probs_list
		self.dump_last_generations = dump_last_generation_to_file

	def execute(self) -> SubsetSimulationResult:
		"""
		Run the subset simulation and return its result.

		Returns a SubsetSimulationResult with the target-bracketing fields set
		if the target cost was reached early, otherwise with only the final
		minimum likelihood populated.
		"""

		probs_list = []

		# Level 0: plain Monte Carlo. The -1 is the max_frequency argument.
		sample_dipoles = self.model.get_monte_carlo_dipole_inputs(
			self.n_c * self.n_s,
			-1,
			rng_to_use=numpy.random.default_rng(self.level_0_seed),
		)
		# _logger.debug(sample_dipoles)
		# _logger.debug(sample_dipoles.shape)
		costs = self.cost_function_to_use(sample_dipoles)

		_logger.debug(f"costs: {costs}")
		# Descending cost order: the best (lowest-cost) samples end up last.
		sorted_indexes = costs.argsort()[::-1]

		_logger.debug(costs[sorted_indexes])
		_logger.debug(sample_dipoles[sorted_indexes])

		sorted_costs = costs[sorted_indexes]
		sorted_dipoles = sample_dipoles[sorted_indexes]

		# Threshold for the next level: cost of the n_c-th best sample.
		threshold_cost = sorted_costs[-self.n_c]

		# Canonicalise dipole ordering so chains compare consistently.
		all_dipoles = numpy.array(
			[
				pdme.subspace_simulation.sort_array_of_dipoles_by_frequency(samp)
				for samp in sorted_dipoles
			]
		)
		all_chains = list(zip(sorted_costs, all_dipoles))

		mcmc_rng = numpy.random.default_rng(self.mcmc_seed)

		for i in range(self.m_max):
			# The n_c best samples (list is in descending cost order) seed this level.
			next_seeds = all_chains[-self.n_c:]

			if self.dump_last_generations:
				_logger.info("writing out csv file")
				next_dipoles_seed_dipoles = numpy.array([n[1] for n in next_seeds])
				for n in range(self.model.n):
					_logger.info(f"{next_dipoles_seed_dipoles[:, n].shape}")
					numpy.savetxt(
						f"generation_{self.n_c}_{self.n_s}_{i}_dipole_{n}.csv",
						next_dipoles_seed_dipoles[:, n],
						delimiter=",",
					)

			if self.keep_probs_list:
				# Record likelihood estimates for the samples being discarded this level.
				for cost_index, cost_chain in enumerate(all_chains[: -self.n_c]):
					probs_list.append(
						(
							((self.n_c * self.n_s - cost_index) / (self.n_c * self.n_s))
							/ (self.n_s ** (i)),
							cost_chain[0],
							i + 1,
						)
					)

			next_seeds_as_array = numpy.array([s for _, s in next_seeds])

			stdevs = self.get_stdevs_from_arrays(next_seeds_as_array)
			_logger.info(f"got stdevs: {stdevs.stdevs}")

			# Grow a fresh generation: one MCMC chain per surviving seed.
			all_chains = []
			for c, s in next_seeds:
				# chain = mcmc(s, threshold_cost, n_s, model, dot_inputs_array, actual_measurement_array, mcmc_rng, curr_cost=c, stdevs=stdevs)
				# until new version gotta do
				chain = self.model.get_mcmc_chain(
					s,
					self.cost_function_to_use,
					self.n_s,
					threshold_cost,
					stdevs,
					initial_cost=c,
					rng_arg=mcmc_rng,
				)
				for cost, chained in chain:
					# Cost may come back as a length-1 array or a scalar; unwrap either.
					try:
						filtered_cost = cost[0]
					except IndexError:
						filtered_cost = cost
					all_chains.append((filtered_cost, chained))

			# _logger.debug(all_chains)

			all_chains.sort(key=lambda c: c[0], reverse=True)

			threshold_cost = all_chains[-self.n_c][0]
			_logger.info(
				f"current threshold cost: {threshold_cost}, at P = (1 / {self.n_s})^{i + 1}"
			)
			if (self.target_cost is not None) and (threshold_cost < self.target_cost):
				_logger.info(
					f"got a threshold cost {threshold_cost}, less than {self.target_cost}. will leave early"
				)

				cost_list = [c[0] for c in all_chains]
				# Index bracketing the target in the descending cost list.
				over_index = reverse_bisect_right(cost_list, self.target_cost)

				shorter_probs_list = []
				for cost_index, cost_chain in enumerate(all_chains):
					if self.keep_probs_list:
						probs_list.append(
							(
								(
									(self.n_c * self.n_s - cost_index)
									/ (self.n_c * self.n_s)
								)
								/ (self.n_s ** (i)),
								cost_chain[0],
								i + 1,
							)
						)
					shorter_probs_list.append(
						(
							cost_chain[0],
							((self.n_c * self.n_s - cost_index) / (self.n_c * self.n_s))
							/ (self.n_s ** (i)),
						)
					)
				# _logger.info(shorter_probs_list)
				result = SubsetSimulationResult(
					probs_list=probs_list,
					over_target_cost=shorter_probs_list[over_index - 1][0],
					over_target_likelihood=shorter_probs_list[over_index - 1][1],
					under_target_cost=shorter_probs_list[over_index][0],
					under_target_likelihood=shorter_probs_list[over_index][1],
					lowest_likelihood=shorter_probs_list[-1][1],
				)
				return result

			# _logger.debug([c[0] for c in all_chains[-n_c:]])
			_logger.info(f"doing level {i + 1}")

		# Ran all m_max levels without hitting the target: record the final generation.
		if self.keep_probs_list:
			for cost_index, cost_chain in enumerate(all_chains):
				probs_list.append(
					(
						((self.n_c * self.n_s - cost_index) / (self.n_c * self.n_s))
						/ (self.n_s ** (self.m_max)),
						cost_chain[0],
						self.m_max + 1,
					)
				)
		threshold_cost = all_chains[-self.n_c][0]
		_logger.info(
			f"final threshold cost: {threshold_cost}, at P = (1 / {self.n_s})^{self.m_max + 1}"
		)
		for a in all_chains[-10:]:
			_logger.info(a)
		# for prob, prob_cost in probs_list:
		# 	_logger.info(f"\t{prob}: {prob_cost}")
		probs_list.sort(key=lambda c: c[0], reverse=True)

		# Smallest resolvable likelihood after m_max levels.
		min_likelihood = ((1) / (self.n_c * self.n_s)) / (self.n_s ** (self.m_max + 1))

		result = SubsetSimulationResult(
			probs_list=probs_list,
			over_target_cost=None,
			over_target_likelihood=None,
			under_target_cost=None,
			under_target_likelihood=None,
			lowest_likelihood=min_likelihood,
		)
		return result

	def get_stdevs_from_arrays(
		self, array
	) -> pdme.subspace_simulation.MCMCStandardDeviation:
		"""
		Derive per-dipole MCMC proposal step sizes from the current seed samples.

		With adaptive steps, each dipole's steps come from the sample spread
		(floored at default_step / (n_s * 10), and with the frequency step
		additionally capped at default_upper_w_log_step); otherwise a single
		default step set is used for all dipoles.

		NOTE(review): assumes `array` is shaped (samples, dipoles, components)
		with components [px, py, pz, rx, ry, rz, ..., w] — confirm against
		model.get_monte_carlo_dipole_inputs.
		"""
		# stdevs = get_stdevs_from_arrays(next_seeds_as_array, model)
		if self.use_adaptive_steps:

			stdev_array = []
			count = array.shape[1]
			for dipole_index in range(count):
				selected = array[:, dipole_index]
				pxs = selected[:, 0]
				pys = selected[:, 1]
				pzs = selected[:, 2]
				# Spherical angles of the dipole moment; pfixed is its fixed magnitude.
				thetas = numpy.arccos(pzs / self.model.pfixed)
				phis = numpy.arctan2(pys, pxs)

				rstdevs = numpy.maximum(
					numpy.std(selected, axis=0)[3:6],
					self.default_r_step / (self.n_s * 10),
				)
				frequency_stdevs = numpy.minimum(
					numpy.maximum(
						numpy.std(numpy.log(selected[:, -1])),
						self.default_w_log_step / (self.n_s * 10),
					),
					self.default_upper_w_log_step,
				)
				stdev_array.append(
					pdme.subspace_simulation.DipoleStandardDeviation(
						p_theta_step=max(
							numpy.std(thetas), self.default_theta_step / (self.n_s * 10)
						),
						p_phi_step=max(
							numpy.std(phis), self.default_phi_step / (self.n_s * 10)
						),
						rx_step=rstdevs[0],
						ry_step=rstdevs[1],
						rz_step=rstdevs[2],
						w_log_step=frequency_stdevs,
					)
				)
		else:
			default_stdev = pdme.subspace_simulation.DipoleStandardDeviation(
				self.default_phi_step,
				self.default_theta_step,
				self.default_r_step,
				self.default_r_step,
				self.default_r_step,
				self.default_w_log_step,
			)
			stdev_array = [default_stdev]
		stdevs = pdme.subspace_simulation.MCMCStandardDeviation(stdev_array)
		return stdevs
|
||||||
|
|
||||||
|
|
||||||
|
def reverse_bisect_right(a, x, lo=0, hi=None):
	"""Return the index where to insert item x in list a, assuming a is sorted in descending order.

	The return value i satisfies: every element of a[:i] is >= x, and every
	element of a[i:] is < x. If x is already present, the insertion point is
	just after the rightmost occurrence.

	Optional args lo (default 0) and hi (default len(a)) restrict the search
	to a slice of a.

	Equivalently: the number of elements of a that are >= x.
	>>> a = [8, 6, 5, 4, 2]
	>>> reverse_bisect_right(a, 5)
	3
	>>> a[:reverse_bisect_right(a, 5)]
	[8, 6, 5]
	"""
	if lo < 0:
		raise ValueError("lo must be non-negative")
	if hi is None:
		hi = len(a)
	# Standard binary search, with the comparison flipped for descending order.
	while lo < hi:
		middle = (lo + hi) // 2
		if a[middle] < x:
			# Everything from middle onward is strictly below x; look left.
			hi = middle
		else:
			# a[middle] >= x, so the insertion point is to the right of it.
			lo = middle + 1
	return lo
|
231
deepdog/temp_aware_real_spectrum_run.py
Normal file
231
deepdog/temp_aware_real_spectrum_run.py
Normal file
@@ -0,0 +1,231 @@
|
|||||||
|
import pdme.inputs
|
||||||
|
import pdme.model
|
||||||
|
import pdme.measurement
|
||||||
|
import pdme.measurement.input_types
|
||||||
|
import pdme.measurement.oscillating_dipole
|
||||||
|
import pdme.util.fast_v_calc
|
||||||
|
import pdme.util.fast_nonlocal_spectrum
|
||||||
|
from typing import Sequence, Tuple, List, Dict, Union, Mapping
|
||||||
|
import datetime
|
||||||
|
import csv
|
||||||
|
import multiprocessing
|
||||||
|
import logging
|
||||||
|
import numpy
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: remove hardcode
# Default number of work items handed to each worker process at a time
# via multiprocessing.Pool.imap_unordered.
CHUNKSIZE = 50


# Module-level logger named after this module, per the logging convention.
_logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def get_a_result_fast_filter(input) -> int:
	"""
	Worker entry point: draw random dipole configurations and count how many
	survive every per-dot (low, high) band filter at every temperature.

	The single ``input`` tuple (required by ``Pool.imap_unordered``) unpacks to
	``(model, dot_inputs_dict, low_high_dict, monte_carlo_count, seed)``, where
	``dot_inputs_dict`` maps temperature -> dot input array and
	``low_high_dict`` maps temperature -> (lows, highs) band-edge arrays.

	Returns the number of sampled configurations whose predicted values lie
	strictly inside every band at every temperature.
	"""
	model, dot_inputs_dict, low_high_dict, monte_carlo_count, seed = input

	rng = numpy.random.default_rng(seed)
	# TODO: A long term refactor is to pull the frequency stuff out from here. The None stands for max_frequency, which is unneeded in the actually useful models.
	sample_dipoles = model.get_monte_carlo_dipole_inputs(
		monte_carlo_count, None, rng_to_use=rng
	)

	current_sample = sample_dipoles
	# Iterate items() directly rather than keys() plus lookup.
	for temp, dot_inputs in dot_inputs_dict.items():
		if len(current_sample) < 1:
			# All samples already filtered out: no remaining temperature can
			# revive any, so stop the outer loop too instead of spinning.
			break
		lows, highs = low_high_dict[temp]
		for di, low, high in zip(dot_inputs, lows, highs):

			if len(current_sample) < 1:
				break
			vals = pdme.util.fast_v_calc.fast_vs_for_asymmetric_dipoleses(
				numpy.array([di]), current_sample, temp
			)

			# Keep only samples inside the (low, high) band for every value.
			current_sample = current_sample[
				numpy.all((vals > low) & (vals < high), axis=1)
			]
	return len(current_sample)
|
||||||
|
|
||||||
|
|
||||||
|
class TempAwareRealSpectrumRun:
	"""
	A bayes run given some real data, with potentially variable temperature.

	Parameters
	----------
	measurements_dict : Mapping[float, Sequence[pdme.measurement.DotRangeMeasurement]]
		The dot inputs for this bayes run, in a dictionary indexed by temperatures

	models_with_names : Sequence[Tuple[str, pdme.model.DipoleModel]]
		The models to evaluate, as (name, model) pairs.

	filename_slug : str
		The filename slug to include in the output CSV name.

	monte_carlo_count : int
		Number of Monte Carlo samples drawn per worker task.

	monte_carlo_cycles : int
		Number of worker tasks (and spawned seeds) per cycle step.

	target_success : int
		Stop cycling for a model once this many successes have accumulated.

	max_monte_carlo_cycles_steps : int
		Hard cap on the number of cycle steps per model.

	chunksize : int
		Chunk size passed to ``Pool.imap_unordered``.

	initial_seed : int
		Seed for the ``numpy.random.SeedSequence`` used to derive worker seeds.

	cap_core_count : int
		If >= 1, caps the size of the multiprocessing pool.
	"""

	def __init__(
		self,
		measurements_dict: Mapping[
			float, Sequence[pdme.measurement.DotRangeMeasurement]
		],
		models_with_names: Sequence[Tuple[str, pdme.model.DipoleModel]],
		filename_slug: str,
		monte_carlo_count: int = 10000,
		monte_carlo_cycles: int = 10,
		target_success: int = 100,
		max_monte_carlo_cycles_steps: int = 10,
		chunksize: int = CHUNKSIZE,
		initial_seed: int = 12345,
		cap_core_count: int = 0,
	) -> None:
		self.measurements_dict = measurements_dict
		# Per-temperature (position, frequency) pairs extracted from each measurement.
		self.dot_inputs_dict = {
			k: [(measure.r, measure.f) for measure in measurements]
			for k, measurements in measurements_dict.items()
		}

		self.dot_inputs_array_dict = {
			k: pdme.measurement.input_types.dot_inputs_to_array(dot_inputs)
			for k, dot_inputs in self.dot_inputs_dict.items()
		}

		self.models = [model for (_, model) in models_with_names]
		self.model_names = [name for (name, _) in models_with_names]
		self.model_count = len(self.models)

		self.monte_carlo_count = monte_carlo_count
		self.monte_carlo_cycles = monte_carlo_cycles
		self.target_success = target_success
		self.max_monte_carlo_cycles_steps = max_monte_carlo_cycles_steps

		self.csv_fields = []

		# NOTE(review): compensate_zeros is set but not read in this class as
		# visible here; the 0.5 clamp in go() appears to implement the idea.
		self.compensate_zeros = True
		self.chunksize = chunksize
		# Three CSV columns per model: raw successes, sample count, posterior probability.
		for name in self.model_names:
			self.csv_fields.extend([f"{name}_success", f"{name}_count", f"{name}_prob"])

		# for now initialise priors as uniform.
		self.probabilities = [1 / self.model_count] * self.model_count

		timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
		ff_string = "fast_filter"
		self.filename = f"{timestamp}-{filename_slug}.realdata.{ff_string}.bayesrun.csv"
		self.initial_seed = initial_seed

		self.cap_core_count = cap_core_count

	def go(self) -> None:
		"""
		Run the full bayes comparison: for each model, accumulate Monte Carlo
		successes in parallel across all temperatures, then update model
		probabilities and append one CSV row to ``self.filename``.
		"""
		# Write the header first; the file is opened again at the end for the data row.
		with open(self.filename, "a", newline="") as outfile:
			writer = csv.DictWriter(outfile, fieldnames=self.csv_fields, dialect="unix")
			writer.writeheader()

		# Per-temperature (lows, highs) acceptance-band arrays.
		low_high_dict = {}
		for temp, measurements in self.measurements_dict.items():
			(
				lows,
				highs,
			) = pdme.measurement.input_types.dot_range_measurements_low_high_arrays(
				measurements
			)
			low_high_dict[temp] = (lows, highs)

		# define a new seed sequence for each run
		seed_sequence = numpy.random.SeedSequence(self.initial_seed)

		results = []
		_logger.debug("Going to iterate over models now")
		# Leave one core free; fall back to 1 on a single-core machine.
		core_count = multiprocessing.cpu_count() - 1 or 1
		if (self.cap_core_count >= 1) and (self.cap_core_count < core_count):
			core_count = self.cap_core_count
		_logger.info(f"Using {core_count} cores")
		for model_count, (model, model_name) in enumerate(
			zip(self.models, self.model_names)
		):
			_logger.debug(f"Doing model #{model_count}: {model_name}")
			with multiprocessing.Pool(core_count) as pool:
				cycle_count = 0
				cycle_success = 0
				cycles = 0
				# Keep cycling until we hit the success target or the step cap.
				while (cycles < self.max_monte_carlo_cycles_steps) and (
					cycle_success <= self.target_success
				):
					_logger.debug(f"Starting cycle {cycles}")
					cycles += 1
					current_success = 0
					cycle_count += self.monte_carlo_count * self.monte_carlo_cycles

					# generate a seed from the sequence for each core.
					# note this needs to be inside the loop for monte carlo cycle steps!
					# that way we get more stuff.
					seeds = seed_sequence.spawn(self.monte_carlo_cycles)

					result_func = get_a_result_fast_filter

					current_success = sum(
						pool.imap_unordered(
							result_func,
							[
								(
									model,
									self.dot_inputs_array_dict,
									low_high_dict,
									self.monte_carlo_count,
									seed,
								)
								for seed in seeds
							],
							self.chunksize,
						)
					)

					cycle_success += current_success
					_logger.debug(f"current running successes: {cycle_success}")
			results.append((cycle_count, cycle_success))

		_logger.debug("Done, constructing output now")
		row: Dict[str, Union[int, float, str]] = {}

		successes: List[float] = []
		counts: List[int] = []
		for model_index, (name, (count, result)) in enumerate(
			zip(self.model_names, results)
		):

			row[f"{name}_success"] = result
			row[f"{name}_count"] = count
			# Clamp zero successes to 0.5 so a model's posterior never collapses to 0.
			successes.append(max(result, 0.5))
			counts.append(count)

		# Bayesian update: weight each model's success rate by its prior, normalise.
		success_weight = sum(
			[
				(succ / count) * prob
				for succ, count, prob in zip(successes, counts, self.probabilities)
			]
		)
		new_probabilities = [
			(succ / count) * old_prob / success_weight
			for succ, count, old_prob in zip(successes, counts, self.probabilities)
		]
		self.probabilities = new_probabilities
		for name, probability in zip(self.model_names, self.probabilities):
			row[f"{name}_prob"] = probability
		_logger.info(row)

		with open(self.filename, "a", newline="") as outfile:
			writer = csv.DictWriter(outfile, fieldnames=self.csv_fields, dialect="unix")
			writer.writerow(row)
|
9
do.sh
9
do.sh
@@ -16,6 +16,15 @@ test() {
|
|||||||
poetry run pytest
|
poetry run pytest
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Format the codebase: run black, then convert indentation spaces to tabs
# in all Python files (the project's style uses tabs).
fmt() {
	poetry run black .
	# NOTE(review): this pattern as rendered replaces every single space with a
	# tab; if the intent is indentation conversion only, the original pattern
	# was likely multiple spaces (e.g. 's/    /\t/g') — confirm against the repo.
	find . -type f -name "*.py" -exec sed -i -e 's/ /\t/g' {} \;
}
|
||||||
|
|
||||||
|
# Delegate the release process to the dedicated release script.
release() {
	./scripts/release.sh
}
|
||||||
|
|
||||||
# Run the test suite and produce an HTML coverage report (htmlcov/).
htmlcov() {
	poetry run pytest --cov-report=html
}
|
||||||
|
95
flake.lock
generated
Normal file
95
flake.lock
generated
Normal file
@@ -0,0 +1,95 @@
|
|||||||
|
{
|
||||||
|
"nodes": {
|
||||||
|
"flake-utils": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1648297722,
|
||||||
|
"narHash": "sha256-W+qlPsiZd8F3XkzXOzAoR+mpFqzm3ekQkJNa+PIh1BQ=",
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"rev": "0f8662f1319ad6abf89b3380dd2722369fc51ade",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"rev": "0f8662f1319ad6abf89b3380dd2722369fc51ade",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flake-utils_2": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1653893745,
|
||||||
|
"narHash": "sha256-0jntwV3Z8//YwuOjzhV2sgJJPt+HY6KhU7VZUL0fKZQ=",
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"rev": "1ed9fb1935d260de5fe1c2f7ee0ebaae17ed2fa1",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1655087213,
|
||||||
|
"narHash": "sha256-4R5oQ+OwGAAcXWYrxC4gFMTUSstGxaN8kN7e8hkum/8=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "37b6b161e536fddca54424cf80662bce735bdd1e",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "37b6b161e536fddca54424cf80662bce735bdd1e",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs_2": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1655046959,
|
||||||
|
"narHash": "sha256-gxqHZKq1ReLDe6ZMJSbmSZlLY95DsVq5o6jQihhzvmw=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "07bf3d25ce1da3bee6703657e6a787a4c6cdcea9",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"poetry2nix": {
|
||||||
|
"inputs": {
|
||||||
|
"flake-utils": "flake-utils_2",
|
||||||
|
"nixpkgs": "nixpkgs_2"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1654921554,
|
||||||
|
"narHash": "sha256-hkfMdQAHSwLWlg0sBVvgrQdIiBP45U1/ktmFpY4g2Mo=",
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "poetry2nix",
|
||||||
|
"rev": "7b71679fa7df00e1678fc3f1d1d4f5f372341b63",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "poetry2nix",
|
||||||
|
"rev": "7b71679fa7df00e1678fc3f1d1d4f5f372341b63",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": {
|
||||||
|
"inputs": {
|
||||||
|
"flake-utils": "flake-utils",
|
||||||
|
"nixpkgs": "nixpkgs",
|
||||||
|
"poetry2nix": "poetry2nix"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": "root",
|
||||||
|
"version": 7
|
||||||
|
}
|
63
flake.nix
Normal file
63
flake.nix
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
{
|
||||||
|
description = "Application packaged using poetry2nix";
|
||||||
|
|
||||||
|
inputs.flake-utils.url = "github:numtide/flake-utils?rev=0f8662f1319ad6abf89b3380dd2722369fc51ade";
|
||||||
|
inputs.nixpkgs.url = "github:NixOS/nixpkgs?rev=37b6b161e536fddca54424cf80662bce735bdd1e";
|
||||||
|
inputs.poetry2nix.url = "github:nix-community/poetry2nix?rev=7b71679fa7df00e1678fc3f1d1d4f5f372341b63";
|
||||||
|
|
||||||
|
outputs = { self, nixpkgs, flake-utils, poetry2nix }:
|
||||||
|
{
|
||||||
|
# Nixpkgs overlay providing the application
|
||||||
|
overlay = nixpkgs.lib.composeManyExtensions [
|
||||||
|
poetry2nix.overlay
|
||||||
|
(final: prev: {
|
||||||
|
# The application
|
||||||
|
deepdog = prev.poetry2nix.mkPoetryApplication {
|
||||||
|
overrides = final.poetry2nix.overrides.withDefaults (self: super: {
|
||||||
|
# …
|
||||||
|
# workaround https://github.com/nix-community/poetry2nix/issues/568
|
||||||
|
pdme = super.pdme.overridePythonAttrs (old: {
|
||||||
|
buildInputs = old.buildInputs or [ ] ++ [ final.python39.pkgs.poetry-core ];
|
||||||
|
});
|
||||||
|
});
|
||||||
|
projectDir = ./.;
|
||||||
|
};
|
||||||
|
deepdogEnv = prev.poetry2nix.mkPoetryEnv {
|
||||||
|
overrides = final.poetry2nix.overrides.withDefaults (self: super: {
|
||||||
|
# …
|
||||||
|
# workaround https://github.com/nix-community/poetry2nix/issues/568
|
||||||
|
pdme = super.pdme.overridePythonAttrs (old: {
|
||||||
|
buildInputs = old.buildInputs or [ ] ++ [ final.python39.pkgs.poetry-core ];
|
||||||
|
});
|
||||||
|
});
|
||||||
|
projectDir = ./.;
|
||||||
|
};
|
||||||
|
})
|
||||||
|
];
|
||||||
|
} // (flake-utils.lib.eachDefaultSystem (system:
|
||||||
|
let
|
||||||
|
pkgs = import nixpkgs {
|
||||||
|
inherit system;
|
||||||
|
overlays = [ self.overlay ];
|
||||||
|
};
|
||||||
|
in
|
||||||
|
{
|
||||||
|
apps = {
|
||||||
|
deepdog = pkgs.deepdog;
|
||||||
|
};
|
||||||
|
|
||||||
|
defaultApp = pkgs.deepdog;
|
||||||
|
devShell = pkgs.mkShell {
|
||||||
|
buildInputs = [
|
||||||
|
pkgs.poetry
|
||||||
|
pkgs.deepdogEnv
|
||||||
|
pkgs.deepdog
|
||||||
|
];
|
||||||
|
shellHook = ''
|
||||||
|
export DO_NIX_CUSTOM=1
|
||||||
|
'';
|
||||||
|
packages = [ pkgs.nodejs-16_x ];
|
||||||
|
};
|
||||||
|
|
||||||
|
}));
|
||||||
|
}
|
@@ -1,9 +1,11 @@
|
|||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: Pod
|
kind: Pod
|
||||||
spec:
|
spec:
|
||||||
|
imagePullSecrets:
|
||||||
|
- name: regcreds
|
||||||
containers: # list of containers that you want present for your build, you can define a default container in the Jenkinsfile
|
containers: # list of containers that you want present for your build, you can define a default container in the Jenkinsfile
|
||||||
- name: python
|
- name: poetry
|
||||||
image: python:3.8
|
image: ghcr.io/dmallubhotla/poetry-image:1
|
||||||
command: ["tail", "-f", "/dev/null"] # this or any command that is bascially a noop is required, this is so that you don't overwrite the entrypoint of the base container
|
command: ["tail", "-f", "/dev/null"] # this or any command that is bascially a noop is required, this is so that you don't overwrite the entrypoint of the base container
|
||||||
imagePullPolicy: Always # use cache or pull image for agent
|
imagePullPolicy: Always # use cache or pull image for agent
|
||||||
resources: # limits the resources your build contaienr
|
resources: # limits the resources your build contaienr
|
||||||
|
887
poetry.lock
generated
887
poetry.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -1,18 +1,22 @@
|
|||||||
[tool.poetry]
|
[tool.poetry]
|
||||||
name = "deepdog"
|
name = "deepdog"
|
||||||
version = "0.1.3"
|
version = "0.7.3"
|
||||||
description = ""
|
description = ""
|
||||||
authors = ["Deepak Mallubhotla <dmallubhotla+github@gmail.com>"]
|
authors = ["Deepak Mallubhotla <dmallubhotla+github@gmail.com>"]
|
||||||
|
|
||||||
[tool.poetry.dependencies]
|
[tool.poetry.dependencies]
|
||||||
python = "^3.8,<3.10"
|
python = ">=3.8.1,<3.10"
|
||||||
pdme = "^0.4.1"
|
pdme = "^0.9.1"
|
||||||
|
numpy = "1.22.3"
|
||||||
|
scipy = "1.10"
|
||||||
|
|
||||||
[tool.poetry.dev-dependencies]
|
[tool.poetry.dev-dependencies]
|
||||||
pytest = ">=6"
|
pytest = ">=6"
|
||||||
flake8 = "^4.0.1"
|
flake8 = "^4.0.1"
|
||||||
pytest-cov = "^3.0.0"
|
pytest-cov = "^4.1.0"
|
||||||
mypy = "^0.931"
|
mypy = "^0.971"
|
||||||
|
python-semantic-release = "^7.24.0"
|
||||||
|
black = "^22.3.0"
|
||||||
|
|
||||||
[build-system]
|
[build-system]
|
||||||
requires = ["poetry-core>=1.0.0"]
|
requires = ["poetry-core>=1.0.0"]
|
||||||
@@ -32,3 +36,7 @@ module = [
|
|||||||
"scipy.optimize"
|
"scipy.optimize"
|
||||||
]
|
]
|
||||||
ignore_missing_imports = true
|
ignore_missing_imports = true
|
||||||
|
|
||||||
|
[tool.semantic_release]
|
||||||
|
version_toml = "pyproject.toml:tool.poetry.version"
|
||||||
|
tag_format = "{version}"
|
||||||
|
3
renovate.json
Normal file
3
renovate.json
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://docs.renovatebot.com/renovate-schema.json"
|
||||||
|
}
|
@@ -1,29 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
set -Eeuo pipefail
|
|
||||||
|
|
||||||
if [ -z "$(git status --porcelain)" ]; then
|
|
||||||
# Working directory clean
|
|
||||||
branch_name=$(git symbolic-ref -q HEAD)
|
|
||||||
branch_name=${branch_name##refs/heads/}
|
|
||||||
branch_name=${branch_name:-HEAD}
|
|
||||||
|
|
||||||
poetry version patch
|
|
||||||
version=`sed 's/version = "\([0-9]*.[0-9]*.[0-9]*\)"/\1/p' -n <pyproject.toml`
|
|
||||||
read -p "Create commit for version $version? " -n 1 -r
|
|
||||||
echo # (optional) move to a new line
|
|
||||||
if [[ $REPLY =~ ^[Yy]$ ]]
|
|
||||||
then
|
|
||||||
# do dangerous stuff
|
|
||||||
echo "Creating a new patch"
|
|
||||||
git add pyproject.toml
|
|
||||||
git commit -m "Created version $version"
|
|
||||||
git tag -a "$version" -m "patch.sh created version $version"
|
|
||||||
git push --tags
|
|
||||||
else
|
|
||||||
echo "Surrendering, clean up by reverting pyproject.toml..."
|
|
||||||
exit 2
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo "Can't create patch version, working tree unclean..."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
45
scripts/release.sh
Normal file
45
scripts/release.sh
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -Eeuo pipefail
|
||||||
|
|
||||||
|
if [ -z "$(git status --porcelain)" ]; then
|
||||||
|
branch_name=$(git symbolic-ref -q HEAD)
|
||||||
|
branch_name=${branch_name##refs/heads/}
|
||||||
|
branch_name=${branch_name:-HEAD}
|
||||||
|
if [ $branch_name != "master" ]; then
|
||||||
|
echo "The current branch is not master!"
|
||||||
|
echo "I'd feel uncomfortable releasing from here..."
|
||||||
|
exit 3
|
||||||
|
fi
|
||||||
|
|
||||||
|
release_needed=false
|
||||||
|
if \
|
||||||
|
{ git log "$( git describe --tags --abbrev=0 )..HEAD" --format='%s' | cut -d: -f1 | sort -u | sed -e 's/([^)]*)//' | grep -q -i -E '^feat|fix|perf|refactor|revert$' ; } || \
|
||||||
|
{ git log "$( git describe --tags --abbrev=0 )..HEAD" --format='%s' | cut -d: -f1 | sort -u | sed -e 's/([^)]*)//' | grep -q -E '\!$' ; } || \
|
||||||
|
{ git log "$( git describe --tags --abbrev=0 )..HEAD" --format='%b' | grep -q -E '^BREAKING CHANGE:' ; }
|
||||||
|
then
|
||||||
|
release_needed=true
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! [ "$release_needed" = true ]; then
|
||||||
|
echo "No release needed..."
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Working directory clean
|
||||||
|
echo "Doing a dry run..."
|
||||||
|
npx standard-version --dry-run
|
||||||
|
read -p "Does that look good? [y/N] " -n 1 -r
|
||||||
|
echo # (optional) move to a new line
|
||||||
|
if [[ $REPLY =~ ^[Yy]$ ]]
|
||||||
|
then
|
||||||
|
# do dangerous stuff
|
||||||
|
npx standard-version
|
||||||
|
git push --follow-tags origin master
|
||||||
|
else
|
||||||
|
echo "okay, never mind then..."
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "Can't create release, working tree unclean..."
|
||||||
|
exit 1
|
||||||
|
fi
|
11
scripts/standard-version/pyproject-updater.js
Normal file
11
scripts/standard-version/pyproject-updater.js
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
const pattern = /(\[tool\.poetry\]\nname = "deepdog"\nversion = ")(?<vers>\d+\.\d+\.\d)(")/mg;
|
||||||
|
|
||||||
|
module.exports.readVersion = function (contents) {
|
||||||
|
const result = pattern.exec(contents);
|
||||||
|
return result.groups.vers;
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports.writeVersion = function (contents, version) {
|
||||||
|
const newContents = contents.replace(pattern, `$1${version}$3`);
|
||||||
|
return newContents;
|
||||||
|
}
|
Reference in New Issue
Block a user