Spaces: Sleeping

added waveOrder reconstruction demo

Browse files
- .gitattributes +2 -0
- .gitignore +162 -0
- README.md +80 -4
- app.py +49 -5
- data/20x.zarr/A/1/002026/0/c/0/0/0/0/0 +3 -0
- data/20x.zarr/A/1/002026/0/zarr.json +84 -0
- data/20x.zarr/A/1/002026/zarr.json +86 -0
- data/20x.zarr/A/1/002027/0/c/0/0/0/0/0 +3 -0
- data/20x.zarr/A/1/002027/0/zarr.json +84 -0
- data/20x.zarr/A/1/002027/zarr.json +86 -0
- data/20x.zarr/A/1/002028/0/c/0/0/0/0/0 +3 -0
- data/20x.zarr/A/1/002028/0/zarr.json +84 -0
- data/20x.zarr/A/1/002028/zarr.json +86 -0
- data/20x.zarr/A/1/zarr.json +26 -0
- data/20x.zarr/A/zarr.json +5 -0
- data/20x.zarr/zarr.json +34 -0
- demo_utils.py +818 -0
- optimize_demo.py +702 -0
- requirements.txt +10 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.zarr filter=lfs diff=lfs merge=lfs -text
+data/20x.zarr/A/1/*/0/c/0/0/0/0/0 filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,162 @@
# Binary image files.
*.tif[f]
*.jp[e]g

# pycharm IDE
.idea
.DS_Store

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
docs/build/
docs/wo_examples/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
pytest_temp/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

.DS_Store

# written by setuptools_scm
*/_version.py
waveorder/_version.py
*.autosave

# images
*.npz
*.tif[f]
*.pdf

# example data
/examples/data_temp/*
/logs/*
runs/*

# misc
claudedocs/
.claude/
concatenate_3fovs.yml
README.md
CHANGED
@@ -1,12 +1,88 @@
 ---
-title:
-emoji:
-colorFrom:
+title: waveOrder Phase Reconstruction Viewer
+emoji: 🔬
+colorFrom: blue
 colorTo: purple
 sdk: gradio
 sdk_version: 6.0.0
 app_file: app.py
 pinned: false
+license: bsd-3-clause
+tags:
+- microscopy
+- computational-imaging
+- phase-reconstruction
+- bioimaging
+- scientific-visualization
+arxiv: 2412.09775
 ---
 
-
+# waveOrder: Phase Reconstruction Interactive Viewer
+
+<div align="center">
+
+[](https://arxiv.org/abs/2412.09775)
+[](https://github.com/mehta-lab/waveorder)
+[](https://huggingface.co/papers/2412.09775)
+
+</div>
+
+## 📄 Paper
+
+**waveOrder: generalist framework for label-agnostic computational microscopy**
+Chandler et al. (2024)
+[arXiv:2412.09775](https://arxiv.org/abs/2412.09775)
+
+## 🔬 About
+
+Interactive web interface for exploring phase reconstruction from quantitative label-free microscopy data. This demo showcases the waveOrder framework's capabilities for reconstructing phase contrast images with interactive parameter optimization.
+
+### Features
+
+- **Interactive Visualization**: Side-by-side comparison of raw and reconstructed phase images
+- **Real-time Parameter Tuning**: Adjust reconstruction parameters and see results instantly
+- **Automated Optimization**: Gradient-based optimization to find optimal reconstruction parameters
+- **GPU Acceleration**: 15-25× speedup with CUDA-capable devices (auto-detected)
+- **Multi-FOV Support**: Navigate through multiple fields of view from plate imaging
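
The GPU auto-detection behind the bullet above follows the standard PyTorch pattern; a minimal sketch (the Space's own helper is `get_device` in `demo_utils.py`, the name `pick_device` here is only for illustration):

```python
import torch

def pick_device(device: str | torch.device | None = None) -> torch.device:
    """Use the requested device, or fall back to CUDA when available, else CPU."""
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
    return torch.device(device) if isinstance(device, str) else device

print(pick_device())  # device(type='cuda', index=0) on a GPU runner, else cpu
```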
+### Reconstruction Parameters
+
+- **Z Offset**: Axial focus calibration
+- **Numerical Apertures**: Detection and illumination NA optimization
+- **Tilt Angles**: Zenith and azimuthal illumination tilt correction
+
+## 🚀 Usage
+
+1. **Select Field of View**: Choose from available FOVs in the dropdown
+2. **Navigate Z-stack**: Use the Z-slice slider to explore different focal planes
+3. **Optimize Parameters**: Click "⚡ Optimize Parameters" to automatically find optimal settings
+4. **Manual Reconstruction**: Adjust sliders manually and click "🔬 Run Reconstruction"
+5. **Review Results**: Scrub through optimization iterations to see parameter evolution
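
The "Optimize" and "Run Reconstruction" buttons wrap helpers from `demo_utils.py`; a rough programmatic sketch of one manual reconstruction on the bundled data follows. The physical values are illustrative placeholders, and the exact parameter keys come from `optimize_demo.py`, not from this sketch:

```python
import numpy as np
from demo_utils import load_ome_zarr_fov, run_reconstruction_single

# Load one FOV (T, C, Z, Y, X) from the bundled plate and take its Z-stack.
fov = load_ome_zarr_fov("data/20x.zarr", "A/1/002026")
zyx = np.asarray(fov.isel(T=0, C=0).values, dtype=np.float32)

# (z, y, x) voxel size in micrometers, matching the OME scale metadata below.
pixel_scales = (2.0, 0.65, 0.65)

# Illustrative values only -- the Space's defaults live in optimize_demo.py.
fixed_params = {"wavelength_illumination": 0.532, "index_of_refraction_media": 1.0}
param_values = {
    "z_offset": 0.0,
    "numerical_aperture_detection": 0.4,
    "numerical_aperture_illumination": 0.2,
}

phase = run_reconstruction_single(zyx, pixel_scales, fixed_params, param_values)
print(phase.shape, phase.dtype)  # (2048, 2048) uint8, ready for display
```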
+## 📊 Dataset
+
+This demo uses concatenated 20x objective microscopy data from high-content screening plates: brightfield Z-stacks from which quantitative phase is reconstructed.
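
The plate ships with the Space under `data/20x.zarr` (three FOVs of well A/1, each with 7 Z-slices of 2048×2048 pixels). A minimal sketch of browsing it the way the demo utilities do, assuming iohub's HCS plate interface:

```python
from iohub import open_ome_zarr

# Open the bundled HCS plate read-only, as demo_utils.get_plate_metadata does.
plate = open_ome_zarr("data/20x.zarr", mode="r", layout="hcs")
for name, position in plate.positions():  # e.g. "A/1/002026"
    print(name, position["0"].shape)       # (1, 1, 7, 2048, 2048)
```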
+## 🔗 Links
+
+- **Paper**: [arXiv:2412.09775](https://arxiv.org/abs/2412.09775)
+- **GitHub Repository**: [mehta-lab/waveorder](https://github.com/mehta-lab/waveorder)
+- **Documentation**: [waveOrder Docs](https://github.com/mehta-lab/waveorder/tree/main/docs)
+
+## 📝 Citation
+
+```bibtex
+@misc{chandler2024waveordergeneralistframeworklabelagnostic,
+  title={waveOrder: generalist framework for label-agnostic computational microscopy},
+  author={Talon Chandler and Eduardo Hirata-Miyasaki and Ivan E. Ivanov and Ziwen Liu and Deepika Sundarraman and Allyson Quinn Ryan and Adrian Jacobo and Keir Balla and Shalin B. Mehta},
+  year={2024},
+  eprint={2412.09775},
+  archivePrefix={arXiv},
+  primaryClass={physics.optics},
+  url={https://arxiv.org/abs/2412.09775},
+}
+```
+
+## ⚖️ License
+
+BSD 3-Clause License
app.py
CHANGED
@@ -1,7 +1,51 @@
+"""
+WaveOrder Phase Reconstruction Viewer - Main Entry Point
 
+This is the main application entry point that launches the phase reconstruction
+viewer with interactive optimization capabilities.
 
+Usage:
+    python app.py
+
+For more details, see optimize_demo.py
+"""
+
+from optimize_demo import (
+    initialize_plate_metadata,
+    load_default_fov,
+    create_gradio_interface,
+    Config,
+)
+
+
+def main():
+    """Launch the Phase Reconstruction Viewer application."""
+    print("\n" + "=" * 60)
+    print("WaveOrder Phase Reconstruction Viewer")
+    print("=" * 60 + "\n")
+
+    # Initialize application state
+    print("Initializing application...")
+    plate_metadata, default_fields = initialize_plate_metadata()
+    data_xr, pixel_scales = load_default_fov(plate_metadata)
+
+    # Create Gradio interface
+    print("\nBuilding user interface...")
+    demo = create_gradio_interface(plate_metadata, default_fields, data_xr, pixel_scales)
+
+    # Launch application
+    print("\n" + "=" * 60)
+    print("Starting Gradio Phase Reconstruction Viewer")
+    print("=" * 60)
+    print("Open your browser to the URL shown below")
+    print("=" * 60 + "\n")
+
+    demo.launch(
+        share=False,  # Set to True to create public link
+        # server_name="0.0.0.0",  # Uncomment to allow external access
+        server_port=Config.SERVER_PORT,
+    )
+
+
+if __name__ == "__main__":
+    main()
data/20x.zarr/A/1/002026/0/c/0/0/0/0/0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:907a383d1ab54801143b6c34faaae986bb12303d6676a21f8ccc01cfce05f031
size 42807124
data/20x.zarr/A/1/002026/0/zarr.json
ADDED
@@ -0,0 +1,84 @@
{
  "shape": [1, 1, 7, 2048, 2048],
  "data_type": "uint16",
  "chunk_grid": {"name": "regular", "configuration": {"chunk_shape": [1, 1, 7, 16384, 16384]}},
  "chunk_key_encoding": {"name": "default", "configuration": {"separator": "/"}},
  "fill_value": 0,
  "codecs": [
    {
      "name": "sharding_indexed",
      "configuration": {
        "chunk_shape": [1, 1, 7, 2048, 2048],
        "codecs": [
          {"name": "bytes", "configuration": {"endian": "little"}},
          {"name": "blosc", "configuration": {"typesize": 2, "cname": "zstd", "clevel": 1, "shuffle": "bitshuffle", "blocksize": 0}}
        ],
        "index_codecs": [
          {"name": "bytes", "configuration": {"endian": "little"}},
          {"name": "crc32c"}
        ],
        "index_location": "end"
      }
    }
  ],
  "attributes": {},
  "dimension_names": ["T", "C", "Z", "Y", "X"],
  "zarr_format": 3,
  "node_type": "array",
  "storage_transformers": []
}
data/20x.zarr/A/1/002026/zarr.json
ADDED
@@ -0,0 +1,86 @@
{
  "attributes": {
    "ome": {
      "multiscales": [
        {
          "version": "0.5",
          "axes": [
            {"name": "T", "type": "time", "unit": "second"},
            {"name": "C", "type": "channel"},
            {"name": "Z", "type": "space", "unit": "micrometer"},
            {"name": "Y", "type": "space", "unit": "micrometer"},
            {"name": "X", "type": "space", "unit": "micrometer"}
          ],
          "datasets": [
            {"path": "0", "coordinateTransformations": [{"type": "scale", "scale": [1.0, 1.0, 2.0, 0.65, 0.65]}]}
          ],
          "name": "0"
        }
      ],
      "omero": {
        "version": "0.5",
        "id": 0,
        "name": "002026",
        "channels": [
          {"active": true, "coefficient": 1.0, "color": "FFFFFF", "family": "linear", "inverted": false, "label": "BF",
           "window": {"start": 0.0, "end": 5.0, "min": 0.0, "max": 65535.0}}
        ],
        "rdefs": {"defaultT": 0, "defaultZ": 0, "model": "color", "projection": "normal"}
      },
      "version": "0.5"
    },
    "extra_metadata": null
  },
  "zarr_format": 3,
  "node_type": "group"
}
data/20x.zarr/A/1/002027/0/c/0/0/0/0/0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b4e974afc2a62a019249c465ca281e77aa700bf0b88903d322a48e0c5370b67b
size 42747558
data/20x.zarr/A/1/002027/0/zarr.json
ADDED
@@ -0,0 +1,84 @@
{
  "shape": [1, 1, 7, 2048, 2048],
  "data_type": "uint16",
  "chunk_grid": {"name": "regular", "configuration": {"chunk_shape": [1, 1, 7, 16384, 16384]}},
  "chunk_key_encoding": {"name": "default", "configuration": {"separator": "/"}},
  "fill_value": 0,
  "codecs": [
    {
      "name": "sharding_indexed",
      "configuration": {
        "chunk_shape": [1, 1, 7, 2048, 2048],
        "codecs": [
          {"name": "bytes", "configuration": {"endian": "little"}},
          {"name": "blosc", "configuration": {"typesize": 2, "cname": "zstd", "clevel": 1, "shuffle": "bitshuffle", "blocksize": 0}}
        ],
        "index_codecs": [
          {"name": "bytes", "configuration": {"endian": "little"}},
          {"name": "crc32c"}
        ],
        "index_location": "end"
      }
    }
  ],
  "attributes": {},
  "dimension_names": ["T", "C", "Z", "Y", "X"],
  "zarr_format": 3,
  "node_type": "array",
  "storage_transformers": []
}
data/20x.zarr/A/1/002027/zarr.json
ADDED
@@ -0,0 +1,86 @@
{
  "attributes": {
    "ome": {
      "multiscales": [
        {
          "version": "0.5",
          "axes": [
            {"name": "T", "type": "time", "unit": "second"},
            {"name": "C", "type": "channel"},
            {"name": "Z", "type": "space", "unit": "micrometer"},
            {"name": "Y", "type": "space", "unit": "micrometer"},
            {"name": "X", "type": "space", "unit": "micrometer"}
          ],
          "datasets": [
            {"path": "0", "coordinateTransformations": [{"type": "scale", "scale": [1.0, 1.0, 2.0, 0.65, 0.65]}]}
          ],
          "name": "0"
        }
      ],
      "omero": {
        "version": "0.5",
        "id": 0,
        "name": "002027",
        "channels": [
          {"active": true, "coefficient": 1.0, "color": "FFFFFF", "family": "linear", "inverted": false, "label": "BF",
           "window": {"start": 0.0, "end": 5.0, "min": 0.0, "max": 65535.0}}
        ],
        "rdefs": {"defaultT": 0, "defaultZ": 0, "model": "color", "projection": "normal"}
      },
      "version": "0.5"
    },
    "extra_metadata": null
  },
  "zarr_format": 3,
  "node_type": "group"
}
data/20x.zarr/A/1/002028/0/c/0/0/0/0/0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fa115aed2ebdb7c0f15e15dbe728ee86c3189d19431c854f1515021b7578e320
size 42727003
data/20x.zarr/A/1/002028/0/zarr.json
ADDED
@@ -0,0 +1,84 @@
{
  "shape": [1, 1, 7, 2048, 2048],
  "data_type": "uint16",
  "chunk_grid": {"name": "regular", "configuration": {"chunk_shape": [1, 1, 7, 16384, 16384]}},
  "chunk_key_encoding": {"name": "default", "configuration": {"separator": "/"}},
  "fill_value": 0,
  "codecs": [
    {
      "name": "sharding_indexed",
      "configuration": {
        "chunk_shape": [1, 1, 7, 2048, 2048],
        "codecs": [
          {"name": "bytes", "configuration": {"endian": "little"}},
          {"name": "blosc", "configuration": {"typesize": 2, "cname": "zstd", "clevel": 1, "shuffle": "bitshuffle", "blocksize": 0}}
        ],
        "index_codecs": [
          {"name": "bytes", "configuration": {"endian": "little"}},
          {"name": "crc32c"}
        ],
        "index_location": "end"
      }
    }
  ],
  "attributes": {},
  "dimension_names": ["T", "C", "Z", "Y", "X"],
  "zarr_format": 3,
  "node_type": "array",
  "storage_transformers": []
}
data/20x.zarr/A/1/002028/zarr.json
ADDED
@@ -0,0 +1,86 @@
{
  "attributes": {
    "ome": {
      "multiscales": [
        {
          "version": "0.5",
          "axes": [
            {"name": "T", "type": "time", "unit": "second"},
            {"name": "C", "type": "channel"},
            {"name": "Z", "type": "space", "unit": "micrometer"},
            {"name": "Y", "type": "space", "unit": "micrometer"},
            {"name": "X", "type": "space", "unit": "micrometer"}
          ],
          "datasets": [
            {"path": "0", "coordinateTransformations": [{"type": "scale", "scale": [1.0, 1.0, 2.0, 0.65, 0.65]}]}
          ],
          "name": "0"
        }
      ],
      "omero": {
        "version": "0.5",
        "id": 0,
        "name": "002028",
        "channels": [
          {"active": true, "coefficient": 1.0, "color": "FFFFFF", "family": "linear", "inverted": false, "label": "BF",
           "window": {"start": 0.0, "end": 5.0, "min": 0.0, "max": 65535.0}}
        ],
        "rdefs": {"defaultT": 0, "defaultZ": 0, "model": "color", "projection": "normal"}
      },
      "version": "0.5"
    },
    "extra_metadata": null
  },
  "zarr_format": 3,
  "node_type": "group"
}
data/20x.zarr/A/1/zarr.json
ADDED
@@ -0,0 +1,26 @@
{
  "attributes": {
    "ome": {
      "well": {
        "version": "0.5",
        "images": [
          {"acquisition": 0, "path": "002026"},
          {"acquisition": 0, "path": "002027"},
          {"acquisition": 0, "path": "002028"}
        ]
      },
      "version": "0.5"
    }
  },
  "zarr_format": 3,
  "node_type": "group"
}
data/20x.zarr/A/zarr.json
ADDED
@@ -0,0 +1,5 @@
{
  "attributes": {},
  "zarr_format": 3,
  "node_type": "group"
}
data/20x.zarr/zarr.json
ADDED
@@ -0,0 +1,34 @@
{
  "attributes": {
    "ome": {
      "plate": {
        "version": "0.5",
        "acquisitions": [{"id": 0}],
        "rows": [{"name": "A"}],
        "columns": [{"name": "1"}],
        "wells": [{"path": "A/1", "rowIndex": 0, "columnIndex": 0}]
      },
      "version": "0.5"
    }
  },
  "zarr_format": 3,
  "node_type": "group"
}
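
The group-level `zarr.json` files above carry the OME-NGFF 0.5 plate, well, and multiscale metadata; the per-FOV `scale` transform is where the demo's (z, y, x) pixel sizes of (2.0, 0.65, 0.65) µm come from. A minimal sketch of reading it with the standard library (paths and keys exactly as in the files above):

```python
import json
from pathlib import Path

# Read the multiscale scale transform recorded for one FOV (order: T, C, Z, Y, X).
meta = json.loads(Path("data/20x.zarr/A/1/002026/zarr.json").read_text())
dataset = meta["attributes"]["ome"]["multiscales"][0]["datasets"][0]
scale = dataset["coordinateTransformations"][0]["scale"]
z_um, y_um, x_um = scale[2:]
print(z_um, y_um, x_um)  # 2.0 0.65 0.65 micrometers per voxel
```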
demo_utils.py
ADDED
@@ -0,0 +1,818 @@
"""
Utility functions for Gradio demos

Provides reusable components for:
- Data loading from OME-Zarr stores
- Image normalization and processing
- Slice extraction from xarray DataArrays
- Phase reconstruction and optimization

Design Notes
------------
All image processing functions work with xarray.DataArray to maintain
labeled dimensions and coordinate information as long as possible.
Only convert to numpy arrays at the final display step.
"""

from pathlib import Path
from typing import Generator

import numpy as np
import torch
import xarray as xr
from numpy.typing import NDArray
from xarray_ome import open_ome_dataset

from waveorder import util
from waveorder.models import isotropic_thin_3d

# Type alias for device specification
Device = torch.device | str | None


def get_device(device: Device = None) -> torch.device:
    """
    Get torch device with smart defaults.

    Parameters
    ----------
    device : torch.device | str | None
        If None, auto-selects cuda if available, else cpu.
        If str, converts to torch.device.
        If torch.device, returns as-is.

    Returns
    -------
    torch.device
        Validated device ready for use

    Examples
    --------
    >>> get_device()  # Auto-detect
    device(type='cuda', index=0)  # if GPU available

    >>> get_device("cpu")  # Force CPU
    device(type='cpu')

    >>> get_device(torch.device("cuda:1"))  # Specific GPU
    device(type='cuda', index=1)
    """
    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if device.type == "cuda":
            print(f"🚀 Using GPU: {torch.cuda.get_device_name(device)}")
            gpu_mem_gb = torch.cuda.get_device_properties(device).total_memory / 1e9
            print(f"   GPU Memory: {gpu_mem_gb:.2f} GB")
        else:
            print("💻 Using CPU (GPU not available)")
        return device

    if isinstance(device, str):
        return torch.device(device)

    return device


# === HCS Plate Loading with iohub ===
def get_plate_metadata(zarr_path: Path | str, allowed_fovs: list[str]) -> dict:
    """
    Extract HCS plate metadata for FOV selection using iohub.

    Optimized to only load metadata for specified FOVs.

    Parameters
    ----------
    zarr_path : Path | str
        Path to the HCS plate zarr store
    allowed_fovs : list[str]
        List of allowed FOV names (e.g., ['002026', '002027', '002028'])

    Returns
    -------
    dict
        Metadata with keys:
        - 'rows': list of row names (e.g., ['A'])
        - 'columns': list of column names (e.g., ['1', '2', '3'])
        - 'wells': dict mapping (row, col) to list of field names
        - 'plate': iohub Plate object for later access
        - 'zarr_path': stored path for data loading
    """
    from iohub import open_ome_zarr

    # Open HCS plate with iohub (fast - doesn't load data)
    plate = open_ome_zarr(str(zarr_path), mode="r", layout="hcs")

    # Hardcoded metadata for known structure (avoids iterating 1000s of positions)
    rows = ["A"]
    columns = ["1", "2", "3"]

    # Only return the allowed FOVs for each well
    wells = {
        ("A", "1"): allowed_fovs,
        ("A", "2"): allowed_fovs,
        ("A", "3"): allowed_fovs,
    }

    return {
        "rows": rows,
        "columns": columns,
        "wells": wells,
        "plate": plate,
        "zarr_path": str(zarr_path),
    }


def load_fov_from_plate(
    plate, row: str, column: str, field: str, resolution: int = 0
) -> xr.DataArray:
    """
    Load a specific FOV from HCS plate using hybrid iohub + xarray-ome approach.

    Uses iohub for navigation, then xarray-ome for fast data loading.

    Parameters
    ----------
    plate : iohub.Plate
        Plate loaded with open_ome_zarr(..., layout="hcs")
    row : str
        Row name (e.g., 'A')
    column : str
        Column name (e.g., '1')
    field : str
        Field/position name (e.g., '002026')
    resolution : int, optional
        Resolution level to load, by default 0

    Returns
    -------
    xr.DataArray
        Image data with labeled dimensions (T, C, Z, Y, X)
    """
    # Navigate to position using iohub (fast)
    position_key = f"{row}/{column}/{field}"
    position = plate[position_key]

    # Get full zarr path from position (handle both Zarr V2 and V3)
    store = position.zgroup.store
    if hasattr(store, 'path'):
        base_path = Path(store.path)  # Zarr V2 (DirectoryStore)
    elif hasattr(store, 'root'):
        base_path = Path(store.root)  # Zarr V3 (LocalStore)
    else:
        raise RuntimeError(f"Unknown store type: {type(store)}")

    position_path = base_path / position.zgroup.path

    # Load with xarray-ome (fast and reliable)
    fov_dataset = open_ome_dataset(position_path, resolution=resolution, validate=False)
    data_xr = fov_dataset["image"]

    return data_xr


# === Data Loading ===
def load_ome_zarr_fov(
    zarr_path: Path | str, fov_path: Path | str, resolution: int = 0
) -> xr.DataArray:
    """
    Load a field of view from an OME-Zarr store as an xarray DataArray.

    Parameters
    ----------
    zarr_path : Path | str
        Path to the root OME-Zarr store
    fov_path : Path | str
        Relative path to the FOV (e.g., "A/1/001007")
    resolution : int, optional
        Resolution level to load (0 is full resolution), by default 0

    Returns
    -------
    xr.DataArray
        Image data with labeled dimensions (T, C, Z, Y, X)
    """
    zarr_path = Path(zarr_path)
    fov_path = Path(fov_path)

    print(f"Loading zarr store from: {zarr_path}")
    print(f"Accessing FOV: {fov_path}")

    # Load as xarray Dataset
    fov_dataset: xr.Dataset = open_ome_dataset(
        zarr_path / fov_path, resolution=resolution, validate=False
    )

    # Extract the image DataArray
    data_xr = fov_dataset["image"]

    print(f"Loaded data shape: {dict(data_xr.sizes)}")
    print(f"Dimensions: {list(data_xr.dims)}")
    print(f"Data type: {data_xr.dtype}")

    return data_xr


# === Image Processing ===
def normalize_for_display(
    img_2d: xr.DataArray,
    percentiles: tuple[float, float] = (1, 99),
    clip_to_uint8: bool = True,
) -> np.ndarray:
    """
    Normalize a 2D microscopy image using percentile clipping.

    Uses robust percentile-based normalization to handle outliers
    common in microscopy data. Works with xarray DataArrays to maintain
    labeled dimensions through the processing pipeline.

    Parameters
    ----------
    img_2d : xr.DataArray
        2D image DataArray to normalize
    percentiles : tuple[float, float], optional
        Lower and upper percentiles for clipping, by default (1, 99)
    clip_to_uint8 : bool, optional
        If True, convert to uint8 (0-255), otherwise keep as float (0-1),
        by default True

    Returns
    -------
    np.ndarray
        Normalized numpy array (uint8 if clip_to_uint8=True, else float32)

    Notes
    -----
    Expects xarray.DataArray input. For raw numpy arrays,
    wrap in xarray first: xr.DataArray(array, dims=["Y", "X"])
    """
    # Calculate percentiles using xarray
    p_low = float(img_2d.quantile(percentiles[0] / 100.0).values)
    p_high = float(img_2d.quantile(percentiles[1] / 100.0).values)

    # Handle edge case: no intensity variation
    if p_high - p_low < 1e-10:
        return np.zeros(img_2d.shape, dtype=np.uint8 if clip_to_uint8 else np.float32)

    # Clip and normalize using xarray operations
    img_clipped = img_2d.clip(min=p_low, max=p_high)
    img_normalized = (img_clipped - p_low) / (p_high - p_low)

    # Convert to numpy array
    result = img_normalized.values

    # Convert to requested output format
    if clip_to_uint8:
        result = (result * 255).astype(np.uint8)
    else:
        result = result.astype(np.float32)

    return result


# === Slice Extraction ===
def extract_2d_slice(
    data_xr: xr.DataArray,
    t: int | None = None,
    c: int | None = None,
    z: int | None = None,
    normalize: bool = True,
    verbose: bool = True,
) -> np.ndarray:
    """
    Extract and optionally normalize a 2D slice from xarray data.

    Flexibly handles different dimension specifications. If a dimension
    index is None, it will be squeezed out if size=1 or raise an error
    if size>1.

    Parameters
    ----------
    data_xr : xr.DataArray
        Image data with dimensions (T, C, Z, Y, X)
    t : int | None, optional
        Timepoint index, by default None
    c : int | None, optional
        Channel index, by default None
    z : int | None, optional
        Z-slice index, by default None
    normalize : bool, optional
        Whether to normalize for display, by default True
    verbose : bool, optional
        Whether to print slice information, by default True

    Returns
    -------
    np.ndarray
        2D numpy array (normalized uint8 if normalize=True, else raw values)

    Raises
    ------
    ValueError
        If result is empty or not 2D after slicing and squeezing
    """
    # Build selection dictionary for indexed dimensions
    sel_dict = {}
    if t is not None:
        sel_dict["T"] = int(t)
    if c is not None:
        sel_dict["C"] = int(c)
    if z is not None:
        sel_dict["Z"] = int(z)

    # Extract slice using xarray's labeled indexing
    slice_xr = data_xr.isel(**sel_dict) if sel_dict else data_xr

    # Compute if Dask-backed (load from disk)
    if hasattr(slice_xr.data, "compute"):
        slice_xr = slice_xr.compute()

    # Squeeze singleton dimensions (e.g., single channel, single Z)
    slice_xr = slice_xr.squeeze()

    # Validation: ensure non-empty result
    if slice_xr.size == 0:
        raise ValueError(
            f"Empty array after slicing. Selection: {sel_dict}, "
            f"Original shape: {data_xr.shape}"
        )

    # Validation: ensure 2D result
    if slice_xr.ndim != 2:
        raise ValueError(
            f"Expected 2D array after slicing, got shape {slice_xr.shape}. "
            f"Selection: {sel_dict}"
        )

    # Verbose output: print slice information
    if verbose:
        sel_str = (
            ", ".join(f"{k}={v}" for k, v in sel_dict.items())
            if sel_dict
            else "full array"
        )
        print(
            f"Extracted slice: {sel_str}, Shape={slice_xr.shape}, "
            f"Range=[{float(slice_xr.min()):.1f}, {float(slice_xr.max()):.1f}]"
        )

    # Normalize or convert to numpy
    if normalize:
        slice_2d = normalize_for_display(slice_xr)
    else:
        slice_2d = slice_xr.values

    return slice_2d


# === Slice Extraction Factory ===
def create_slice_extractor(
    data_xr: xr.DataArray,
    normalize: bool = True,
    channel: int = 0,
):
    """
    Create a closure function for extracting slices from a specific dataset.

    This factory function is useful for Gradio callbacks where the data
    is loaded once and the same extraction function is called multiple times.

    Parameters
    ----------
    data_xr : xr.DataArray
        Image data to extract slices from
    normalize : bool, optional
        Whether to normalize for display, by default True
    channel : int, optional
        Default channel to use, by default 0

    Returns
    -------
    callable
        Function with signature (t: int, z: int) -> np.ndarray that extracts
        and normalizes 2D slices
    """

    def get_slice(t: int, z: int) -> np.ndarray:
        """Extract and normalize a 2D slice at timepoint t and z-slice z."""
        return extract_2d_slice(
            data_xr,
            t=int(t),
            c=channel,
            z=int(z),
            normalize=normalize,
            verbose=True,
        )

    return get_slice


# === Metadata Helpers ===
def get_dimension_info(data_xr: xr.DataArray) -> dict:
    """
    Extract dimension information from xarray DataArray.

    Parameters
    ----------
    data_xr : xr.DataArray
        Image data with dimensions

    Returns
    -------
    dict
        Dictionary with keys: 'sizes', 'dims', 'coords', 'dtype'
    """
    return {
        "sizes": dict(data_xr.sizes),
        "dims": list(data_xr.dims),
        "coords": {dim: data_xr.coords[dim].values.tolist() for dim in data_xr.dims},
        "dtype": str(data_xr.dtype),
    }


def print_data_summary(data_xr: xr.DataArray) -> None:
    """
    Print a formatted summary of xarray DataArray.

    Parameters
    ----------
    data_xr : xr.DataArray
        Image data to summarize
    """
    info = get_dimension_info(data_xr)

    print("\n" + "=" * 60)
    print("DATA SUMMARY")
    print("=" * 60)
    print(f"Shape: {info['sizes']}")
    print(f"Dimensions: {info['dims']}")
    print(f"Data type: {info['dtype']}")

    # Print coordinate ranges
    print("\nCoordinate Ranges:")
    for dim in info["dims"]:
        coords = info["coords"][dim]
        if len(coords) > 0:
            print(f"  {dim}: [{coords[0]:.2f} ... {coords[-1]:.2f}] (n={len(coords)})")

    # Print memory size estimate
    total_elements = np.prod(list(info["sizes"].values()))
    dtype_size = np.dtype(data_xr.dtype).itemsize
    size_mb = (total_elements * dtype_size) / (1024**2)
    print(f"\nEstimated size: {size_mb:.1f} MB")
    print("=" * 60 + "\n")


# === Phase Reconstruction Functions ===
def run_reconstruction(zyx_tile: torch.Tensor, recon_args: dict) -> torch.Tensor:
    """
    Run phase reconstruction on a Z-stack.

    Takes a 3D stack (Z, Y, X) and produces a 2D phase reconstruction (Y, X).
    Device is inferred from the input tensor.

    Parameters
    ----------
    zyx_tile : torch.Tensor
        Input Z-stack data with shape (Z, Y, X). Can be on CPU or GPU.
    recon_args : dict
        Reconstruction arguments including wavelength, NA, pixel sizes, etc.
        All tensor values should be on the same device as zyx_tile.

    Returns
    -------
    torch.Tensor
        Reconstructed 2D phase image with shape (Y, X), on same device as input.

    Notes
    -----
    All intermediate tensors are created on the same device as the input
    to ensure efficient computation without device transfers.
    """
    # Infer device from input tensor
    device = zyx_tile.device

    # Prepare transfer function arguments - ensure all tensors are on the same device
    tf_args = {}
    for key, value in recon_args.items():
        if isinstance(value, torch.Tensor):
            tf_args[key] = value.to(device)
        else:
            tf_args[key] = value

    Z, _, _ = zyx_tile.shape
    tf_args["z_position_list"] = (
        torch.arange(Z, device=device) - (Z // 2) + tf_args["z_offset"]
    ) * tf_args["z_scale"]
    tf_args.pop("z_offset")
    tf_args.pop("z_scale")

    # Core reconstruction calls (all on same device)
    tf_abs, tf_phase = isotropic_thin_3d.calculate_transfer_function(**tf_args)
    system = isotropic_thin_3d.calculate_singular_system(tf_abs, tf_phase)
    _, yx_phase_recon = isotropic_thin_3d.apply_inverse_transfer_function(
        zyx_tile, system, regularization_strength=1e-2
    )
    return yx_phase_recon


def compute_midband_power(
    yx_array: torch.Tensor,
    NA_det: float,
    lambda_ill: float,
    pixel_size: float,
    band: tuple[float, float] = (0.125, 0.25),
) -> torch.Tensor:
    """
    Compute midband power metric for optimization loss.

    Parameters
    ----------
    yx_array : torch.Tensor
        2D reconstructed image (on CPU or GPU)
    NA_det : float
        Numerical aperture of detection
    lambda_ill : float
        Illumination wavelength
    pixel_size : float
        Pixel size in same units as wavelength
    band : tuple[float, float], optional
        Frequency band as fraction of cutoff, by default (0.125, 0.25)

    Returns
    -------
    torch.Tensor
        Scalar power value in the specified frequency band, on same device as input.

    Notes
    -----
    All operations are performed on the same device as the input tensor
    for efficient GPU computation.
    """
    device = yx_array.device

    # Generate frequency coordinates (returns numpy arrays)
    _, _, fxx, fyy = util.gen_coordinate(yx_array.shape, pixel_size)

    # Convert to torch tensor on same device
    frr = torch.tensor(np.sqrt(fxx**2 + fyy**2), dtype=torch.float32, device=device)

    # FFT and frequency masking (all on device)
    xy_abs_fft = torch.abs(torch.fft.fftn(yx_array))
    cutoff = 2 * NA_det / lambda_ill
    mask = torch.logical_and(frr > cutoff * band[0], frr < cutoff * band[1])

    return torch.sum(xy_abs_fft[mask])


def prepare_optimizer(
    optimizable_params: dict[str, tuple[bool, float, float]],
    device: torch.device,
) -> tuple[dict[str, torch.nn.Parameter], torch.optim.Optimizer]:
    """
    Prepare optimization parameters and Adam optimizer.

    Parameters
    ----------
    optimizable_params : dict
        Dict mapping param names to (enabled, initial_value, learning_rate)
    device : torch.device
        Device to create parameters on (CPU or GPU)

    Returns
    -------
    tuple[dict, Optimizer]
        optimization_params dict and configured optimizer

    Notes
    -----
    All parameters are created on the specified device for efficient
    GPU-accelerated optimization if available.
    """
    optimization_params: dict[str, torch.nn.Parameter] = {}
    optimizer_config = []
    for name, (enabled, initial, lr) in optimizable_params.items():
        if enabled:
            param = torch.nn.Parameter(
                torch.tensor([initial], dtype=torch.float32, device=device),
                requires_grad=True,
            )
            optimization_params[name] = param
            optimizer_config.append({"params": [param], "lr": lr})

    optimizer = torch.optim.Adam(optimizer_config)
    return optimization_params, optimizer


def run_reconstruction_single(
    zyx_stack: np.ndarray,
    pixel_scales: tuple[float, float, float],
    fixed_params: dict,
    param_values: dict,
    device: Device = None,
) -> np.ndarray:
    """
    Run a single phase reconstruction with specified parameters (no optimization).

    Parameters
    ----------
    zyx_stack : np.ndarray
        Input Z-stack with shape (Z, Y, X)
    pixel_scales : tuple[float, float, float]
        (z_scale, y_scale, x_scale) in micrometers
    fixed_params : dict
        Fixed reconstruction parameters (wavelength, index, etc.)
    param_values : dict
        Parameter values to use (z_offset, numerical_aperture_detection, etc.)
    device : torch.device | str | None, optional
        Computing device. If None, auto-selects GPU if available, else CPU.

    Returns
    -------
    np.ndarray
        Normalized uint8 array of reconstructed phase image (for display)
    """
    # Resolve device (will print GPU info if available)
    device = get_device(device)

    # Convert to torch tensor on target device
    zyx_tile = torch.tensor(zyx_stack, dtype=torch.float32, device=device)

    # Prepare reconstruction arguments
    z_scale, y_scale, x_scale = pixel_scales
    recon_args = fixed_params.copy()

    # Remove non-reconstruction parameters from fixed_params
    recon_args.pop("num_iterations", None)
    recon_args.pop("use_tiling", None)
    recon_args.pop("device", None)

    recon_args["yx_shape"] = zyx_tile.shape[1:]
    recon_args["yx_pixel_size"] = y_scale
    recon_args["z_scale"] = z_scale

    # Set parameter values (convert to tensors on device)
    for name, value in param_values.items():
        recon_args[name] = torch.tensor([value], dtype=torch.float32, device=device)

    # Run reconstruction
    yx_recon = run_reconstruction(zyx_tile, recon_args)

    # Transfer to CPU and normalize for display
    recon_numpy = yx_recon.detach().cpu().numpy()
    # Wrap in xarray for normalize_for_display (expects xr.DataArray)
    recon_normalized = normalize_for_display(xr.DataArray(recon_numpy))

    return recon_normalized


def run_optimization_streaming(
    zyx_stack: np.ndarray,
    pixel_scales: tuple[float, float, float],
    fixed_params: dict,
    optimizable_params: dict,
    num_iterations: int = 10,
    device: Device = None,
) -> Generator[dict, None, None]:
    """
    Run phase reconstruction optimization with streaming updates.

    Generator that yields reconstruction results and loss after each iteration.
    Supports GPU acceleration for significant speedup (15-25x on typical hardware).
|
| 681 |
+
|
| 682 |
+
Parameters
|
| 683 |
+
----------
|
| 684 |
+
zyx_stack : np.ndarray
|
| 685 |
+
Input Z-stack with shape (Z, Y, X)
|
| 686 |
+
pixel_scales : tuple[float, float, float]
|
| 687 |
+
(z_scale, y_scale, x_scale) in micrometers
|
| 688 |
+
fixed_params : dict
|
| 689 |
+
Fixed reconstruction parameters (wavelength, index, etc.)
|
| 690 |
+
optimizable_params : dict
|
| 691 |
+
Parameters to optimize with (enabled, initial, lr) tuples
|
| 692 |
+
num_iterations : int, optional
|
| 693 |
+
Number of optimization iterations, by default 10
|
| 694 |
+
device : torch.device | str | None, optional
|
| 695 |
+
Computing device. If None, auto-selects GPU if available, else CPU.
|
| 696 |
+
Examples: "cuda", "cpu", "cuda:0", torch.device("cuda")
|
| 697 |
+
By default None
|
| 698 |
+
|
| 699 |
+
Yields
|
| 700 |
+
------
|
| 701 |
+
dict
|
| 702 |
+
Dictionary with keys:
|
| 703 |
+
- 'reconstructed_image': normalized uint8 array (on CPU for display)
|
| 704 |
+
- 'loss': float loss value
|
| 705 |
+
- 'iteration': int iteration number (1-indexed)
|
| 706 |
+
- 'params': dict of current parameter values
|
| 707 |
+
|
| 708 |
+
Notes
|
| 709 |
+
-----
|
| 710 |
+
All computation is performed on the specified device (GPU if available).
|
| 711 |
+
Only final results are transferred to CPU for display, minimizing
|
| 712 |
+
transfer overhead.
|
| 713 |
+
"""
|
| 714 |
+
# Resolve device (will print GPU info if available)
|
| 715 |
+
device = get_device(device)
|
| 716 |
+
|
| 717 |
+
# Convert to torch tensor on target device (single transfer)
|
| 718 |
+
zyx_tile = torch.tensor(zyx_stack, dtype=torch.float32, device=device)
|
| 719 |
+
|
| 720 |
+
# Prepare reconstruction arguments
|
| 721 |
+
z_scale, y_scale, x_scale = pixel_scales
|
| 722 |
+
recon_args = fixed_params.copy()
|
| 723 |
+
|
| 724 |
+
# Remove non-reconstruction parameters from fixed_params
|
| 725 |
+
recon_args.pop("num_iterations", None)
|
| 726 |
+
recon_args.pop("use_tiling", None)
|
| 727 |
+
recon_args.pop("device", None) # Remove device if present
|
| 728 |
+
|
| 729 |
+
recon_args["yx_shape"] = zyx_tile.shape[1:]
|
| 730 |
+
recon_args["yx_pixel_size"] = y_scale
|
| 731 |
+
recon_args["z_scale"] = z_scale
|
| 732 |
+
|
| 733 |
+
# Initialize optimizable parameters on device
|
| 734 |
+
for name, (enabled, initial, lr) in optimizable_params.items():
|
| 735 |
+
recon_args[name] = torch.tensor([initial], dtype=torch.float32, device=device)
|
| 736 |
+
|
| 737 |
+
# Prepare optimizer with parameters on device
|
| 738 |
+
optimization_params, optimizer = prepare_optimizer(optimizable_params, device)
|
| 739 |
+
|
| 740 |
+
# Optimization loop (all on device)
|
| 741 |
+
for step in range(num_iterations):
|
| 742 |
+
# Update parameters
|
| 743 |
+
for name, param in optimization_params.items():
|
| 744 |
+
recon_args[name] = param
|
| 745 |
+
|
| 746 |
+
# Run reconstruction (all on device)
|
| 747 |
+
yx_recon = run_reconstruction(zyx_tile, recon_args)
|
| 748 |
+
|
| 749 |
+
# Compute loss (all on device, negative midband power - we want to maximize)
|
| 750 |
+
loss = -compute_midband_power(
|
| 751 |
+
yx_recon,
|
| 752 |
+
NA_det=0.15,
|
| 753 |
+
lambda_ill=recon_args["wavelength_illumination"],
|
| 754 |
+
pixel_size=recon_args["yx_pixel_size"],
|
| 755 |
+
band=(0.1, 0.2),
|
| 756 |
+
)
|
| 757 |
+
|
| 758 |
+
# Backward pass and optimizer step (on device)
|
| 759 |
+
loss.backward()
|
| 760 |
+
optimizer.step()
|
| 761 |
+
optimizer.zero_grad()
|
| 762 |
+
|
| 763 |
+
# Transfer to CPU ONLY for display (single transfer per iteration)
|
| 764 |
+
recon_numpy = yx_recon.detach().cpu().numpy()
|
| 765 |
+
# Wrap in xarray for normalize_for_display (expects xr.DataArray)
|
| 766 |
+
recon_normalized = normalize_for_display(xr.DataArray(recon_numpy))
|
| 767 |
+
|
| 768 |
+
# Extract current parameter values (scalars, already on CPU)
|
| 769 |
+
param_values = {
|
| 770 |
+
name: param.item() for name, param in optimization_params.items()
|
| 771 |
+
}
|
| 772 |
+
|
| 773 |
+
# Yield results
|
| 774 |
+
yield {
|
| 775 |
+
"reconstructed_image": recon_normalized,
|
| 776 |
+
"loss": loss.item(),
|
| 777 |
+
"iteration": step + 1,
|
| 778 |
+
"params": param_values,
|
| 779 |
+
}
|
| 780 |
+
|
| 781 |
+
|
| 782 |
+
def extract_tiles(
|
| 783 |
+
zyx_data: np.ndarray, num_tiles: tuple[int, int], overlap_pct: float
|
| 784 |
+
) -> tuple[dict[str, np.ndarray], dict[str, tuple[int, int, int]]]:
|
| 785 |
+
"""
|
| 786 |
+
Extract overlapping tiles from a Z-stack for processing.
|
| 787 |
+
|
| 788 |
+
Parameters
|
| 789 |
+
----------
|
| 790 |
+
zyx_data : np.ndarray
|
| 791 |
+
Input data with shape (Z, Y, X)
|
| 792 |
+
num_tiles : tuple[int, int]
|
| 793 |
+
Number of tiles in (Y, X) dimensions
|
| 794 |
+
overlap_pct : float
|
| 795 |
+
Overlap percentage between tiles (0.0 to 1.0)
|
| 796 |
+
|
| 797 |
+
Returns
|
| 798 |
+
-------
|
| 799 |
+
tuple[dict, dict]
|
| 800 |
+
tiles: dict mapping tile names to arrays
|
| 801 |
+
translations: dict mapping tile names to (z, y, x) positions
|
| 802 |
+
"""
|
| 803 |
+
Z, Y, X = zyx_data.shape
|
| 804 |
+
tile_height = int(np.ceil(Y / (num_tiles[0] - (num_tiles[0] - 1) * overlap_pct)))
|
| 805 |
+
tile_width = int(np.ceil(X / (num_tiles[1] - (num_tiles[1] - 1) * overlap_pct)))
|
| 806 |
+
stride_y = int(tile_height * (1 - overlap_pct))
|
| 807 |
+
stride_x = int(tile_width * (1 - overlap_pct))
|
| 808 |
+
|
| 809 |
+
tiles = {}
|
| 810 |
+
translations = {}
|
| 811 |
+
for yi in range(num_tiles[0]):
|
| 812 |
+
for xi in range(num_tiles[1]):
|
| 813 |
+
y0, x0 = yi * stride_y, xi * stride_x
|
| 814 |
+
y1, x1 = min(y0 + tile_height, Y), min(x0 + tile_width, X)
|
| 815 |
+
tile_name = f"0/0/{yi:03d}{xi:03d}"
|
| 816 |
+
tiles[tile_name] = zyx_data[:, y0:y1, x0:x1]
|
| 817 |
+
translations[tile_name] = (0, y0, x0)
|
| 818 |
+
return tiles, translations
|
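Note: run_optimization_streaming is a plain Python generator, so it can also be driven headlessly, outside the Gradio app. The snippet below is an editor's sketch, not part of this commit: the random stack, the fixed-parameter dict, and device="cpu" are placeholder assumptions, and the tuples follow the (enabled, initial_value, learning_rate) convention documented in prepare_optimizer.

# Editor's sketch (assumes demo_utils is importable and a (Z, Y, X) float32 stack is available)
import numpy as np
from demo_utils import run_optimization_streaming

zyx = np.random.rand(11, 256, 256).astype(np.float32)  # placeholder Z-stack
fixed = {
    "wavelength_illumination": 0.45,
    "index_of_refraction_media": 1.3,
    "invert_phase_contrast": False,
}
optimizable = {  # name -> (enabled, initial_value, learning_rate)
    "z_offset": (True, 0.0, 0.01),
    "numerical_aperture_detection": (True, 0.55, 0.001),
    "numerical_aperture_illumination": (True, 0.54, 0.001),
    "tilt_angle_zenith": (True, 0.0, 0.005),
    "tilt_angle_azimuth": (True, 0.0, 0.001),
}

for result in run_optimization_streaming(
    zyx, (2.0, 0.325, 0.325), fixed, optimizable, num_iterations=5, device="cpu"
):
    print(result["iteration"], result["loss"], result["params"])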
optimize_demo.py
ADDED
@@ -0,0 +1,702 @@
"""
Gradio Phase Reconstruction Viewer

Interactive web interface for viewing zarr microscopy data with T/Z navigation.
Based on: docs/examples/visuals/optimize_phase_recon.py
"""

import gradio as gr
import numpy as np
import pandas as pd
from pathlib import Path

from demo_utils import (
    print_data_summary,
    run_optimization_streaming,
    get_plate_metadata,
    load_fov_from_plate,
    extract_2d_slice,
    run_reconstruction_single,
)


# ============================================================================
# CONFIGURATION
# ============================================================================

class Config:
    """Centralized configuration for the phase reconstruction viewer."""

    # Input data path
    INPUT_PATH = Path("data/20x.zarr")

    # Default FOV selection
    DEFAULT_ROW = "A"
    DEFAULT_COLUMN = "1"
    DEFAULT_FIELD = "002026"

    # Restrict to specific FOVs (filter large plate)
    ALLOWED_FOVS = ['002026', '002027', '002028']

    # Channel selection (only BF channel in concatenated data)
    CHANNEL = 0  # BF is now channel 0 (GFP was filtered out during concatenation)

    # Pixel sizes for 20x objective (override incorrect Zarr metadata)
    PIXEL_SIZE_YX = 0.325  # micrometers
    PIXEL_SIZE_Z = 2.0  # micrometers

    # Reconstruction configuration
    RECON_CONFIG = {
        "wavelength_illumination": 0.45,
        "index_of_refraction_media": 1.3,
        "invert_phase_contrast": False,
        "num_iterations": 10,
        # GPU Configuration (auto-detects GPU for 15-25x speedup)
        # - None: Auto-detect (uses CUDA if available, else CPU)
        # - "cuda": Force GPU usage (requires CUDA-capable device)
        # - "cpu": Force CPU usage (for testing/debugging)
        "device": None,
        # Tiling (not implemented - using full image)
        "use_tiling": False,
    }

    # Optimizable parameters: (optimize_flag, initial_value, learning_rate)
    OPTIMIZABLE_PARAMS = {
        "z_offset": (True, 0.0, 0.01),
        "numerical_aperture_detection": (True, 0.55, 0.001),
        "numerical_aperture_illumination": (True, 0.54, 0.001),
        "tilt_angle_zenith": (True, 0.0, 0.005),
        "tilt_angle_azimuth": (True, 0.0, 0.001),
    }

    # UI slider ranges
    SLIDER_RANGES = {
        "z_offset": (-0.5, 0.5, 0.01),
        "na_detection": (0.05, 0.65, 0.001),  # Max 0.65 to accommodate optimization
        "na_illumination": (0.05, 0.65, 0.001),  # Max 0.65 (but constrained <= NA_detection)
        "tilt_zenith": (0.0, np.pi / 2, 0.005),
        "tilt_azimuth": (0.0, 2 * np.pi, 0.001),
    }

    # UI configuration
    IMAGE_HEIGHT = 800
    SERVER_PORT = 12124


# ============================================================================
# GLOBAL STATE INITIALIZATION
# ============================================================================

def initialize_plate_metadata():
    """Load and display plate metadata."""
    print("\n" + "=" * 60)
    print("Loading HCS Plate Metadata...")
    print("=" * 60)

    # Pass allowed FOVs to avoid iterating through all positions
    plate_metadata = get_plate_metadata(Config.INPUT_PATH, Config.ALLOWED_FOVS)

    print(f"Available rows: {plate_metadata['rows']}")
    print(f"Available columns: {plate_metadata['columns']}")
    print(f"Total wells: {len(plate_metadata['wells'])}")

    # Get default well fields (already filtered)
    default_well_key = (Config.DEFAULT_ROW, Config.DEFAULT_COLUMN)
    default_fields = plate_metadata["wells"].get(default_well_key, [])

    print(f"Fields in {Config.DEFAULT_ROW}/{Config.DEFAULT_COLUMN}: {len(default_fields)}")
    print(f"Allowed FOVs: {Config.ALLOWED_FOVS}")
    print("=" * 60 + "\n")

    return plate_metadata, default_fields


def load_default_fov(plate_metadata):
    """Load the default field of view and use correct pixel scales."""
    print(f"Loading default FOV: {Config.DEFAULT_ROW}/{Config.DEFAULT_COLUMN}/{Config.DEFAULT_FIELD}")

    data_xr = load_fov_from_plate(
        plate_metadata["plate"],
        Config.DEFAULT_ROW,
        Config.DEFAULT_COLUMN,
        Config.DEFAULT_FIELD,
        resolution=0,
    )

    print_data_summary(data_xr)

    # Use correct pixel scales from config (20x objective)
    # Note: Zarr metadata may have incorrect values from different magnification
    pixel_scales = (
        Config.PIXEL_SIZE_Z,  # z_scale
        Config.PIXEL_SIZE_YX,  # y_scale
        Config.PIXEL_SIZE_YX,  # x_scale
    )
    print(f"Using pixel scales (Z, Y, X): {pixel_scales} micrometers (from config, 20x objective)")

    return data_xr, pixel_scales


# ============================================================================
# FOV LOADING CALLBACKS
# ============================================================================

def load_selected_fov(field: str, current_z: int, plate_metadata):
    """Load selected FOV and update UI components."""
    try:
        print(f"\nLoading FOV: {Config.DEFAULT_ROW}/{Config.DEFAULT_COLUMN}/{field}")

        # Load new data
        new_data_xr = load_fov_from_plate(
            plate_metadata["plate"],
            Config.DEFAULT_ROW,
            Config.DEFAULT_COLUMN,
            field,
            resolution=0,
        )

        # Use pixel scales from config (not Zarr metadata)
        new_pixel_scales = (Config.PIXEL_SIZE_Z, Config.PIXEL_SIZE_YX, Config.PIXEL_SIZE_YX)

        # Update Z slider
        z_max = new_data_xr.sizes["Z"] - 1
        new_z = min(current_z, z_max)

        print(f"✅ Loaded: {dict(new_data_xr.sizes)}")

        # Get preview image
        preview_image = extract_2d_slice(
            new_data_xr, t=0, c=Config.CHANNEL, z=new_z, normalize=True, verbose=False
        )

        return (
            gr.Slider(maximum=z_max, value=new_z),  # Updated Z slider
            (preview_image, preview_image),  # ImageSlider in preview mode
            new_data_xr,  # Update state
            new_pixel_scales,  # Update state
        )

    except Exception as e:
        print(f"❌ Error loading FOV: {str(e)}")
        import traceback
        traceback.print_exc()
        return (gr.skip(), gr.skip(), gr.skip(), gr.skip())


# ============================================================================
# IMAGE DISPLAY CALLBACKS
# ============================================================================

def get_slice_for_preview(z: int, data_xr_state):
    """Extract slice and show in preview mode (same image twice)."""
    slice_img = extract_2d_slice(
        data_xr_state, t=0, c=Config.CHANNEL, z=int(z), normalize=True, verbose=False
    )
    return (slice_img, slice_img)  # Preview mode: both sides show same image


# ============================================================================
# RECONSTRUCTION CALLBACKS
# ============================================================================

def run_reconstruction_ui(
    z: int,
    z_offset: float,
    na_det: float,
    na_ill: float,
    tilt_zenith: float,
    tilt_azimuth: float,
    data_xr_state,
    pixel_scales_state,
):
    """
    Run reconstruction with CURRENT slider values (no optimization).

    Uses slider parameters directly for a single fast reconstruction.
    """
    # Extract full Z-stack for timepoint 0 (for reconstruction)
    zyx_stack = data_xr_state.isel(T=0, C=Config.CHANNEL).values

    # Get current Z-slice for comparison (left side of ImageSlider)
    original_normalized = extract_2d_slice(
        data_xr_state, t=0, c=Config.CHANNEL, z=int(z), normalize=True, verbose=False
    )

    # Build parameter dict from slider values
    param_values = {
        "z_offset": z_offset,
        "numerical_aperture_detection": na_det,
        "numerical_aperture_illumination": na_ill,
        "tilt_angle_zenith": tilt_zenith,
        "tilt_angle_azimuth": tilt_azimuth,
    }

    # Run single reconstruction with these parameters
    reconstructed_image = run_reconstruction_single(
        zyx_stack, pixel_scales_state, Config.RECON_CONFIG, param_values
    )

    # Return updated image slider (no optimization results)
    return (original_normalized, reconstructed_image)


def run_optimization_ui(z: int, data_xr_state, pixel_scales_state):
    """
    Run OPTIMIZATION and stream updates to UI with iteration caching.

    Uses OPTIMIZABLE_PARAMS as initial guesses, runs full optimization loop.
    Yields progressive updates for ImageSlider, loss plot, status,
    iteration history, iteration slider, and SLIDER UPDATES.
    """
    # Extract full Z-stack for timepoint 0 (for reconstruction)
    zyx_stack = data_xr_state.isel(T=0, C=Config.CHANNEL).values

    # Get current Z-slice for comparison (left side of ImageSlider)
    original_normalized = extract_2d_slice(
        data_xr_state, t=0, c=Config.CHANNEL, z=int(z), normalize=True, verbose=False
    )

    # Initialize tracking
    loss_history = []
    iteration_cache = []

    # Set raw image once at the start (pin it)
    yield (
        (original_normalized, original_normalized),  # Show raw image on both sides initially
        pd.DataFrame({"iteration": [], "loss": []}),  # Initialize loss plot with empty data
        [],  # Clear iteration history
        gr.skip(),  # Don't update slider yet (avoid min=max=1 error)
        gr.Markdown(value="Starting optimization...", visible=True),
        # Slider updates (5 outputs):
        gr.skip(),  # z_offset
        gr.skip(),  # na_det
        gr.skip(),  # na_ill
        gr.skip(),  # tilt_zenith
        gr.skip(),  # tilt_azimuth
    )

    # Run optimization with streaming
    for result in run_optimization_streaming(
        zyx_stack,
        pixel_scales_state,
        Config.RECON_CONFIG,
        Config.OPTIMIZABLE_PARAMS,
        num_iterations=Config.RECON_CONFIG["num_iterations"],
    ):
        # Current iteration number
        n = result["iteration"]

        # Cache iteration result
        iteration_cache.append(
            {
                "iteration": n,
                "reconstructed_image": result["reconstructed_image"],
                "loss": result["loss"],
                "params": result["params"],
                "raw_image": original_normalized,
            }
        )

        # Accumulate loss history (ensure iteration is int for proper x-axis)
        loss_history.append({"iteration": int(n), "loss": result["loss"]})

        # Format iteration info
        info_md = f"**Iteration {n}/{Config.RECON_CONFIG['num_iterations']}** | Loss: `{result['loss']:.2e}`"

        # Yield updates - update ImageSlider AND sliders with latest params
        yield (
            (original_normalized, result["reconstructed_image"]),  # Update ImageSlider
            pd.DataFrame(loss_history),  # Loss plot
            iteration_cache,  # Update iteration history state
            gr.Slider(  # Update iteration slider (grows from 1-1 to 1-10)
                minimum=1,
                maximum=n,
                value=n,
                step=1,
                visible=True,
                interactive=True,
            ),
            gr.Markdown(value=info_md, visible=True),  # Show iteration info
            # Update parameter sliders with optimized values:
            result["params"].get("z_offset", gr.skip()),
            result["params"].get("numerical_aperture_detection", gr.skip()),
            result["params"].get("numerical_aperture_illumination", gr.skip()),
            result["params"].get("tilt_angle_zenith", gr.skip()),
            result["params"].get("tilt_angle_azimuth", gr.skip()),
        )

    # Final yield (keep last state)
    yield (
        gr.skip(),  # Keep last ImageSlider state
        gr.skip(),  # Keep last loss plot
        gr.skip(),  # Keep iteration history
        gr.skip(),  # Keep iteration slider
        gr.Markdown(
            value=f"**Optimization Complete!** Final Loss: `{result['loss']:.2e}`",
            visible=True,
        ),
        gr.skip(),  # Keep z_offset
        gr.skip(),  # Keep na_det
        gr.skip(),  # Keep na_ill
        gr.skip(),  # Keep tilt_zenith
        gr.skip(),  # Keep tilt_azimuth
    )


# ============================================================================
# ITERATION SCRUBBING CALLBACKS
# ============================================================================

def scrub_iterations(iteration_idx: int, history: list):
    """Update display AND parameter sliders when user scrubs to different iteration."""
    if not history or iteration_idx < 1 or iteration_idx > len(history):
        return (gr.skip(),) * 7  # image, info, and 5 parameter values

    # Get selected iteration (convert to 0-indexed)
    selected = history[iteration_idx - 1]

    # Update ImageSlider overlay
    comparison = (selected["raw_image"], selected["reconstructed_image"])

    # Update info display
    info_md = f"**Iteration {selected['iteration']}/{len(history)}** | Loss: `{selected['loss']:.2e}`"

    # Extract parameter values at this iteration
    params = selected["params"]
    z_offset = params.get("z_offset", 0.0)
    na_det = params.get("numerical_aperture_detection", 0.55)
    na_ill = params.get("numerical_aperture_illumination", 0.54)
    tilt_zenith = params.get("tilt_angle_zenith", 0.0)
    tilt_azimuth = params.get("tilt_angle_azimuth", 0.0)

    return comparison, info_md, z_offset, na_det, na_ill, tilt_zenith, tilt_azimuth


def clear_iteration_state():
    """Reset iteration state when coordinates change."""
    return (
        [],  # Clear iteration_history
        gr.skip(),  # Don't update slider (avoid min=max error)
        gr.Markdown(value="", visible=False),  # Hide info
    )


# ============================================================================
# UI CONSTRUCTION
# ============================================================================

def create_gradio_interface(plate_metadata, default_fields, data_xr, pixel_scales):
    """Build the Gradio interface with all components and event wiring."""

    with gr.Blocks() as demo:
        gr.Markdown("# waveOrder Phase Reconstruction Demo")
        gr.Markdown(
            "**Paper:** Chandler et al. (2024). *waveOrder: generalist framework for label-agnostic computational microscopy*. "
            "[arXiv:2412.09775](https://arxiv.org/abs/2412.09775)\n\n"
            "**GitHub Repository:** [mehta-lab/waveorder](https://github.com/mehta-lab/waveorder)"
        )
        gr.Markdown("---")

        # FOV Selection (top of page)
        with gr.Row():
            fov_dropdown = gr.Dropdown(
                choices=default_fields,
                value=Config.DEFAULT_FIELD,
                label="Field of View",
                info=f"Select FOV from well {Config.DEFAULT_ROW}/{Config.DEFAULT_COLUMN}",
                scale=2,
            )
            load_fov_btn = gr.Button("🔄 Load FOV", variant="secondary", size="sm", scale=1)

        gr.Markdown("---")

        # Two-column layout: Image viewer (left) | Controls (right)
        with gr.Row():
            # LEFT COLUMN: Large ImageSlider (60% width)
            with gr.Column(scale=4):
                # Image viewer
                initial_preview = extract_2d_slice(
                    data_xr,
                    t=0,
                    c=Config.CHANNEL,
                    z=data_xr.sizes["Z"] // 2,
                    normalize=True,
                    verbose=False,
                )

                image_viewer = gr.ImageSlider(
                    label="Raw (left) vs Reconstructed (right) - Drag slider to compare",
                    type="numpy",
                    value=(initial_preview, initial_preview),
                    height=Config.IMAGE_HEIGHT,
                )

                gr.Markdown("---")

                # Section 2: Navigation (below image)
                gr.Markdown("### 🎛️ Navigation")
                z_slider = gr.Slider(
                    minimum=0,
                    maximum=data_xr.sizes["Z"] - 1,
                    value=data_xr.sizes["Z"] // 2,
                    step=1,
                    label="Z-slice",
                    scale=1,
                )

            # RIGHT COLUMN: All controls (40% width)
            with gr.Column(scale=2):
                # Section 3: Reconstruction Parameters
                gr.Markdown("### ⚙️ Reconstruction Parameters")

                # Sliders for optimizable parameters
                z_offset_slider = gr.Slider(
                    minimum=Config.SLIDER_RANGES["z_offset"][0],
                    maximum=Config.SLIDER_RANGES["z_offset"][1],
                    value=Config.OPTIMIZABLE_PARAMS["z_offset"][1],
                    step=Config.SLIDER_RANGES["z_offset"][2],
                    label="Z Offset (μm)",
                    info="Axial focus offset",
                )

                na_det_slider = gr.Slider(
                    minimum=Config.SLIDER_RANGES["na_detection"][0],
                    maximum=Config.SLIDER_RANGES["na_detection"][1],
                    value=Config.OPTIMIZABLE_PARAMS["numerical_aperture_detection"][1],
                    step=Config.SLIDER_RANGES["na_detection"][2],
                    label="NA Detection",
                    info="Numerical aperture of detection objective",
                )

                na_ill_slider = gr.Slider(
                    minimum=Config.SLIDER_RANGES["na_illumination"][0],
                    maximum=Config.SLIDER_RANGES["na_illumination"][1],
                    value=Config.OPTIMIZABLE_PARAMS["numerical_aperture_illumination"][1],
                    step=Config.SLIDER_RANGES["na_illumination"][2],
                    label="NA Illumination",
                    info="Numerical aperture of illumination",
                )

                tilt_zenith_slider = gr.Slider(
                    minimum=Config.SLIDER_RANGES["tilt_zenith"][0],
                    maximum=Config.SLIDER_RANGES["tilt_zenith"][1],
                    value=Config.OPTIMIZABLE_PARAMS["tilt_angle_zenith"][1],
                    step=Config.SLIDER_RANGES["tilt_zenith"][2],
                    label="Tilt Zenith (rad)",
                    info="Zenith angle of illumination tilt",
                )

                tilt_azimuth_slider = gr.Slider(
                    minimum=Config.SLIDER_RANGES["tilt_azimuth"][0],
                    maximum=Config.SLIDER_RANGES["tilt_azimuth"][1],
                    value=Config.OPTIMIZABLE_PARAMS["tilt_angle_azimuth"][1],
                    step=Config.SLIDER_RANGES["tilt_azimuth"][2],
                    label="Tilt Azimuth (rad)",
                    info="Azimuthal angle of illumination tilt",
                )

                gr.Markdown("---")

                # Section 4: Reconstruction Actions
                gr.Markdown("### 🔬 Phase Reconstruction")

                with gr.Row():
                    optimize_btn = gr.Button(
                        "⚡ Optimize Parameters", variant="secondary", size="lg"
                    )
                    reconstruct_btn = gr.Button(
                        "🔬 Run Reconstruction", variant="primary", size="lg"
                    )

                gr.Markdown("---")

                # Section 5: Optimization Results
                gr.Markdown("### 📊 Optimization Results")

                loss_plot = gr.LinePlot(
                    x="iteration",
                    y="loss",
                    title="Optimization - Midband Spatial Frequency Loss",
                    height=200,
                    scale=2,
                    value=pd.DataFrame({"iteration": [], "loss": []}),  # Initialize with empty DataFrame structure
                )

                # Iteration scrubbing controls
                iteration_slider = gr.Slider(
                    minimum=1,
                    maximum=1,
                    value=1,
                    step=1,
                    label="View Iteration",
                    info="Scrub through optimization history",
                    interactive=True,  # Always interactive (just hidden until optimization)
                    visible=False,
                )
                iteration_info = gr.Markdown(value="", visible=False)

                # State storage
                iteration_history = gr.State(value=[])
                current_data_xr = gr.State(value=data_xr)
                current_pixel_scales = gr.State(value=pixel_scales)

                gr.Markdown("---")

        # Wire all event handlers
        _wire_event_handlers(
            demo,
            fov_dropdown,
            load_fov_btn,
            z_slider,
            image_viewer,
            z_offset_slider,
            na_det_slider,
            na_ill_slider,
            tilt_zenith_slider,
            tilt_azimuth_slider,
            optimize_btn,
            reconstruct_btn,
            loss_plot,
            iteration_slider,
            iteration_info,
            iteration_history,
            current_data_xr,
            current_pixel_scales,
            plate_metadata,
        )

    return demo


def _wire_event_handlers(
    demo,
    fov_dropdown,
    load_fov_btn,
    z_slider,
    image_viewer,
    z_offset_slider,
    na_det_slider,
    na_ill_slider,
    tilt_zenith_slider,
    tilt_azimuth_slider,
    optimize_btn,
    reconstruct_btn,
    loss_plot,
    iteration_slider,
    iteration_info,
    iteration_history,
    current_data_xr,
    current_pixel_scales,
    plate_metadata,
):
    """Wire all Gradio event handlers."""

    # FOV loading
    load_fov_btn.click(
        fn=lambda field, z: load_selected_fov(field, z, plate_metadata),
        inputs=[fov_dropdown, z_slider],
        outputs=[z_slider, image_viewer, current_data_xr, current_pixel_scales],
    )

    # NA slider linking: Ensure NA_illumination <= NA_detection (physical constraint)
    # Only enforce when NA_detection changes (avoid feedback loop)
    def enforce_na_constraint(na_det_value, na_ill_value):
        """When NA_detection decreases below NA_illumination, cap NA_illumination."""
        return min(na_ill_value, na_det_value)

    na_det_slider.change(
        fn=enforce_na_constraint,
        inputs=[na_det_slider, na_ill_slider],
        outputs=[na_ill_slider],
    )

    # Image viewer for Z navigation (preview mode: same image twice)
    demo.load(
        fn=get_slice_for_preview,
        inputs=[z_slider, current_data_xr],
        outputs=image_viewer,
    )
    z_slider.change(
        fn=get_slice_for_preview,
        inputs=[z_slider, current_data_xr],
        outputs=image_viewer,
    )

    # Reconstruction buttons
    optimize_btn.click(
        fn=run_optimization_ui,
        inputs=[z_slider, current_data_xr, current_pixel_scales],
        outputs=[
            image_viewer,
            loss_plot,
            iteration_history,
            iteration_slider,
            iteration_info,
            z_offset_slider,
            na_det_slider,
            na_ill_slider,
            tilt_zenith_slider,
            tilt_azimuth_slider,
        ],
    )

    reconstruct_btn.click(
        fn=run_reconstruction_ui,
        inputs=[
            z_slider,
            z_offset_slider,
            na_det_slider,
            na_ill_slider,
            tilt_zenith_slider,
            tilt_azimuth_slider,
            current_data_xr,
            current_pixel_scales,
        ],
        outputs=[image_viewer],
    )

    # Iteration scrubbing - updates image AND all parameter sliders
    iteration_slider.change(
        fn=scrub_iterations,
        inputs=[iteration_slider, iteration_history],
        outputs=[
            image_viewer,
            iteration_info,
            z_offset_slider,
            na_det_slider,
            na_ill_slider,
            tilt_zenith_slider,
            tilt_azimuth_slider,
        ],
    )

    # Clear iteration state when Z changes
    z_slider.change(
        fn=clear_iteration_state,
        inputs=[],
        outputs=[iteration_history, iteration_slider, iteration_info],
    )


# ============================================================================
# MAIN ENTRY POINT
# ============================================================================

if __name__ == "__main__":
    # Initialize plate metadata and load the default FOV
    plate_metadata, default_fields = initialize_plate_metadata()
    data_xr, pixel_scales = load_default_fov(plate_metadata)

    # Create and launch interface
    demo = create_gradio_interface(plate_metadata, default_fields, data_xr, pixel_scales)

    print("\n" + "=" * 60)
    print("Starting Gradio Phase Reconstruction Viewer")
    print("=" * 60)
    print("Open your browser to the URL shown below")
    print("=" * 60 + "\n")

    demo.launch(
        share=False,  # Set to True to create public link
        # server_name="0.0.0.0",  # Allow external access
        server_port=Config.SERVER_PORT,
    )
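Note on the GPU setting: `Config.RECON_CONFIG["device"] = None` defers device selection to `demo_utils.get_device`. The snippet below is only an illustrative sketch of that documented policy (explicit request wins, otherwise CUDA if available, else CPU); it is not the actual resolver, which may also print GPU information.

# Editor's sketch of the documented device policy (not demo_utils.get_device itself)
import torch

def pick_device(requested=None) -> torch.device:
    # Explicit request ("cuda", "cpu", "cuda:0") takes precedence;
    # otherwise fall back to CUDA when available, else CPU.
    if requested is not None:
        return torch.device(requested)
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")

print(pick_device())       # cuda on a GPU Space, cpu otherwise
print(pick_device("cpu"))  # what Config.RECON_CONFIG["device"] = "cpu" would force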
requirements.txt
ADDED
@@ -0,0 +1,10 @@
# WaveOrder Phase Reconstruction Demo - Requirements
# Install waveorder from the gradio-demo branch (includes GPU device handling fixes)
git+https://github.com/mehta-lab/waveorder.git@gradio-demo
git+https://github.com/ianhi/xarray-ome.git@main
git+https://github.com/czbiohub-sf/iohub.git@v0.3.0a2

# Gradio for web interface
gradio>=6.0.0