diff --git a/applications/virtual-fly-brain/scripts/README.md b/applications/virtual-fly-brain/scripts/README.md new file mode 100644 index 00000000..9554229b --- /dev/null +++ b/applications/virtual-fly-brain/scripts/README.md @@ -0,0 +1,96 @@ +# NRRD to Neuroglancer Precomputed Workflow + +## Overview +This guide shows how to convert NRRD segmentation files to Neuroglancer precomputed format with meshes. + +## Fixed Issues + +### Issue #1: Meshes not showing in Neuroglancer +**Root cause**: Mesh vertices were in physical coordinates (microns) instead of voxel coordinates. +**Fix**: Removed `spacing` parameter from `marching_cubes()` call. Neuroglancer applies resolution scaling separately. + +### Issue #2: Segmentation fragmentation +**Clarification**: The NRRD files contain multiple anatomical structures as separate segments (this is correct). +If you see unexpected fragmentation, use `merge_segments.py` to consolidate connected components. + +## Workflow + +### Step 1: Inspect your data (optional but recommended) +```bash +cd /home/ddelpiano/git/neuroglass/dataScripts/nrrd_to_precomputed +python inspect_nrrd.py files/VFB_00101567.nrrd +``` + +### Step 2: (Optional) Merge fragmented segments +Only use this if you want to merge disconnected pieces into single segments: +```bash +python merge_segments.py \ + files/VFB_00101567.nrrd \ + files/VFB_00101567_merged.nrrd \ + --min-size 100 \ + --verbose +``` + +### Step 3: Convert NRRD to precomputed format +```bash +python converter.py \ + --input-dir files \ + --output-path output \ + --verbose +``` + +This will: +- Find all `.nrrd` files in `files/` directory +- Convert each to precomputed format in `output/` directory +- Create a mapping file: `output/sources_to_dataset.json` + +### Step 4: Generate meshes +```bash +python meshes_generator.py \ + --input-path output \ + --dust-threshold 100 \ + --verbose +``` + +This will: +- Find all precomputed datasets in `output/` directory +- Generate meshes for each segment 
(skipping segments < 100 voxels) +- Create segment properties (labels and metadata) +- Generate `output/neuroglancer_state.json` for easy loading + +### Step 5: Serve the data +```bash +cd output +npx http-server . -p 8080 --cors +``` + +### Step 6: View in Neuroglancer +1. Go to https://neuroglancer-demo.appspot.com/ +2. Click the `{}` (JSON) button in the top-right +3. Copy the contents of `output/neuroglancer_state.json` +4. Paste into the JSON editor +5. Click outside the editor to load + +## Troubleshooting + +### Meshes still not showing? +1. **Check the browser console** for errors +2. **Verify mesh files exist**: + ```bash + ls -lh output/*/mesh/ + ``` +3. **Check segment IDs match**: + ```bash + python inspect_nrrd.py files/VFB_00101567.nrrd | grep "Segment IDs" + ``` +4. **Try loading a single segment manually** in Neuroglancer UI + +### Segmentation looks wrong? +- Use `inspect_nrrd.py` to see segment IDs and sizes +- The NRRD files contain pre-labeled anatomical structures +- If you see unexpected fragmentation, verify with the original data source + +### Performance issues? 
- Increase `--dust-threshold` to skip more small segments (segments with fewer voxels than the threshold are not meshed)
from filename + seg_id = int(sample_mesh.replace(".gz", "").replace(":", "_").split("_")[0]) + + try: + mesh = vol.mesh.get(seg_id) + if mesh and len(mesh.vertices) > 0: + print(f"\nMesh {seg_id} vertices:") + print(f" Min: {mesh.vertices.min(axis=0)}") + print(f" Max: {mesh.vertices.max(axis=0)}") + print(f" Shape: {mesh.vertices.shape}") + + # Compare with volume bounds + max_voxel = mesh.vertices.max(axis=0) + print(f"\nComparison:") + print(f" Volume size (voxels): {size}") + print(f" Mesh max (should be <= volume size): {max_voxel}") + + if any(max_voxel > size): + print(f" ⚠ WARNING: Mesh vertices exceed volume bounds!") + else: + print(f" ✓ Mesh vertices within volume bounds") + except Exception as e: + print(f" Error loading mesh: {e}") + + except Exception as e: + print(f"Error: {e}") +else: + print("\n⚠ No mesh info found") diff --git a/applications/virtual-fly-brain/scripts/converter.py b/applications/virtual-fly-brain/scripts/converter.py new file mode 100644 index 00000000..042c4a1e --- /dev/null +++ b/applications/virtual-fly-brain/scripts/converter.py @@ -0,0 +1,290 @@ +#!/usr/bin/env python3 +""" +Minimal converter: NRRD -> Neuroglancer precomputed (volumetric only). +Fixed to use correct scale keys with optional compression. 
+""" +from __future__ import annotations +import argparse +import os +import tempfile +import requests +import nrrd +import numpy as np +import json +import hashlib +from cloudvolume import CloudVolume +from urllib.parse import urlparse +from pathlib import Path + + +def download_to_temp(url: str) -> str: + if url.startswith("file://"): + return url.replace("file://", "") + if os.path.exists(url): + return url + r = requests.get(url, stream=True) + r.raise_for_status() + fd, tmp = tempfile.mkstemp(suffix=".nrrd") + os.close(fd) + with open(tmp, "wb") as f: + for chunk in r.iter_content(1024 * 1024): + f.write(chunk) + return tmp + + +def sanitize_name(name: str) -> str: + for c in [" ", "/", "\\", ":", "?", "&", "=", "%"]: + name = name.replace(c, "_") + return name + + +def dataset_name_from_source(src: str, local_path: str) -> str: + """Generate dataset name from source path.""" + # For local files, just use the filename without extension + if os.path.exists(src): + fname = os.path.splitext(os.path.basename(src))[0] + return sanitize_name(fname) + + # For URLs, try to extract meaningful name from path + try: + if src.startswith(("http://", "https://", "s3://", "gs://")): + parsed = urlparse(src) + parts = [p for p in parsed.path.rstrip("/").split("/") if p] + if len(parts) >= 1: + fname = os.path.splitext(parts[-1])[0] + return sanitize_name(fname) + except Exception: + pass + + # Fallback: use filename with hash + fname = os.path.splitext(os.path.basename(local_path))[0] + short = hashlib.md5(src.encode("utf-8")).hexdigest()[:6] + return sanitize_name(f"{fname}_{short}") + + +def detect_spacing(header: dict): + if "space directions" in header and header["space directions"] is not None: + try: + dirs = header["space directions"] + spacings = [ + float(np.linalg.norm(d)) if d is not None else 1.0 for d in dirs + ] + return spacings[::-1] + except Exception: + pass + if "spacings" in header: + try: + sp = header["spacings"] + return list(map(float, sp))[::-1] + 
except Exception: + pass + return [1.0, 1.0, 1.0] + + +def detect_origin(header: dict): + """Extract space origin from NRRD header and convert to XYZ order.""" + if "space origin" in header and header["space origin"] is not None: + try: + origin = header["space origin"] + # NRRD origin is in ZYX order, reverse to XYZ for Neuroglancer + return [float(x) for x in origin[::-1]] + except Exception: + pass + return [0.0, 0.0, 0.0] + + +def main(): + p = argparse.ArgumentParser( + description="Convert NRRD files to Neuroglancer precomputed format" + ) + p.add_argument( + "--input-dir", + required=True, + help="Directory containing NRRD files to convert", + ) + p.add_argument( + "--output-path", + required=True, + help="Output directory for precomputed format (will be created if doesn't exist)", + ) + p.add_argument("--verbose", action="store_true") + p.add_argument( + "--no-compress", + action="store_true", + help="Disable gzip compression (store raw uncompressed chunks)", + ) + p.add_argument( + "--min-intensity", + type=int, + default=None, + help="Minimum segment ID/intensity to keep (values below will be set to 0)", + ) + p.add_argument( + "--max-intensity", + type=int, + default=None, + help="Maximum segment ID/intensity to keep (values above will be set to 0)", + ) + args = p.parse_args() + + compress = not args.no_compress + + # Expand and normalize paths + input_dir = os.path.abspath(os.path.expanduser(args.input_dir)) + output_path = os.path.abspath(os.path.expanduser(args.output_path)) + + if not os.path.isdir(input_dir): + raise ValueError(f"Input directory does not exist: {input_dir}") + + # Convert to file:// URL + args.output_path = f"file://{output_path}" + + if args.output_path.startswith("file://"): + pth = args.output_path.replace("file://", "") + pth = os.path.expanduser(pth) + args.output_path = "file://" + pth + + out_root_local = None + if args.output_path.startswith("file://"): + out_root_local = args.output_path.replace("file://", "") + + 
os.makedirs(out_root_local, exist_ok=True) if out_root_local else None + + # Find all NRRD files in input directory + nrrd_files = [] + for filename in os.listdir(input_dir): + if filename.endswith(".nrrd"): + nrrd_files.append(os.path.join(input_dir, filename)) + + if not nrrd_files: + raise ValueError(f"No .nrrd files found in {input_dir}") + + if args.verbose: + print(f"Found {len(nrrd_files)} NRRD files in {input_dir}") + + mapping = {} + + for src in nrrd_files: + if args.verbose: + print("Processing:", src) + local_path = download_to_temp(src) + data, header = nrrd.read(local_path) + if data.ndim != 3: + raise RuntimeError( + f"Only 3D volumes supported (got ndim={data.ndim}) for {src}" + ) + + # Transpose from ZYX (NRRD) to XYZ (Neuroglancer) + arr_xyz = np.transpose(data, (2, 1, 0)).copy() + + # Apply intensity filtering if specified (for segmentation data) + if np.issubdtype(arr_xyz.dtype, np.integer): + if args.min_intensity is not None or args.max_intensity is not None: + original_segments = len(np.unique(arr_xyz[arr_xyz > 0])) + + if args.min_intensity is not None: + arr_xyz[arr_xyz < args.min_intensity] = 0 + if args.max_intensity is not None: + arr_xyz[arr_xyz > args.max_intensity] = 0 + + filtered_segments = len(np.unique(arr_xyz[arr_xyz > 0])) + + if args.verbose: + print( + f" Intensity filter: {original_segments} segments -> {filtered_segments} segments" + ) + print( + f" Range: [{args.min_intensity or 'any'}, {args.max_intensity or 'any'}]" + ) + + dtype_str = str(np.dtype(arr_xyz.dtype).name) + voxel_size = detect_spacing(header) + voxel_offset = detect_origin(header) + ds_name = dataset_name_from_source(src, local_path) + dest = args.output_path.rstrip("/") + "/" + ds_name + + if args.verbose: + print( + "Writing dataset:", + dest, + "shape(XYZ):", + arr_xyz.shape, + "dtype:", + dtype_str, + "voxel_size:", + voxel_size, + "voxel_offset:", + voxel_offset, + ) + + # Determine layer type + is_segmentation = np.issubdtype(arr_xyz.dtype, np.integer) 
+ layer_type = "segmentation" if is_segmentation else "image" + + # For uint8/uint16, use raw encoding + encoding = "raw" + compressed_segmentation_block_size = None + + # Create info with explicit scale key = "0" + # IMPORTANT: voxel_offset must match between volume and segmentation + # for proper alignment in Neuroglancer's physical coordinate space + info = { + "data_type": dtype_str, + "num_channels": 1, + "scales": [ + { + "chunk_sizes": [[64, 64, 64]], + "encoding": encoding, + "key": "0", # Important: must be "0" not the resolution + "resolution": voxel_size, + "size": list(arr_xyz.shape), + "voxel_offset": voxel_offset, + } + ], + "type": layer_type, + } + + if compressed_segmentation_block_size: + info["compressed_segmentation_block_size"] = ( + compressed_segmentation_block_size + ) + + # Create CloudVolume with the info + vol = CloudVolume(dest, mip=0, info=info, compress=compress) + vol.commit_info() + + # Write the data + vol[:, :, :] = arr_xyz + + if args.verbose: + print(f"Successfully wrote {ds_name}") + print(f" Encoding: {encoding}") + print( + f" Compression: {'gzip (. gz files)' if compress else 'none (raw files)'}" + ) + print(f" Scale key: 0") + + mapping[src] = ds_name + + # Clean up temp file + if src.startswith("http"): + try: + os.remove(local_path) + except Exception: + pass + + if out_root_local: + mapping_path = os.path.join(out_root_local, "sources_to_dataset.json") + with open(mapping_path, "w") as f: + json.dump(mapping, f, indent=2) + if args.verbose: + print("Wrote mapping:", mapping_path) + + print("Done. 
Datasets written to:", args.output_path) + print("Mapping (source -> dataset):") + for k, v in mapping.items(): + print(" ", k, "->", v) + + +if __name__ == "__main__": + main() diff --git a/applications/virtual-fly-brain/scripts/inspect_nrrd.py b/applications/virtual-fly-brain/scripts/inspect_nrrd.py new file mode 100644 index 00000000..54da38e0 --- /dev/null +++ b/applications/virtual-fly-brain/scripts/inspect_nrrd.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python3 +"""Inspect NRRD file contents to understand segmentation issues""" +import sys +import nrrd +import numpy as np + +if len(sys.argv) < 2: + print("Usage: python inspect_nrrd.py ") + sys.exit(1) + +nrrd_file = sys.argv[1] +print(f"Inspecting: {nrrd_file}\n") + +data, header = nrrd.read(nrrd_file) + +print("=== Shape and Type ===") +print(f"Shape (Z,Y,X): {data.shape}") +print(f"Dtype: {data.dtype}") + +print("\n=== Header ===") +for key, value in sorted(header.items()): + print(f"{key}: {value}") + +print("\n=== Unique Segment IDs ===") +unique_ids = np.unique(data) +print(f"Total unique values: {len(unique_ids)}") +print(f"Min value: {unique_ids[0]}") +print(f"Max value: {unique_ids[-1]}") + +# Show distribution +print(f"\nFirst 20 segment IDs: {unique_ids[:20].tolist()}") +if len(unique_ids) > 20: + print(f"Last 20 segment IDs: {unique_ids[-20:].tolist()}") + +# Count voxels per segment +print("\n=== Segment Sizes (top 20) ===") +segment_counts = {} +for seg_id in unique_ids: + if seg_id == 0: + continue # Skip background + count = np.sum(data == seg_id) + segment_counts[seg_id] = count + +# Sort by size +sorted_segments = sorted(segment_counts.items(), key=lambda x: x[1], reverse=True) +for seg_id, count in sorted_segments[:20]: + print(f"Segment {seg_id}: {count:,} voxels") + +print(f"\n=== Potential Issues ===") +# Check if segments are consecutive +non_zero_ids = unique_ids[unique_ids > 0] +if len(non_zero_ids) > 0: + expected_consecutive = np.arange(1, len(non_zero_ids) + 1) + if not 
np.array_equal(non_zero_ids, expected_consecutive): + print( + "⚠ WARNING: Segment IDs are NOT consecutive (gaps or non-standard numbering)" + ) + print(f" This can cause issues with mesh generation") + else: + print("✓ Segment IDs are consecutive (1, 2, 3, ...)") + +# Check for very small segments +small_segments = [sid for sid, cnt in segment_counts.items() if cnt < 100] +if small_segments: + print(f"⚠ WARNING: {len(small_segments)} segments have < 100 voxels (dust)") + print(f" These may represent fragmentation of larger objects") diff --git a/applications/virtual-fly-brain/scripts/merge_segments.py b/applications/virtual-fly-brain/scripts/merge_segments.py new file mode 100644 index 00000000..a82f366d --- /dev/null +++ b/applications/virtual-fly-brain/scripts/merge_segments.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python3 +""" +Merge disconnected components within each segment using connected component analysis. +Use this if you have segments that should be single objects but are fragmented. +""" +import sys +import nrrd +import numpy as np +from scipy import ndimage + + +def merge_segments_by_connectivity(input_file, output_file, min_size=100, verbose=True): + """ + Apply connected component labeling to merge fragments. + Each connected region gets a new unique ID. 
+ + Args: + input_file: Path to input NRRD + output_file: Path to output NRRD + min_size: Minimum voxel count for a component to be kept + verbose: Print progress + """ + if verbose: + print(f"Reading: {input_file}") + + data, header = nrrd.read(input_file) + + if verbose: + print(f"Shape: {data.shape}, dtype: {data.dtype}") + original_segments = len(np.unique(data)) - 1 # Exclude background + print(f"Original segments: {original_segments}") + + # Create binary mask (non-zero = foreground) + mask = data > 0 + + # Connected component labeling + if verbose: + print("Running connected component analysis...") + + labeled, num_features = ndimage.label(mask) + + if verbose: + print(f"Found {num_features} connected components") + + # Filter by size and renumber + output = np.zeros_like(data) + new_label = 1 + + for component_id in range(1, num_features + 1): + component_mask = labeled == component_id + size = np.sum(component_mask) + + if size >= min_size: + output[component_mask] = new_label + if verbose and new_label <= 20: + print(f" Component {new_label}: {size:,} voxels") + new_label += 1 + + if verbose: + final_segments = new_label - 1 + print(f"\nFinal segments: {final_segments}") + print(f"Removed: {num_features - final_segments} small components") + + # Write output + nrrd.write(output_file, output.astype(data.dtype), header) + + if verbose: + print(f"✓ Saved to: {output_file}") + + return output + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser( + description="Merge segmentation fragments using connected component analysis" + ) + parser.add_argument("input_file", help="Input NRRD file") + parser.add_argument("output_file", help="Output NRRD file") + parser.add_argument( + "--min-size", + type=int, + default=100, + help="Minimum voxel count for a component (default: 100)", + ) + parser.add_argument("--verbose", action="store_true", help="Print progress") + + args = parser.parse_args() + + merge_segments_by_connectivity( + 
args.input_file, args.output_file, min_size=args.min_size, verbose=args.verbose + ) diff --git a/applications/virtual-fly-brain/scripts/meshes_generator.py b/applications/virtual-fly-brain/scripts/meshes_generator.py new file mode 100644 index 00000000..419cdfde --- /dev/null +++ b/applications/virtual-fly-brain/scripts/meshes_generator.py @@ -0,0 +1,602 @@ +#!/usr/bin/env python3 +""" +Complete mesh generation and setup for precomputed segmentation volumes. +Automatically creates segment properties for Neuroglancer to discover segments. +Supports parallel processing for faster mesh generation. +""" +import os +import sys +import json +from cloudvolume import CloudVolume +from cloudvolume.mesh import Mesh +from cloudvolume.lib import Bbox +import numpy as np +from multiprocessing import Pool, cpu_count + +try: + from skimage import measure +except ImportError: + print("ERROR: scikit-image not installed. Install with: pip install scikit-image") + sys.exit(1) + + +def create_segment_properties(local_path, segment_ids, verbose=True): + """Create segment_properties directory with info file listing all segments.""" + + segment_props_dir = os.path.join(local_path, "segment_properties") + os.makedirs(segment_props_dir, exist_ok=True) + + # Create the info file + info_path = os.path.join(segment_props_dir, "info") + info = { + "@type": "neuroglancer_segment_properties", + "inline": { + "ids": [str(seg_id) for seg_id in segment_ids], + "properties": [ + { + "id": "label", + "type": "label", + "values": [f"Segment {seg_id}" for seg_id in segment_ids], + }, + { + "id": "description", + "type": "description", + "values": [f"Segment ID {seg_id}" for seg_id in segment_ids], + }, + ], + }, + } + + with open(info_path, "w") as f: + json.dump(info, f, indent=2) + + if verbose: + print(f" ✓ Created segment properties for {len(segment_ids)} segments") + + +def setup_mesh_metadata(dataset_path, local_path, verbose=True): + """Ensure mesh metadata is properly configured.""" + vol = 
CloudVolume(dataset_path, mip=0, use_https=False) + + # Update main info with mesh and segment_properties + needs_update = False + if "mesh" not in vol.info or vol.info["mesh"] is None: + vol.info["mesh"] = "mesh" + needs_update = True + + if "segment_properties" not in vol.info or vol.info["segment_properties"] is None: + vol.info["segment_properties"] = "segment_properties" + needs_update = True + + if needs_update: + vol.commit_info() + if verbose: + print(f" ✓ Updated main info (mesh + segment_properties)") + + # Create mesh directory + mesh_dir = os.path.join(local_path, "mesh") + os.makedirs(mesh_dir, exist_ok=True) + + # Get resolution and voxel_offset from the volume info + resolution = vol.info["scales"][0]["resolution"] + voxel_offset = vol.info["scales"][0].get("voxel_offset", [0, 0, 0]) + + # Create mesh info with legacy format (multi-res format not supported by CloudVolume) + mesh_info_path = os.path.join(mesh_dir, "info") + mesh_info = { + "@type": "neuroglancer_legacy_mesh", + "mip": 0, + "vertex_quantization_bits": 10, + "lod_scale_multiplier": 1.0, + } + + with open(mesh_info_path, "w") as f: + json.dump(mesh_info, f, indent=2) + + if verbose: + print(f" ✓ Mesh metadata configured (legacy format)") + print(f" Resolution: {resolution}") + print(f" Voxel offset: {voxel_offset}") + + +def generate_single_mesh(args): + """Generate mesh for a single segment (worker function for parallel processing)""" + from scipy import ndimage + + seg_id, data, dust_threshold, filter_noise = args + + try: + mask = data == seg_id + voxel_count = np.sum(mask) + + if voxel_count < dust_threshold: + return {"status": "skipped", "seg_id": seg_id, "voxels": voxel_count} + + # Optional: Remove noise by keeping only largest connected component + if filter_noise: + # Find bounding box of the mask to reduce computation + coords = np.argwhere(mask) + if len(coords) == 0: + return {"status": "empty", "seg_id": seg_id} + + min_coords = coords.min(axis=0) + max_coords = 
coords.max(axis=0) + + # Extract just the bounding box region + bbox_mask = mask[ + min_coords[0] : max_coords[0] + 1, + min_coords[1] : max_coords[1] + 1, + min_coords[2] : max_coords[2] + 1, + ] + + # Label only the bounding box region + labeled, num_features = ndimage.label(bbox_mask) + if num_features > 1: + # Find largest component + component_sizes = [ + (labeled == i).sum() for i in range(1, num_features + 1) + ] + largest_component = np.argmax(component_sizes) + 1 + bbox_mask = labeled == largest_component + new_voxel_count = np.sum(bbox_mask) + + # If we filtered out too much, skip this segment + if new_voxel_count < dust_threshold: + return { + "status": "skipped", + "seg_id": seg_id, + "voxels": new_voxel_count, + "filtered": True, + } + + # Update the full mask with the filtered bbox + mask[:] = False + mask[ + min_coords[0] : max_coords[0] + 1, + min_coords[1] : max_coords[1] + 1, + min_coords[2] : max_coords[2] + 1, + ] = bbox_mask + + try: + vertices, faces, normals, values = measure.marching_cubes( + mask, level=0.5, allow_degenerate=False + ) + except (ValueError, RuntimeError): + return {"status": "no_surface", "seg_id": seg_id} + + if len(vertices) == 0 or len(faces) == 0: + return {"status": "empty", "seg_id": seg_id} + + vertices = vertices.astype(np.float32) + faces = faces.astype(np.uint32) + + return { + "status": "success", + "seg_id": seg_id, + "vertices": vertices, + "faces": faces, + "voxel_count": voxel_count, + } + + except Exception as e: + return {"status": "error", "seg_id": seg_id, "error": str(e)} + + +def generate_merged_mesh(data, verbose=True): + """Generate a single mesh from all non-zero voxels in the volume.""" + from scipy import ndimage + + # Create mask of all non-zero voxels + mask = data > 0 + voxel_count = np.sum(mask) + + if verbose: + print(f" Creating merged mesh from {voxel_count} voxels...") + + try: + vertices, faces, normals, values = measure.marching_cubes( + mask, level=0.5, allow_degenerate=False + ) + except 
(ValueError, RuntimeError) as e: + if verbose: + print(f" ✗ Failed to generate merged mesh: {e}") + return None + + if len(vertices) == 0 or len(faces) == 0: + if verbose: + print(f" ✗ Merged mesh has no geometry") + return None + + vertices = vertices.astype(np.float32) + faces = faces.astype(np.uint32) + + if verbose: + print(f" ✓ Merged mesh: {len(vertices)} vertices, {len(faces)} faces") + + return {"vertices": vertices, "faces": faces} + + +def generate_meshes_with_skimage( + dataset_path, + dust_threshold=100, + verbose=True, + num_workers=None, + filter_noise=False, # Disabled by default - too slow + merge_segments=False, +): + """Generate meshes using scikit-image marching cubes.""" + + if verbose: + print(f"\nProcessing: {dataset_path}") + print("=" * 60) + + if dataset_path.startswith("file://"): + local_path = dataset_path.replace("file://", "") + local_path = os.path.expanduser(local_path) + dataset_path = "file://" + local_path + else: + local_path = dataset_path + + setup_mesh_metadata(dataset_path, local_path, verbose) + + vol = CloudVolume(dataset_path, mip=0, use_https=False) + bbox = Bbox([0, 0, 0], vol.info["scales"][0]["size"]) + data = vol[bbox.to_slices()] + + if data.ndim == 4: + data = data[:, :, :, 0] + + if verbose: + print(f" Volume shape: {data.shape}, dtype: {data.dtype}") + print(f" Resolution: {vol.info['scales'][0]['resolution']}") + + # Find ALL unique segments (including those we won't mesh) + all_segments = np.unique(data) + all_segments = all_segments[all_segments > 0] # Exclude background + + if verbose: + print(f" Found {len(all_segments)} non-zero segments") + + resolution = vol.info["scales"][0]["resolution"] + voxel_offset = vol.info["scales"][0].get("voxel_offset", [0, 0, 0]) + + # Handle merged vs individual mesh generation + if merge_segments: + if verbose: + print(f"\n Generating single merged mesh from all segments...") + print( + f" Note: Vertices will be transformed to physical coordinates (resolution: {resolution})" 
+ ) + + # Use segment ID 1 for the merged mesh + create_segment_properties(local_path, [1], verbose) + + merged_mesh = generate_merged_mesh(data, verbose) + + if merged_mesh: + # Transform vertices to physical coordinates + vertices = merged_mesh["vertices"].copy() + vertices[:, 0] = vertices[:, 0] * resolution[0] + voxel_offset[0] + vertices[:, 1] = vertices[:, 1] * resolution[1] + voxel_offset[1] + vertices[:, 2] = vertices[:, 2] * resolution[2] + voxel_offset[2] + + mesh_obj = Mesh(vertices, merged_mesh["faces"], segid=1) + vol.mesh.put(mesh_obj, compress=True) + + return { + "generated": 1, + "skipped": 0, + "failed": 0, + "total": 1, + "segments_with_meshes": [1], + } + else: + return { + "generated": 0, + "skipped": 0, + "failed": 1, + "total": 1, + "segments_with_meshes": [], + } + + # Individual segment mesh generation (original behavior) + # Create segment properties for ALL segments + create_segment_properties(local_path, all_segments.tolist(), verbose) + + # Determine number of workers + if num_workers is None: + num_workers = min( + 4, max(1, cpu_count() - 2) + ) # Use max 4 workers, leave 2 cores free + + if verbose: + print(f"\n Generating meshes (dust threshold: {dust_threshold} voxels)...") + print(f" Using {num_workers} parallel workers") + print(f" Noise filtering: {'enabled' if filter_noise else 'disabled'}") + print( + f" Note: Vertices will be transformed to physical coordinates (resolution: {resolution})" + ) + + meshes_generated = 0 + meshes_skipped = 0 + meshes_failed = 0 + segments_with_meshes = [] + + # Prepare arguments for parallel processing + mesh_args = [ + (seg_id, data, dust_threshold, filter_noise) for seg_id in all_segments + ] + + # Process in parallel + if num_workers > 1: + with Pool(processes=num_workers) as pool: + results = pool.map(generate_single_mesh, mesh_args) + else: + # Sequential processing for debugging + results = [generate_single_mesh(arg) for arg in mesh_args] + + # Write meshes and collect statistics + for idx, 
result in enumerate(results): + seg_id = result["seg_id"] + status = result["status"] + + if verbose and (idx + 1) % 10 == 0: + print(f" Processed {idx+1}/{len(all_segments)} segments...") + + if status == "success": + # Transform vertices to physical coordinates + vertices = result["vertices"].copy() + vertices[:, 0] = vertices[:, 0] * resolution[0] + voxel_offset[0] + vertices[:, 1] = vertices[:, 1] * resolution[1] + voxel_offset[1] + vertices[:, 2] = vertices[:, 2] * resolution[2] + voxel_offset[2] + + # Write mesh to cloudvolume + mesh_obj = Mesh(vertices, result["faces"], segid=int(seg_id)) + vol.mesh.put(mesh_obj, compress=True) + segments_with_meshes.append(int(seg_id)) + meshes_generated += 1 + + if verbose and meshes_generated <= 5: + print( + f" Segment {seg_id}: ✓ ({len(result['vertices'])} verts, {len(result['faces'])} faces)" + ) + + elif status == "skipped": + meshes_skipped += 1 + else: + meshes_failed += 1 + + if verbose: + print(f"\n Summary:") + print(f" ✓ Generated: {meshes_generated} meshes") + print(f" ⊘ Skipped: {meshes_skipped} segments") + print(f" ✗ Failed: {meshes_failed} segments") + print(f" 📋 Total segments in properties: {len(all_segments)}") + + return { + "generated": meshes_generated, + "skipped": meshes_skipped, + "failed": meshes_failed, + "total": len(all_segments), + "segments_with_meshes": segments_with_meshes, + } + + +def generate_neuroglancer_state( + datasets_info, base_url="http://localhost:8080/precomputed", output_path=None +): + """Generate a Neuroglancer JSON state with all segments pre-selected.""" + + layers = [] + + # Get resolution from first dataset for dimensions + first_dataset_path = datasets_info[0]["path"].replace("file://", "") + with open(os.path.join(first_dataset_path, "info"), "r") as f: + first_info = json.load(f) + + resolution = first_info["scales"][0]["resolution"] + size = first_info["scales"][0]["size"] + + # Calculate appropriate position (center of volume) + position = [s // 2 for s in size] + + 
for dataset_info in datasets_info: + dataset_path = dataset_info["path"] + dataset_name = os.path.basename(dataset_path.rstrip("/")) + segments = dataset_info.get("segments_with_meshes", []) + + # Select first 10 segments by default (for performance) + selected_segments = [str(s) for s in segments[:10]] + + layer = { + "type": "segmentation", + "source": { + "url": f"precomputed://{base_url}/{dataset_name}", + "subsources": {"default": True, "mesh": True, "properties": True}, + "enableDefaultSubsources": False, + }, + "tab": "segments", + "segments": selected_segments, + "hideSegmentZero": True, + "selectedAlpha": 1.0, + "notSelectedAlpha": 0.15, + "objectAlpha": 0.8, + "meshRenderScale": 1, + "meshSilhouetteRendering": 3, + "name": dataset_name, + "visible": True, + # Apply coordinate transform to meshes to account for resolution + "transform": [ + [resolution[0], 0, 0, 0], + [0, resolution[1], 0, 0], + [0, 0, resolution[2], 0], + ], + } + layers.append(layer) + + state = { + "dimensions": { + "x": [resolution[0] * 1e-6, "m"], # Convert to meters + "y": [resolution[1] * 1e-6, "m"], + "z": [resolution[2] * 1e-6, "m"], + }, + "position": position, + "crossSectionScale": max(size) // 100, # Auto-scale based on volume size + "projectionScale": max(size) * 2, + "projectionOrientation": [0, 0, 0, 1], # Default orientation + "layers": layers, + "layout": "4panel", + "showAxisLines": True, # Show axis lines for reference + } + + if output_path: + with open(output_path, "w") as f: + json.dump(state, f, indent=2) + print(f"\n✓ Saved Neuroglancer state to: {output_path}") + + return state + + +def main(): + import argparse + + parser = argparse.ArgumentParser( + description="Generate meshes and segment properties for precomputed segmentation volumes" + ) + parser.add_argument( + "--input-path", + required=True, + help="Path to precomputed dataset directory (containing one or more datasets)", + ) + parser.add_argument( + "--dust-threshold", + type=int, + default=100, + 
help="Minimum voxel count for mesh generation (default: 100)", + ) + parser.add_argument( + "--workers", + type=int, + default=None, + help="Number of parallel workers for mesh generation (default: CPU count - 1)", + ) + parser.add_argument( + "--no-filter-noise", + action="store_true", + help="Disable noise filtering (keep all connected components)", + ) + parser.add_argument( + "--filter-noise", + action="store_true", + help="Enable noise filtering to keep only largest connected component (slow!)", + ) + parser.add_argument( + "--merge-segments", + action="store_true", + help="Merge all segments into a single mesh per dataset", + ) + parser.add_argument( + "--verbose", action="store_true", help="Print detailed progress" + ) + parser.add_argument( + "--output-state", + help="Output path for Neuroglancer JSON state file (will be saved in input-path if not specified)", + ) + parser.add_argument( + "--base-url", + default="http://localhost:8080", + help="Base URL for HTTP server in generated state (default: http://localhost:8080)", + ) + + args = parser.parse_args() + + # Expand and normalize input path + input_path = os.path.abspath(os.path.expanduser(args.input_path)) + + if not os.path.isdir(input_path): + raise ValueError(f"Input path does not exist: {input_path}") + + # Find all dataset directories in the input path + # A dataset directory has an 'info' file in it + datasets = [] + for item in os.listdir(input_path): + item_path = os.path.join(input_path, item) + if os.path.isdir(item_path) and os.path.exists(os.path.join(item_path, "info")): + datasets.append(f"file://{item_path}") + + if not datasets: + raise ValueError( + f"No precomputed datasets found in {input_path}. Make sure directories contain 'info' files." 
+ ) + + if args.verbose: + print(f"Found {len(datasets)} precomputed datasets in {input_path}") + for ds in datasets: + print(f" - {os.path.basename(ds.replace('file://', ''))}") + + total_stats = {"generated": 0, "skipped": 0, "failed": 0, "total": 0, "datasets": 0} + + datasets_info = [] + + for dataset in datasets: + try: + stats = generate_meshes_with_skimage( + dataset, + dust_threshold=args.dust_threshold, + verbose=args.verbose, + num_workers=args.workers, + filter_noise=args.filter_noise, # Now opt-in instead of opt-out + merge_segments=args.merge_segments, + ) + + total_stats["generated"] += stats["generated"] + total_stats["skipped"] += stats["skipped"] + total_stats["failed"] += stats["failed"] + total_stats["total"] += stats["total"] + total_stats["datasets"] += 1 + + datasets_info.append( + { + "path": dataset, + "segments_with_meshes": stats.get("segments_with_meshes", []), + } + ) + + except Exception as e: + print(f"\nERROR processing {dataset}: {e}") + if args.verbose: + import traceback + + traceback.print_exc() + continue + + print(f"\n" + "=" * 60) + print(f"COMPLETE! Mesh Generation Summary:") + print(f"=" * 60) + print(f" Datasets processed: {total_stats['datasets']}") + print(f" Total segments: {total_stats['total']}") + print(f" ✓ Meshes generated: {total_stats['generated']}") + print(f" ⊘ Segments skipped: {total_stats['skipped']}") + print(f" ✗ Segments failed: {total_stats['failed']}") + + # Generate Neuroglancer state + output_state_path = args.output_state or os.path.join( + input_path, "neuroglancer_state.json" + ) + if datasets_info: + generate_neuroglancer_state( + datasets_info, base_url=args.base_url, output_path=output_state_path + ) + + print(f"\nNext steps:") + print(f" 1. Start HTTP server: cd ~ && npx http-server . -p 8080 --cors") + print(f" 2. Load in Neuroglancer:") + print(f" - Click the {{}} button") + if datasets_info: + print(f" - Paste contents from: {output_state_path}") + print(f" 3. 
Segments will now auto-populate in the UI!") + print(f" 4. Click on any segment ID to select/view it") + + +if __name__ == "__main__": + main() diff --git a/applications/virtual-fly-brain/scripts/meshes_generator_parallel.py b/applications/virtual-fly-brain/scripts/meshes_generator_parallel.py new file mode 100644 index 00000000..0a1ae42e --- /dev/null +++ b/applications/virtual-fly-brain/scripts/meshes_generator_parallel.py @@ -0,0 +1,418 @@ +#!/usr/bin/env python3 +""" +Complete mesh generation and setup for precomputed segmentation volumes. +Automatically creates segment properties for Neuroglancer to discover segments. +Supports parallel processing for faster mesh generation. +""" +import os +import sys +import json +from cloudvolume import CloudVolume +from cloudvolume.mesh import Mesh +from cloudvolume.lib import Bbox +import numpy as np +from multiprocessing import Pool, cpu_count + +try: + from skimage import measure +except ImportError: + print("ERROR: scikit-image not installed. 
Install with: pip install scikit-image") + sys.exit(1) + + +def create_segment_properties(local_path, segment_ids, verbose=True): + """Create segment_properties directory with info file listing all segments.""" + + segment_props_dir = os.path.join(local_path, "segment_properties") + os.makedirs(segment_props_dir, exist_ok=True) + + # Create the info file + info_path = os.path.join(segment_props_dir, "info") + info = { + "@type": "neuroglancer_segment_properties", + "inline": { + "ids": [str(seg_id) for seg_id in segment_ids], + "properties": [ + { + "id": "label", + "type": "label", + "values": [f"Segment {seg_id}" for seg_id in segment_ids], + }, + { + "id": "description", + "type": "description", + "values": [f"Segment ID {seg_id}" for seg_id in segment_ids], + }, + ], + }, + } + + with open(info_path, "w") as f: + json.dump(info, f, indent=2) + + if verbose: + print(f" ✓ Created segment properties for {len(segment_ids)} segments") + + +def setup_mesh_metadata(dataset_path, local_path, verbose=True): + """Ensure mesh metadata is properly configured.""" + vol = CloudVolume(dataset_path, mip=0, use_https=False) + + # Update main info with mesh and segment_properties + needs_update = False + if "mesh" not in vol.info or vol.info["mesh"] is None: + vol.info["mesh"] = "mesh" + needs_update = True + + if "segment_properties" not in vol.info or vol.info["segment_properties"] is None: + vol.info["segment_properties"] = "segment_properties" + needs_update = True + + if needs_update: + vol.commit_info() + if verbose: + print(f" ✓ Updated main info (mesh + segment_properties)") + + # Create mesh directory + mesh_dir = os.path.join(local_path, "mesh") + os.makedirs(mesh_dir, exist_ok=True) + + # Create mesh info + mesh_info_path = os.path.join(mesh_dir, "info") + mesh_info = { + "@type": "neuroglancer_legacy_mesh", + "mip": 0, + "vertex_quantization_bits": 10, + "lod_scale_multiplier": 1.0, + } + + with open(mesh_info_path, "w") as f: + json.dump(mesh_info, f, indent=2) + + 
if verbose: + print(f" ✓ Mesh metadata configured") + + +def generate_single_mesh(args): + """Generate mesh for a single segment (worker function for parallel processing)""" + seg_id, data, dust_threshold = args + + try: + mask = data == seg_id + voxel_count = np.sum(mask) + + if voxel_count < dust_threshold: + return {"status": "skipped", "seg_id": seg_id, "voxels": voxel_count} + + try: + vertices, faces, normals, values = measure.marching_cubes( + mask, level=0.5, allow_degenerate=False + ) + except (ValueError, RuntimeError): + return {"status": "no_surface", "seg_id": seg_id} + + if len(vertices) == 0 or len(faces) == 0: + return {"status": "empty", "seg_id": seg_id} + + vertices = vertices.astype(np.float32) + faces = faces.astype(np.uint32) + + return { + "status": "success", + "seg_id": seg_id, + "vertices": vertices, + "faces": faces, + "voxel_count": voxel_count, + } + + except Exception as e: + return {"status": "error", "seg_id": seg_id, "error": str(e)} + + +def generate_meshes_with_skimage( + dataset_path, dust_threshold=100, verbose=True, num_workers=None +): + """Generate meshes using scikit-image marching cubes.""" + + if verbose: + print(f"\nProcessing: {dataset_path}") + print("=" * 60) + + if dataset_path.startswith("file://"): + local_path = dataset_path.replace("file://", "") + local_path = os.path.expanduser(local_path) + dataset_path = "file://" + local_path + else: + local_path = dataset_path + + setup_mesh_metadata(dataset_path, local_path, verbose) + + vol = CloudVolume(dataset_path, mip=0, use_https=False) + bbox = Bbox([0, 0, 0], vol.info["scales"][0]["size"]) + data = vol[bbox.to_slices()] + + if data.ndim == 4: + data = data[:, :, :, 0] + + if verbose: + print(f" Volume shape: {data.shape}, dtype: {data.dtype}") + print(f" Resolution: {vol.info['scales'][0]['resolution']}") + + # Find ALL unique segments (including those we won't mesh) + all_segments = np.unique(data) + all_segments = all_segments[all_segments > 0] # Exclude 
background + + if verbose: + print(f" Found {len(all_segments)} non-zero segments") + + # Create segment properties for ALL segments + create_segment_properties(local_path, all_segments.tolist(), verbose) + + resolution = vol.info["scales"][0]["resolution"] + + # Determine number of workers + if num_workers is None: + num_workers = max(1, cpu_count() - 1) # Leave one core free + + if verbose: + print(f"\n Generating meshes (dust threshold: {dust_threshold} voxels)...") + print(f" Using {num_workers} parallel workers") + print(f" Note: Vertices will be in voxel coordinates (not physical units)") + + meshes_generated = 0 + meshes_skipped = 0 + meshes_failed = 0 + segments_with_meshes = [] + + # Prepare arguments for parallel processing + mesh_args = [(seg_id, data, dust_threshold) for seg_id in all_segments] + + # Process in parallel + if num_workers > 1: + with Pool(processes=num_workers) as pool: + results = pool.map(generate_single_mesh, mesh_args) + else: + # Sequential processing for debugging + results = [generate_single_mesh(arg) for arg in mesh_args] + + # Write meshes and collect statistics + for idx, result in enumerate(results): + seg_id = result["seg_id"] + status = result["status"] + + if verbose and (idx + 1) % 10 == 0: + print(f" Processed {idx+1}/{len(all_segments)} segments...") + + if status == "success": + # Write mesh to cloudvolume + mesh_obj = Mesh(result["vertices"], result["faces"], segid=int(seg_id)) + vol.mesh.put(mesh_obj, compress=True) + segments_with_meshes.append(int(seg_id)) + meshes_generated += 1 + + if verbose and meshes_generated <= 5: + print( + f" Segment {seg_id}: ✓ ({len(result['vertices'])} verts, {len(result['faces'])} faces)" + ) + + elif status == "skipped": + meshes_skipped += 1 + else: + meshes_failed += 1 + + if verbose: + print(f"\n Summary:") + print(f" ✓ Generated: {meshes_generated} meshes") + print(f" ⊘ Skipped: {meshes_skipped} segments") + print(f" ✗ Failed: {meshes_failed} segments") + print(f" 📋 Total segments 
in properties: {len(all_segments)}") + + return { + "generated": meshes_generated, + "skipped": meshes_skipped, + "failed": meshes_failed, + "total": len(all_segments), + "segments_with_meshes": segments_with_meshes, + } + + +def generate_neuroglancer_state( + datasets_info, base_url="http://localhost:8080/precomputed", output_path=None +): + """Generate a Neuroglancer JSON state with all segments pre-selected.""" + + layers = [] + for dataset_info in datasets_info: + dataset_path = dataset_info["path"] + dataset_name = os.path.basename(dataset_path.rstrip("/")) + segments = dataset_info.get("segments_with_meshes", []) + + # Select first 10 segments by default (for performance) + selected_segments = [str(s) for s in segments[:10]] + + layer = { + "type": "segmentation", + "source": { + "url": f"precomputed://{base_url}/{dataset_name}", + "subsources": {"default": True, "mesh": True, "properties": True}, + "enableDefaultSubsources": False, + }, + "tab": "segments", + "segments": selected_segments, + "hideSegmentZero": True, + "selectedAlpha": 1.0, + "notSelectedAlpha": 0.15, + "objectAlpha": 0.8, + "meshRenderScale": 1, + "meshSilhouetteRendering": 3, + "name": dataset_name, + "visible": True, + } + layers.append(layer) + + state = { + "dimensions": { + "x": [1e-9, "m"], + "y": [5.189161e-10, "m"], + "z": [5.189161e-10, "m"], + }, + "position": [87, 283, 605], + "crossSectionScale": 5, + "projectionScale": 2048, + "layers": layers, + "layout": "4panel", + } + + if output_path: + with open(output_path, "w") as f: + json.dump(state, f, indent=2) + print(f"\n✓ Saved Neuroglancer state to: {output_path}") + + return state + + +def main(): + import argparse + + parser = argparse.ArgumentParser( + description="Generate meshes and segment properties for precomputed segmentation volumes" + ) + parser.add_argument( + "--input-path", + required=True, + help="Path to precomputed dataset directory (containing one or more datasets)", + ) + parser.add_argument( + 
"--dust-threshold", + type=int, + default=100, + help="Minimum voxel count for mesh generation (default: 100)", + ) + parser.add_argument( + "--workers", + type=int, + default=None, + help="Number of parallel workers for mesh generation (default: CPU count - 1)", + ) + parser.add_argument( + "--verbose", action="store_true", help="Print detailed progress" + ) + parser.add_argument( + "--output-state", + help="Output path for Neuroglancer JSON state file (will be saved in input-path if not specified)", + ) + parser.add_argument( + "--base-url", + default="http://localhost:8080", + help="Base URL for HTTP server in generated state (default: http://localhost:8080)", + ) + + args = parser.parse_args() + + # Expand and normalize input path + input_path = os.path.abspath(os.path.expanduser(args.input_path)) + + if not os.path.isdir(input_path): + raise ValueError(f"Input path does not exist: {input_path}") + + # Find all dataset directories in the input path + # A dataset directory has an 'info' file in it + datasets = [] + for item in os.listdir(input_path): + item_path = os.path.join(input_path, item) + if os.path.isdir(item_path) and os.path.exists(os.path.join(item_path, "info")): + datasets.append(f"file://{item_path}") + + if not datasets: + raise ValueError( + f"No precomputed datasets found in {input_path}. Make sure directories contain 'info' files." 
+ ) + + if args.verbose: + print(f"Found {len(datasets)} precomputed datasets in {input_path}") + for ds in datasets: + print(f" - {os.path.basename(ds.replace('file://', ''))}") + + total_stats = {"generated": 0, "skipped": 0, "failed": 0, "total": 0, "datasets": 0} + + datasets_info = [] + + for dataset in datasets: + try: + stats = generate_meshes_with_skimage( + dataset, + dust_threshold=args.dust_threshold, + verbose=args.verbose, + num_workers=args.workers, + ) + + total_stats["generated"] += stats["generated"] + total_stats["skipped"] += stats["skipped"] + total_stats["failed"] += stats["failed"] + total_stats["total"] += stats["total"] + total_stats["datasets"] += 1 + + datasets_info.append( + { + "path": dataset, + "segments_with_meshes": stats.get("segments_with_meshes", []), + } + ) + + except Exception as e: + print(f"\nERROR processing {dataset}: {e}") + if args.verbose: + import traceback + + traceback.print_exc() + continue + + print(f"\n" + "=" * 60) + print(f"COMPLETE! Mesh Generation Summary:") + print(f"=" * 60) + print(f" Datasets processed: {total_stats['datasets']}") + print(f" Total segments: {total_stats['total']}") + print(f" ✓ Meshes generated: {total_stats['generated']}") + print(f" ⊘ Segments skipped: {total_stats['skipped']}") + print(f" ✗ Segments failed: {total_stats['failed']}") + + # Generate Neuroglancer state + if datasets_info: + output_state_path = args.output_state or os.path.join( + input_path, "neuroglancer_state.json" + ) + generate_neuroglancer_state( + datasets_info, base_url=args.base_url, output_path=output_state_path + ) + + print(f"\nNext steps:") + print(f" 1. Start HTTP server: cd ~ && npx http-server . -p 8080 --cors") + print(f" 2. Load in Neuroglancer:") + print(f" - Click the {{}} button") + print(f" - Paste contents from: {output_state_path}") + print(f" 3. Segments will now auto-populate in the UI!") + print(f" 4. 
Click on any segment ID to select/view it") + + +if __name__ == "__main__": + main() diff --git a/applications/virtual-fly-brain/scripts/requirements.txt b/applications/virtual-fly-brain/scripts/requirements.txt new file mode 100644 index 00000000..6b6309fe --- /dev/null +++ b/applications/virtual-fly-brain/scripts/requirements.txt @@ -0,0 +1,16 @@ +# Python dependencies for VFB scripts +# Install with: pip install -r requirements.txt + +# Core dependencies +numpy>=1.20.0 +requests>=2.25.0 + +# NRRD file handling +pynrrd>=1.0.0 + +# Neuroglancer precomputed format +cloud-volume>=8.0.0 + +# Mesh generation and image processing +scikit-image>=0.18.0 +scipy>=1.7.0