diff --git a/README.md b/README.md
index 4bd2be3a..b7c6c14a 100644
--- a/README.md
+++ b/README.md
@@ -185,7 +185,7 @@ python -m cellpose
 The first time cellpose runs it downloads the latest available trained
 model weights from the website.
 
-You can now **drag and drop** any images (*.tif, *.png, *.jpg, *.gif) into the GUI and run Cellpose, and/or manually segment them. When the GUI is processing, you will see the progress bar fill up and during this time you cannot click on anything in the GUI. For more information about what the GUI is doing you can look at the terminal/prompt you opened the GUI with. For example data, see [website](http://www.cellpose.org) or this [zip file](https://www.cellpose.org/static/images/demo_images.zip). For best accuracy and runtime performance, resize images so cells are less than 100 pixels across.
+You can now **drag and drop** any images (*.tif, *.png, *.jpg, *.gif) into the GUI and run Cellpose, and/or manually segment them. When the GUI is processing, you will see the progress bar fill up and during this time you cannot click on anything in the GUI. For more information about what the GUI is doing you can look at the terminal/prompt you opened the GUI with. For example data, see [website](https://www.cellpose.org) or this [zip file](https://www.cellpose.org/static/images/demo_images.zip). For best accuracy and runtime performance, resize images so cells are less than 100 pixels across.
 
 For 3D data, with multi-Z, please use the 3D version of the GUI with:
 ~~~~
@@ -195,7 +195,7 @@ python -m cellpose --Zstack
 
 ## Step-by-step demo
 
-1. Download this [folder](http://cellpose.org/static/images/demo_images.zip) of images and unzip it. These are a subset of the test images from the paper.
+1. Download this [zip file](https://www.cellpose.org/static/images/demo_images.zip) of images and unzip it. These are a subset of the test images from the paper.
 2. Start the GUI with `python -m cellpose`.
 3. Drag an image from the folder into the GUI.
 4. Set the model (in demo all are `cyto`) and the channel you want to segment (in demo all are `green`). Optionally set the second channel if you are segmenting `cyto` and have an available nucleus channel.
diff --git a/cellpose/gui/gui.py b/cellpose/gui/gui.py
index ec1c6c86..d8d6770c 100644
--- a/cellpose/gui/gui.py
+++ b/cellpose/gui/gui.py
@@ -1696,6 +1696,8 @@ def add_set(self):
                     if self.NZ == 1:
                         # only save after each cell if single image
                         io._save_sets_with_check(self)
+            else:
+                print("GUI_ERROR: cell too small, not drawn")
         self.current_stroke = []
         self.strokes = []
         self.current_point_set = []
@@ -1704,7 +1706,7 @@ def add_set(self):
     def add_mask(self, points=None, color=(100, 200, 50), dense=True):
         # points is list of strokes
         points_all = np.concatenate(points, axis=0)
-
+
         # loop over z values
         median = []
         zdraw = np.unique(points_all[:, 0])
@@ -1724,29 +1726,29 @@ def add_mask(self, points=None, color=(100, 200, 50), dense=True):
                 ar, ac = ar + vr.min() - 2, ac + vc.min() - 2
                 # get dense outline
                 contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_NONE)
-                pvc, pvr = contours[-2][0].squeeze().T
+                pvc, pvr = contours[-2][0][:,0].T
                 vr, vc = pvr + vr.min() - 2, pvc + vc.min() - 2
                 # concatenate all points
                 ar, ac = np.hstack((np.vstack((vr, vc)), np.vstack((ar, ac))))
                 # if these pixels are overlapping with another cell, reassign them
                 ioverlap = self.cellpix[z][ar, ac] > 0
-                if (~ioverlap).sum() < 8:
-                    print("ERROR: cell too small without overlaps, not drawn")
+                if (~ioverlap).sum() < 10:
+                    print("GUI_ERROR: cell < 10 pixels without overlaps, not drawn")
                     return None
                 elif ioverlap.sum() > 0:
                     ar, ac = ar[~ioverlap], ac[~ioverlap]
                     # compute outline of new mask
-                    mask = np.zeros((np.ptp(ar) + 4, np.ptp(ac) + 4), np.uint8)
-                    mask[ar - ar.min() + 2, ac - ac.min() + 2] = 1
+                    mask = np.zeros((np.ptp(vr) + 4, np.ptp(vc) + 4), np.uint8)
+                    mask[ar - vr.min() + 2, ac - vc.min() + 2] = 1
                     contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                                 cv2.CHAIN_APPROX_NONE)
-                    pvc, pvr = contours[-2][0].squeeze().T
-                    vr, vc = pvr + ar.min() - 2, pvc + ac.min() - 2
+                    pvc, pvr = contours[-2][0][:,0].T
+                    vr, vc = pvr + vr.min() - 2, pvc + vc.min() - 2
                 ars = np.concatenate((ars, ar), axis=0)
                 acs = np.concatenate((acs, ac), axis=0)
                 vrs = np.concatenate((vrs, vr), axis=0)
                 vcs = np.concatenate((vcs, vc), axis=0)
-
+
             self.draw_mask(z, ars, acs, vrs, vcs, color)
             median.append(np.array([np.median(ars), np.median(acs)]))
@@ -2431,7 +2433,7 @@ def compute_segmentation(self, custom=False, model_name=None, load_model=True):
                 cellprob_threshold=cellprob_threshold,
                 flow_threshold=flow_threshold, do_3D=do_3D, niter=niter,
                 normalize=normalize_params, stitch_threshold=stitch_threshold,
-                progress=self.progress)[:2]
+                progress=self.progress, z_axis=0 if self.NZ > 1 else None)[:2]
         except Exception as e:
             print("NET ERROR: %s" % e)
             self.progress.setValue(0)
diff --git a/cellpose/gui/io.py b/cellpose/gui/io.py
index 2faadd6e..bacb10ea 100644
--- a/cellpose/gui/io.py
+++ b/cellpose/gui/io.py
@@ -477,9 +477,13 @@ def _masks_to_gui(parent, masks, outlines=None, colors=None):
     """ masks loaded into GUI """
     # get unique values
     shape = masks.shape
-    masks = masks.flatten()
-    fastremap.renumber(masks, in_place=True)
-    masks = masks.reshape(shape)
+    if len(fastremap.unique(masks)) != masks.max() + 1:
+        print("GUI_INFO: renumbering masks")
+        fastremap.renumber(masks, in_place=True)
+        outlines = None
+    masks = masks.reshape(shape)
+    if masks.ndim == 2:
+        outlines = None
     masks = masks.astype(np.uint16) if masks.max() < 2**16 - 1 else masks.astype(
         np.uint32)
     if parent.restore and "upsample" in parent.restore:
@@ -518,9 +522,6 @@
             parent.outpix_resize = parent.outpix.copy()
     else:
         parent.outpix = outlines
-        shape = parent.outpix.shape
-        fastremap.renumber(parent.outpix, in_place=True)
-        parent.outpix = np.reshape(parent.outpix, shape)
     if parent.restore and "upsample" in parent.restore:
         parent.outpix_resize = parent.outpix.copy()
     parent.outpix_orig = np.zeros_like(parent.cellpix_orig)
@@ -650,7 +651,6 @@ def _save_sets(parent):
         "diameter": parent.diameter
     }
 
-    print(dat["masks"].shape)
     if parent.restore is not None:
         dat["img_restore"] = parent.stack_filtered
     np.save(base + "_seg.npy", dat)
@@ -692,7 +692,6 @@
         "diameter": parent.diameter
     }
 
-    print(dat["masks"].shape)
     if parent.restore is not None:
         dat["img_restore"] = parent.stack_filtered
     np.save(base + "_seg.npy", dat)
diff --git a/cellpose/models.py b/cellpose/models.py
index d9443d13..952a3176 100644
--- a/cellpose/models.py
+++ b/cellpose/models.py
@@ -29,7 +29,8 @@
     "cyto3", "nuclei", "cyto2_cp3", "tissuenet_cp3", "livecell_cp3", "yeast_PhC_cp3",
     "yeast_BF_cp3", "bact_phase_cp3", "bact_fluor_cp3", "deepbacs_cp3", "cyto2", "cyto",
     "CPx", "transformer_cp3", "neurips_cellpose_default", "neurips_cellpose_transformer",
-    "neurips_grayscale_cyto2"
+    "neurips_grayscale_cyto2",
+    "CP", "CPx", "TN1", "TN2", "TN3", "LC1", "LC2", "LC3", "LC4"
 ]
 
 MODEL_LIST_PATH = os.fspath(MODEL_DIR.joinpath("gui_models.txt"))
@@ -57,16 +58,20 @@ def model_path(model_type, model_index=0):
 
 
 def size_model_path(model_type):
-    if os.path.exists(model_type):
-        return model_type + "_size.npy"
-    else:
-        torch_str = "torch"
-        if model_type == "cyto" or model_type == "nuclei" or model_type == "cyto2":
-            basename = "size_%s%s_0.npy" % (model_type, torch_str)
-        else:
+    torch_str = "torch"
+    if (model_type == "cyto" or model_type == "nuclei" or
+            model_type == "cyto2" or model_type == "cyto3"):
+        if model_type == "cyto3":
             basename = "size_%s.npy" % model_type
+        else:
+            basename = "size_%s%s_0.npy" % (model_type, torch_str)
         return cache_model_path(basename)
-
+    else:
+        if os.path.exists(model_type) and os.path.exists(model_type + "_size.npy"):
+            return model_type + "_size.npy"
+        else:
+            raise FileNotFoundError(f"size model not found ({model_type + '_size.npy'})")
+
 
 def cache_model_path(basename):
     MODEL_DIR.mkdir(parents=True, exist_ok=True)
diff --git a/docs/inputs.rst b/docs/inputs.rst
index 9ce3b1e3..2b2b9093 100644
--- a/docs/inputs.rst
+++ b/docs/inputs.rst
@@ -9,6 +9,11 @@ each channel so that 0 = 1st percentile of image values and
 1 = 99th percentile. If you want to run multiple images in a directory, use the
 command line or a jupyter notebook to run cellpose.
 
+If you have multiple images of the same size, it can be faster to input them into the
+Cellpose `model.eval` function as an array rather than a list, and to run with a large
+batch size. This is because the model can process tiles from multiple images in single batches
+on the GPU if the images are fed in as an array.
+
 3D segmentation
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
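
Not part of the patch: a minimal sketch of the batched-array usage described in the docs/inputs.rst addition above, assuming the public `models.CellposeModel.eval` API. The model choice, image sizes, and `batch_size` value are illustrative, and random arrays stand in for real images.

~~~~
import numpy as np
from cellpose import models

# assumed model choice; any 2D cellpose model should behave the same way here
model = models.CellposeModel(gpu=True, model_type="cyto3")

# eight same-sized grayscale images stacked into one array of shape (8, 256, 256);
# in practice these would be loaded images, random data is only a placeholder
imgs = np.stack([np.random.rand(256, 256).astype(np.float32) for _ in range(8)])

# array input plus a large batch_size lets tiles from different images share GPU
# batches (the speedup described above); a list input runs each image separately.
# z_axis=0 marks the stacking axis, mirroring the z_axis handling added to the GUI.
masks, flows, styles = model.eval(imgs, batch_size=32, channels=[0, 0],
                                  diameter=30, z_axis=0, do_3D=False)
~~~~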