This notebook provides the code used to create the ETOPO and SRTM30Plus seafloor gradient datasets, and gives some justification for the choice of the value of $\sigma$, the smoothing parameter, used in the calculation.
The gradient calculations are performed using a slightly modified version of the Canny function in the Python scikit-image package. The Canny function in that package computes the gradients using Sobel filters as an intermediate step in finding the edges, but only the edges are returned. The only modification made to the source code is to also return the calculated x_gradient, y_gradient and gradient_magnitude.
The Canny function is used, rather than a direct call to the Sobel function, because it includes code to deal with masked values, in this instance the values over land. In the call to the Canny function, the only value that matters is $\sigma$, which determines the window of values used to calculate the gradients, that is, the amount of smoothing. The other values passed to the Canny function are used to calculate the edges, which are ignored in this instance.
The value of $\sigma$ that makes the most sense was determined by looking at graphs of the gradients (below) for different values of $\sigma$, and assessing how well the shelf break off the west coast is defined; the definition should be neither too sharp nor too wide. The code below allows anyone to recalculate the gradients using a different value of $\sigma$. Note that the ETOPO and SRTM30Plus datasets are at different resolutions, so different amounts of smoothing are needed in order to obtain similar results. The netCDF files on ERD's ERDDAP server were computed with $\sigma = 7.5$ for the ETOPO dataset and $\sigma = 12.5$ for the SRTM30Plus dataset.
For each value of $\sigma$ examined, plots of the gradient_magnitude, x_gradient, and y_gradient were shown here; the figures are not reproduced in this text version.
# import needed packages
from Canny1 import *
from cartopy import crs
import cmocean
import geoviews as gv
import geoviews.feature as gf
import holoviews as hv
import matplotlib.pyplot as plt
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
import xarray as xr
"""Documentation for extract_etopo.py.
Program Call and Arguments:
depth, lon_etopo, lat_etopo = extract_etopo(file_name, file_base, lat_min, lat_max, lon_min, lon_max)
file_name - name of file with global etopo data.
file_base - base directory of data file location.
lat_min - minimum latitude of extract on (0, 180).
lat_max - maximum latitude of extract on (0, 180).
lon_min - minimum longitude of extract on (0, 360).
lon_max - maximum longitude of extract on (0, 360).
Program Description:
extract_etopo extracts etopo depth data that falls within the bounding box
defined by (lat_min, lat_max, lon_min, lon_max), masks values at or above sea level (land),
and returns the masked depths along with the extracted longitudes and latitudes.
Depends:
netCDF4.Dataset, numpy.argwhere, numpy.ma.array
"""
def extract_etopo(file_name, file_base = '/Users/rmendels/WorkFiles/seafloor_gradient/etopo/', lat_min = 20., lat_max = 50., lon_min = 230., lon_max = 255.):
import numpy as np
import numpy.ma as ma
from netCDF4 import Dataset
nc_file = file_base + file_name
root = Dataset(nc_file)
lat = root.variables['latitude'][:]
lon = root.variables['longitude'][:]
lat_min_index = np.argwhere(lat == lat_min)
lat_min_index = lat_min_index[0, 0]
lat_max_index = np.argwhere(lat == lat_max)
lat_max_index = lat_max_index[0, 0]
lon_min_index = np.argwhere(lon == lon_min)
lon_min_index = lon_min_index[0, 0]
lon_max_index = np.argwhere(lon == lon_max)
lon_max_index = lon_max_index[0, 0]
lon_etopo = lon[lon_min_index:lon_max_index + 1]
lat_etopo = lat[lat_min_index:lat_max_index + 1]
depth = root.variables['altitude'][lat_min_index:lat_max_index + 1, lon_min_index:lon_max_index + 1 ]
root.close()
depth = ma.array(depth, mask = (depth >= 0))
return depth, lon_etopo, lat_etopo
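As a quick, hedged sketch of how the extraction might be called (the file name below is a placeholder; substitute the actual global ETOPO netCDF file):
# hypothetical example call; 'etopo180_global.nc' is a placeholder file name
depth, lon_etopo, lat_etopo = extract_etopo('etopo180_global.nc')
print(depth.shape, lat_etopo.min(), lat_etopo.max(), lon_etopo.min(), lon_etopo.max())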
"""Documentation for extract_srtm30plus.py.
Program Call and Arguments:
depth, lon_srtm30plus, lat_srtm30plus = extract_srtm30plus(file_name, file_base, lat_min, lat_max, lon_min, lon_max)
file_name - name of file with global srtm30plus data.
file_base - base directory of data file location.
lat_min - minimum latitude of extract on (0, 180).
lat_max - maximum latitude of extract on (0, 180).
lon_min - minimum longitude of extract on (0, 360).
lon_max - maximum longitude of extract on (0, 360).
Program Description:
extract_srtm30plus extracts srtm30plus depth data that falls within the bounding box
defined by (lat_min, lat_max, lon_min, lon_max), masks values at or above sea level (land),
and returns the masked depths along with the extracted longitudes and latitudes.
Depends:
netCDF4.Dataset, numpy.argwhere, numpy.ma.array
"""
def extract_srtm30plus(file_name, file_base = '/Users/rmendels/WorkFiles/seafloor_gradient/srtm30plus/', lat_min = 20.004167, lat_max = 50.004167, lon_min = 230.004167, lon_max = 255.004167):
import numpy as np
import numpy.ma as ma
from netCDF4 import Dataset
nc_file = file_base + file_name
root = Dataset(nc_file)
lat = root.variables['latitude'][:]
lon = root.variables['longitude'][:]
#lat_min_index = np.argwhere(lat == lat_min)
lat_min_index = np.argwhere( np.abs(lat - lat_min) < .00001)
lat_min_index = lat_min_index[0, 0]
#lat_max_index = np.argwhere(lat == lat_max)
lat_max_index = np.argwhere( np.abs(lat_max - lat) < .00001)
lat_max_index = lat_max_index[0, 0]
#lon_min_index = np.argwhere(lon == lon_min)
lon_min_index = np.argwhere( np.abs(lon - lon_min) < .00001)
lon_min_index = lon_min_index[0, 0]
#lon_max_index = np.argwhere(lon == lon_max)
lon_max_index = np.argwhere( np.abs(lon_max - lon) < .00001)
lon_max_index = lon_max_index[0, 0]
lon_srtm30plus = lon[lon_min_index:lon_max_index + 1]
lat_srtm30plus = lat[lat_min_index:lat_max_index + 1]
depth = root.variables['z'][lat_min_index:lat_max_index + 1, lon_min_index:lon_max_index + 1 ]
depth = ma.array(depth, mask = (depth >= 0))
root.close()
return depth, lon_srtm30plus, lat_srtm30plus
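The SRTM30Plus grid is offset from whole degrees (hence the 0.004167 in the defaults and the tolerance-based index matching), but the call is otherwise the same; again the file name is only a placeholder:
# hypothetical example call; 'srtm30plus_global.nc' is a placeholder file name
depth_srtm, lon_srtm30plus, lat_srtm30plus = extract_srtm30plus('srtm30plus_global.nc')
print(depth_srtm.shape)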
## note that for seafloor depth gradient, lower and upper values don't matter
## and the returned edges are disregarded
def myCanny(myData, myMask, sigma = 10., lower = .8, upper = .9, use_quantiles = True):
    # calls the modified version of the Canny algorithm; the returned gradients are re-masked
    # so that land stays masked. Because of the way masks operate, if the data are read in with
    # netCDF4, the mask to pass in is the complement of the masked-array mask, e.g. ~depth.mask
edges, y_gradient, x_gradient, magnitude = canny(myData, sigma = sigma, mask = myMask, low_threshold = lower, high_threshold = upper,
use_quantiles = use_quantiles)
x_gradient = ma.array(x_gradient, mask = myData.mask)
y_gradient = ma.array(y_gradient, mask = myData.mask)
magnitude = ma.array(magnitude, mask = myData.mask)
return edges, x_gradient, y_gradient, magnitude
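A minimal sketch of the wrapper in use, assuming `depth`, `lat_etopo`, and `lon_etopo` come from the hypothetical ETOPO extraction above; note that the mask passed in is the complement of the masked-array mask (True where there is valid ocean data):
# hypothetical usage: gradients of the ETOPO extract with sigma = 7.5 (the value quoted above for the ERDDAP file)
water_mask = ~depth.mask    # True over water, False over land
edges, x_grad, y_grad, mag = myCanny(depth, water_mask, sigma = 7.5)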
def plot_bathy(depth, latitudes, longitudes, title = ' ', fig_size = ([10, 8])):
    # plots the bathymetry field using the cmocean deep colormap
    plt.rcParams["figure.figsize"] = fig_size
    myData_xr = xr.DataArray(depth, coords=[latitudes, longitudes], dims=['latitude', 'longitude'], name = 'depth')
    myData_xr.plot(cmap = cmocean.cm.deep.reversed())
    plt.title(title)
# plots the gradients, as well as the distribution of the gradients
def plot_canny_gradient(my_grad, latitudes, longitudes, title = ' ', fig_size = ([10, 8]) ):
fig, axes = plt.subplots(ncols=2)
myData_xr = xr.DataArray(my_grad, coords=[latitudes, longitudes], dims=['latitude', 'longitude'], name = 'gradient')
if(my_grad.min() < 0.):
myData_xr.plot(cmap = cmocean.cm.balance, ax=axes[0])
else:
myData_xr.plot(cmap = cmocean.cm.amp, ax=axes[0])
myData_xr = xr.DataArray(np.abs(my_grad), coords=[latitudes, longitudes], dims=['latitude', 'longitude'], name = 'gradient')
myData_xr.plot.hist(bins = 100, histtype='step', density = True, stacked = True, cumulative=True, ax=axes[1])
plt.title('')
plt.rcParams["figure.figsize"] = fig_size
plt.tight_layout()
fig.suptitle(title, y = 1.0)
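These helpers might then be called as follows, assuming the hypothetical `depth`, `lat_etopo`, `lon_etopo`, and gradient arrays from the sketches above:
# hypothetical plotting calls using results from the sketches above
plot_bathy(depth, lat_etopo, lon_etopo, title = 'ETOPO bathymetry')
plot_canny_gradient(mag, lat_etopo, lon_etopo, title = 'ETOPO gradient magnitude, sigma = 7.5')
plot_canny_gradient(x_grad, lat_etopo, lon_etopo, title = 'ETOPO x_gradient, sigma = 7.5')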
def create_depth_gradient_nc(f_name, base_dir, depths, lats, lons):
from netCDF4 import Dataset, num2date, date2num
import numpy as np
import numpy.ma as ma
file_name = base_dir + f_name + '_seafloor_gradient.nc'
ncfile = Dataset(file_name, 'w', format = 'NETCDF4')
latsdim = lats.size
lonsdim = lons.size
#Create Dimensions
latdim = ncfile.createDimension('lat', latsdim)
londim = ncfile.createDimension('lon', lonsdim)
#Create Variables
LatLon_Projection = ncfile.createVariable('LatLon_Projection', 'i4')
latitude = ncfile.createVariable('lat', 'f4', ('lat'), zlib = True, complevel = 2)
longitude = ncfile.createVariable('lon', 'f4', ('lon'), zlib = True, complevel = 2)
# edges = ncfile.createVariable('edges', 'f4', ('time', 'altitude', 'lat', 'lon'), fill_value = -9999.0, zlib = True, complevel = 2)
sea_floor_depth = ncfile.createVariable('sea_floor_depth', 'f4', ( 'lat', 'lon'), fill_value = -99999.0, zlib = True, complevel = 2)
x_gradient = ncfile.createVariable('x_gradient', 'f4', ( 'lat', 'lon'), fill_value = -9999.0, zlib = True, complevel = 2)
y_gradient = ncfile.createVariable('y_gradient', 'f4', ('lat', 'lon'), fill_value = -9999.0, zlib = True, complevel = 2)
magnitude_gradient = ncfile.createVariable('magnitude_gradient', 'f4', ('lat', 'lon'), fill_value = -9999.0, zlib = True, complevel = 2)
# int LatLon_Projection ;
LatLon_Projection.grid_mapping_name = "latitude_longitude"
LatLon_Projection.earth_radius = 6367470.
#float lat(lat) ;
latitude._CoordinateAxisType = "Lat"
junk = (lats.min(), lats.max())
latitude.actual_range = junk
latitude.axis = "Y"
latitude.grid_mapping = "Equidistant Cylindrical"
latitude.ioos_category = "Location"
latitude.long_name = "Latitude"
latitude.reference_datum = "geographical coordinates, WGS84 projection"
latitude.standard_name = "latitude"
latitude.units = "degrees_north"
latitude.valid_max = lats.max()
latitude.valid_min = lats.min()
#float lon(lon) ;
longitude._CoordinateAxisType = "Lon"
junk = (lons.min(), lons.max())
longitude.actual_range = junk
longitude.axis = "X"
longitude.grid_mapping = "Equidistant Cylindrical"
longitude.ioos_category = "Location"
longitude.long_name = "Longitude"
longitude.reference_datum = "geographical coordinates, WGS84 projection"
longitude.standard_name = "longitude"
longitude.units = "degrees_east"
longitude.valid_max = lons.max()
longitude.valid_min = lons.min()
#float edges(lat, lon) ;
#edges.long_name = "Frontal Edge"
#edges.missing_value = -9999.
#edges.grid_mapping = "LatLon_Projection"
#edges.coordinates = "time altitude lat lon "
#float sea_floor_depth(lat, lon) ;
sea_floor_depth.long_name = "Sea Floor Depth"
sea_floor_depth.standard_name = "sea_floor_depth"
    sea_floor_depth.missing_value = -99999.
sea_floor_depth.grid_mapping = "LatLon_Projection"
sea_floor_depth.coordinates = "lat lon "
sea_floor_depth.units = 'm'
#float x_gradient(lat, lon) ;
x_gradient.long_name = "East-West Gradient of sea_floor depth"
    x_gradient.missing_value = -9999.
x_gradient.grid_mapping = "LatLon_Projection"
x_gradient.coordinates = "lat lon "
x_gradient.units = 'm'
# float y_gradient(lat, lon) ;
y_gradient.long_name = "North-South Gradient of sea_floor depth"
y_gradient.missing_value = -9999.
y_gradient.grid_mapping = "LatLon_Projection"
y_gradient.coordinates = "lat lon "
y_gradient.units = 'm'
# float magnitude( lat, lon) ;
magnitude_gradient.long_name = "Magnitude of sea_floor depth Gradient"
magnitude_gradient.missing_value = -9999.
magnitude_gradient.grid_mapping = "LatLon_Projection"
magnitude_gradient.coordinates = "lat lon "
magnitude_gradient.units = 'm'
## global
ncfile.title = "Estimated" + f_name + "sea_floor depth x_gradient, y_gradient and gradient magnitude"
ncfile.cdm_data_type = "Grid"
ncfile.Conventions = "COARDS, CF-1.6, ACDD-1.3"
ncfile.standard_name_vocabulary = "CF Standard Name Table v55"
ncfile.creator_email = "erd.data@noaa.gov"
ncfile.creator_name = "NOAA NMFS SWFSC ERD"
ncfile.creator_type = "institution"
ncfile.creator_url = "https://www.pfeg.noaa.gov"
ncfile.Easternmost_Easting = lons.max()
ncfile.Northernmost_Northing = lats.max()
ncfile.Westernmost_Easting = lons.min()
    ncfile.Southernmost_Northing = lats.min()
ncfile.geospatial_lat_max = lats.max()
ncfile.geospatial_lat_min = lats.min()
    ncfile.geospatial_lat_resolution = float(np.abs(lats[1] - lats[0]))
ncfile.geospatial_lat_units = "degrees_north"
ncfile.geospatial_lon_max = lons.max()
ncfile.geospatial_lon_min = lons.min()
    ncfile.geospatial_lon_resolution = float(np.abs(lons[1] - lons[0]))
ncfile.geospatial_lon_units = "degrees_east"
ncfile.infoUrl = ""
ncfile.institution = "NOAA ERD"
ncfile.keywords = ""
ncfile.keywords_vocabulary = "GCMD Science Keywords"
    ncfile.summary = '''Estimated sea_floor depth x-gradient, y-gradient and gradient magnitude,
calculated using the Python scikit-image Canny algorithm with the chosen value of sigma.
'''
ncfile.license = '''The data may be used and redistributed for free but is not intended
for legal use, since it may contain inaccuracies. Neither the data
Contributor, ERD, NOAA, nor the United States Government, nor any
of their employees or contractors, makes any warranty, express or
implied, including warranties of merchantability and fitness for a
particular purpose, or assumes any legal liability for the accuracy,
completeness, or usefulness, of this information.
'''
    history = 'created from ' + f_name + ' using the python scikit-image canny algorithm'
ncfile.history = history
longitude[:] = lons[:]
latitude[:] = lats[:]
sea_floor_depth[:, :] = depths[:, :]
ncfile.close()
def write_depth_gradient_nc(f_name, base_dir, x_gradient, y_gradient, magnitude):
from netCDF4 import Dataset, num2date, date2num
import numpy as np
import numpy.ma as ma
file_name = base_dir + f_name + '_seafloor_gradient.nc'
ncfile = Dataset(file_name, 'a')
x_grad = ncfile.variables['x_gradient']
y_grad = ncfile.variables['y_gradient']
mag_grad = ncfile.variables['magnitude_gradient']
x_grad[ :, :] = x_gradient[:, :]
y_grad[ :, :] = y_gradient[:, :]
mag_grad[ :, :] = magnitude[:, :]
ncfile.close()
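Putting the pieces together, here is a hedged end-to-end sketch of how an ETOPO gradient file might be produced; the file name and output directory are placeholders, and $\sigma = 7.5$ is the value quoted above for the ERDDAP ETOPO file:
# hypothetical end-to-end run for the ETOPO extract; file and directory names are placeholders
out_dir = '/Users/rmendels/WorkFiles/seafloor_gradient/etopo/'
depth, lon_etopo, lat_etopo = extract_etopo('etopo180_global.nc')
edges, x_grad, y_grad, mag = myCanny(depth, ~depth.mask, sigma = 7.5)
create_depth_gradient_nc('etopo', out_dir, depth, lat_etopo, lon_etopo)   # writes depth and metadata
write_depth_gradient_nc('etopo', out_dir, x_grad, y_grad, mag)            # fills in the gradient variables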
"""
canny.py - Canny Edge detector
Reference: Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
Pattern Analysis and Machine Intelligence, 8:679-714, 1986
Originally part of CellProfiler, code licensed under both GPL and BSD licenses.
Website: http://www.cellprofiler.org
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2011 Broad Institute
All rights reserved.
Original author: Lee Kamentsky
"""
import numpy as np
import scipy.ndimage as ndi
from scipy.ndimage import generate_binary_structure, binary_erosion, label
from skimage.filters import gaussian
from skimage import dtype_limits, img_as_float
from skimage._shared.utils import assert_nD
def smooth_with_function_and_mask(image, function, mask):
"""Smooth an image with a linear function, ignoring masked pixels
Parameters
----------
image : array
Image you want to smooth.
function : callable
A function that does image smoothing.
mask : array
Mask with 1's for significant pixels, 0's for masked pixels.
Notes
------
This function calculates the fractional contribution of masked pixels
by applying the function to the mask (which gets you the fraction of
the pixel data that's due to significant points). We then mask the image
and apply the function. The resulting values will be lower by the
bleed-over fraction, so you can recalibrate by dividing by the function
on the mask to recover the effect of smoothing from just the significant
pixels.
"""
bleed_over = function(mask.astype(float))
masked_image = np.zeros(image.shape, image.dtype)
masked_image[mask] = image[mask]
smoothed_image = function(masked_image)
output_image = smoothed_image / (bleed_over + np.finfo(float).eps)
return output_image
def canny(image, sigma=1., low_threshold=None, high_threshold=None, mask=None,
use_quantiles=False):
"""Edge filter an image using the Canny algorithm.
Parameters
-----------
image : 2D array
Grayscale input image to detect edges on; can be of any dtype.
sigma : float
Standard deviation of the Gaussian filter.
low_threshold : float
Lower bound for hysteresis thresholding (linking edges).
If None, low_threshold is set to 10% of dtype's max.
high_threshold : float
Upper bound for hysteresis thresholding (linking edges).
If None, high_threshold is set to 20% of dtype's max.
mask : array, dtype=bool, optional
Mask to limit the application of Canny to a certain area.
use_quantiles : bool, optional
If True then treat low_threshold and high_threshold as quantiles of the
edge magnitude image, rather than absolute edge magnitude values. If True
then the thresholds must be in the range [0, 1].
    Returns
    -------
    output : 2D array (bool)
        The binary edge map.
    isobel, jsobel, magnitude : 2D arrays
        This modified version also returns the smoothed Sobel gradient along
        axis 0 (the y_gradient), along axis 1 (the x_gradient), and the gradient magnitude.
See also
--------
skimage.sobel
Notes
-----
The steps of the algorithm are as follows:
* Smooth the image using a Gaussian with ``sigma`` width.
* Apply the horizontal and vertical Sobel operators to get the gradients
within the image. The edge strength is the norm of the gradient.
* Thin potential edges to 1-pixel wide curves. First, find the normal
to the edge at each point. This is done by looking at the
signs and the relative magnitude of the X-Sobel and Y-Sobel
to sort the points into 4 categories: horizontal, vertical,
diagonal and antidiagonal. Then look in the normal and reverse
directions to see if the values in either of those directions are
greater than the point in question. Use interpolation to get a mix of
points instead of picking the one that's the closest to the normal.
* Perform a hysteresis thresholding: first label all points above the
high threshold as edges. Then recursively label any point above the
low threshold that is 8-connected to a labeled point as an edge.
References
-----------
.. [1] Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
Pattern Analysis and Machine Intelligence, 8:679-714, 1986
.. [2] William Green's Canny tutorial
http://dasl.unlv.edu/daslDrexel/alumni/bGreen/www.pages.drexel.edu/_weg22/can_tut.html
Examples
--------
>>> from skimage import feature
>>> # Generate noisy image of a square
>>> im = np.zeros((256, 256))
>>> im[64:-64, 64:-64] = 1
>>> im += 0.2 * np.random.rand(*im.shape)
>>> # First trial with the Canny filter, with the default smoothing
>>> edges1 = feature.canny(im)
>>> # Increase the smoothing for better results
>>> edges2 = feature.canny(im, sigma=3)
"""
#
# The steps involved:
#
# * Smooth using the Gaussian with sigma above.
#
# * Apply the horizontal and vertical Sobel operators to get the gradients
# within the image. The edge strength is the sum of the magnitudes
# of the gradients in each direction.
#
# * Find the normal to the edge at each point using the arctangent of the
# ratio of the Y sobel over the X sobel - pragmatically, we can
# look at the signs of X and Y and the relative magnitude of X vs Y
# to sort the points into 4 categories: horizontal, vertical,
# diagonal and antidiagonal.
#
# * Look in the normal and reverse directions to see if the values
# in either of those directions are greater than the point in question.
# Use interpolation to get a mix of points instead of picking the one
# that's the closest to the normal.
#
# * Label all points above the high threshold as edges.
# * Recursively label any point above the low threshold that is 8-connected
# to a labeled point as an edge.
#
# Regarding masks, any point touching a masked point will have a gradient
# that is "infected" by the masked point, so it's enough to erode the
# mask by one and then mask the output. We also mask out the border points
# because who knows what lies beyond the edge of the image?
#
assert_nD(image, 2)
dtype_max = dtype_limits(image, clip_negative=False)[1]
if low_threshold is None:
low_threshold = 0.1 * dtype_max
else:
low_threshold = low_threshold / dtype_max
if high_threshold is None:
high_threshold = 0.2 * dtype_max
else:
high_threshold = high_threshold / dtype_max
if mask is None:
mask = np.ones(image.shape, dtype=bool)
def fsmooth(x):
return img_as_float(gaussian(x, sigma, mode='constant'))
smoothed = smooth_with_function_and_mask(image, fsmooth, mask)
jsobel = ndi.sobel(smoothed, axis=1)
isobel = ndi.sobel(smoothed, axis=0)
abs_isobel = np.abs(isobel)
abs_jsobel = np.abs(jsobel)
magnitude = np.hypot(isobel, jsobel)
#
# Make the eroded mask. Setting the border value to zero will wipe
# out the image edges for us.
#
s = generate_binary_structure(2, 2)
eroded_mask = binary_erosion(mask, s, border_value=0)
eroded_mask = eroded_mask & (magnitude > 0)
#
#--------- Find local maxima --------------
#
# Assign each point to have a normal of 0-45 degrees, 45-90 degrees,
# 90-135 degrees and 135-180 degrees.
#
local_maxima = np.zeros(image.shape, bool)
#----- 0 to 45 degrees ------
pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
# Get the magnitudes shifted left to make a matrix of the points to the
# right of pts. Similarly, shift left and down to get the points to the
# top right of pts.
c1 = magnitude[1:, :][pts[:-1, :]]
c2 = magnitude[1:, 1:][pts[:-1, :-1]]
m = magnitude[pts]
w = abs_jsobel[pts] / abs_isobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[:-1, :][pts[1:, :]]
c2 = magnitude[:-1, :-1][pts[1:, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 45 to 90 degrees ------
# Mix diagonal and vertical
#
pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1 = magnitude[:, 1:][pts[:, :-1]]
c2 = magnitude[1:, 1:][pts[:-1, :-1]]
m = magnitude[pts]
w = abs_isobel[pts] / abs_jsobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[:, :-1][pts[:, 1:]]
c2 = magnitude[:-1, :-1][pts[1:, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 90 to 135 degrees ------
# Mix anti-diagonal and vertical
#
pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1a = magnitude[:, 1:][pts[:, :-1]]
c2a = magnitude[:-1, 1:][pts[1:, :-1]]
m = magnitude[pts]
w = abs_isobel[pts] / abs_jsobel[pts]
c_plus = c2a * w + c1a * (1.0 - w) <= m
c1 = magnitude[:, :-1][pts[:, 1:]]
c2 = magnitude[1:, :-1][pts[:-1, 1:]]
c_minus = c2 * w + c1 * (1.0 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 135 to 180 degrees ------
# Mix anti-diagonal and anti-horizontal
#
pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1 = magnitude[:-1, :][pts[1:, :]]
c2 = magnitude[:-1, 1:][pts[1:, :-1]]
m = magnitude[pts]
w = abs_jsobel[pts] / abs_isobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[1:, :][pts[:-1, :]]
c2 = magnitude[1:, :-1][pts[:-1, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#
#---- If use_quantiles is set then calculate the thresholds to use
#
if use_quantiles:
if high_threshold > 1.0 or low_threshold > 1.0:
raise ValueError("Quantile thresholds must not be > 1.0")
if high_threshold < 0.0 or low_threshold < 0.0:
raise ValueError("Quantile thresholds must not be < 0.0")
high_threshold = np.percentile(magnitude, 100.0 * high_threshold)
low_threshold = np.percentile(magnitude, 100.0 * low_threshold)
#
#---- Create two masks at the two thresholds.
#
high_mask = local_maxima & (magnitude >= high_threshold)
low_mask = local_maxima & (magnitude >= low_threshold)
#
# Segment the low-mask, then only keep low-segments that have
# some high_mask component in them
#
strel = np.ones((3, 3), bool)
labels, count = label(low_mask, strel)
    if count == 0:
        return low_mask, isobel, jsobel, magnitude
sums = (np.array(ndi.sum(high_mask, labels,
np.arange(count, dtype=np.int32) + 1),
copy=False, ndmin=1))
good_label = np.zeros((count + 1,), bool)
good_label[1:] = sums > 0
output_mask = good_label[labels]
return output_mask, isobel, jsobel, magnitude
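To make the bleed-over correction in smooth_with_function_and_mask above more concrete, here is a small, self-contained illustration of the same idea, using a uniform (boxcar) filter in place of the Gaussian; the arrays are made up purely for illustration:
# illustrative only: masked smoothing with bleed-over correction on a tiny made-up array
import numpy as np
from scipy.ndimage import uniform_filter

data = np.array([[1., 2., 3.],
                 [4., 5., 6.],
                 [7., 8., 9.]])
valid = np.array([[True, True, False],
                  [True, True, False],
                  [True, True, True ]])    # False marks masked (e.g. land) points

def boxcar(x):
    return uniform_filter(x, size=3, mode='constant')

bleed_over = boxcar(valid.astype(float))       # fraction of each window that is valid
masked_data = np.where(valid, data, 0.0)       # zero out masked points before smoothing
corrected = boxcar(masked_data) / (bleed_over + np.finfo(float).eps)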