diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 3043ef9..e72d412 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -40,6 +40,7 @@ jobs:
         id: 'auth'
         uses: 'google-github-actions/auth@v2'
         with:
+          project_id: 'openet'
           create_credentials_file: true
           workload_identity_provider: 'projects/470570065811/locations/global/workloadIdentityPools/gitaction-pool/providers/gitaction-provider'
           service_account: 'github-actions@openet.iam.gserviceaccount.com'
diff --git a/openet/core/common.py b/openet/core/common.py
index 52f2c00..90a4a6a 100644
--- a/openet/core/common.py
+++ b/openet/core/common.py
@@ -174,17 +174,19 @@ def sentinel2_sr_cloud_mask(input_img):
     return cloud_mask.Not().rename(['cloud_mask'])
 
 
-def landsat_c2_sr_lst_correct(sr_image, ndvi):
+def landsat_c2_sr_lst_correct(input_img, ndvi=None):
     """Apply correction to Collection 2 LST using adjusted ASTER emissivity
 
     Parameters
     ----------
-    sr_image : ee.Image
+    input_img : ee.Image
         Image from a Landsat Collection 2 SR image collection
         with the SPACECRAFT_ID and LANDSAT_PRODUCT_ID metadata properties
-        (e.g. LANDSAT/LC08/C02/T1_L2).
+        (e.g. LANDSAT/LC08/C02/T1_L2). The image itself is not read in this
+        function but is instead used to select from the Level 2 and TOA collections.
     ndvi : ee.Image
-        Normalized difference vegetation index (NDVI)
+        This parameter is deprecated since NDVI is now computed internally
+        in the function, but it is kept to support backwards compatibility.
 
     Returns
     -------
@@ -206,10 +208,10 @@ def landsat_c2_sr_lst_correct(sr_image, ndvi):
     Hulley, G. 2023.
 
     """
-    spacecraft_id = ee.String(sr_image.get('SPACECRAFT_ID'))
+    spacecraft_id = ee.String(input_img.get('SPACECRAFT_ID'))
 
     # Landsat image geometry for clipping ASTER GED
-    image_geom = sr_image.geometry()
+    image_geom = input_img.geometry()
     image_extent = image_geom.bounds(1, 'EPSG:4326')
 
     # Server side approach for getting image extent snapped to the ASTER GED grid
@@ -257,7 +259,7 @@ def landsat_c2_sr_lst_correct(sr_image, ndvi):
         'LANDSAT_8': 0.0584,
         'LANDSAT_9': 0.0457,
     })
-    def get_matched_c2_t1_image(input_img):
+    def get_matched_c2_t1_l2_image(input_img):
         # Find matching Landsat Collection 2 Tier 1 Level 2 image
         # based on the "LANDSAT_PRODUCT_ID" property
         # Build the system:index format scene ID from the LANDSAT_PRODUCT_ID
@@ -284,7 +286,7 @@ def get_matched_c2_t1_image(input_img):
         )
 
     def get_matched_c2_t1_radiance_image(input_img):
-        # Find matching Landsat Collection 2 Tier 1 Level 2 image
+        # Find matching Landsat Collection 2 Tier 1 radiance image
        # based on the "LANDSAT_PRODUCT_ID" property
         # Build the system:index format scene ID from the LANDSAT_PRODUCT_ID
         satellite = ee.String(input_img.get('SPACECRAFT_ID'))
@@ -349,10 +351,21 @@ def get_matched_c2_t1_radiance_image(input_img):
             })
         )
 
-    # Rebuilding the coll2 image here since the extra bands needed for the calculation
-    # will likely have been dropped or excluded before getting to this function
-    coll2 = get_matched_c2_t1_image(sr_image)
-    coll2RT = get_matched_c2_t1_radiance_image(sr_image)
+    # Rebuilding the coll2 image here from the LANDSAT_PRODUCT_ID
+    # since the extra bands needed for the calculation will likely
+    # have been dropped or excluded before getting to this function
+    c02_lvl2_img = get_matched_c2_t1_l2_image(input_img)
+    c02_rad_img = get_matched_c2_t1_radiance_image(input_img)
+
+    # Compute NDVI from the Level 2 SR image
+    # Including the global surface water maximum extent to limit the water mask
+    # to only those areas that have been flagged as water at some point in time
+    # which should help remove shadows that are misclassified as water
+    ndvi = landsat.c02_sr_ndvi(
+        sr_img=landsat.c02_l2_sr(c02_lvl2_img),
+        water_mask=landsat.c02_qa_water_mask(c02_lvl2_img),
+        gsw_extent_flag=True
+    )
 
     # Apply Allen-Kilic Eq. 5 to calc. ASTER emiss. for Landsat
     # This is Eq. 4 of Malakar et al., 2018
@@ -398,27 +411,26 @@ def get_matched_c2_t1_radiance_image(input_img):
     # Using the ASTER-based soil emissivity from above
     # The following estimate for emissivity to use with Landsat may need to be clamped
     # to some predefined safe limits (for example, 0.5 and 1.0).
-    # CGM - Is the .multiply(1.0) needed?
-    fc_landsat = ndvi.multiply(1.0).subtract(0.15).divide(0.65).clamp(0, 1.0)
+    fc_landsat = ndvi.subtract(0.15).divide(0.65).clamp(0, 1.0)
 
     # calc_smoothed_em_soil
-    LS_EM = fc_landsat.multiply(-1).add(1).multiply(em_soil).add(fc_landsat.multiply(veg_emis))
+    ls_em = fc_landsat.multiply(-1).add(1).multiply(em_soil).add(fc_landsat.multiply(veg_emis))
 
     # Apply Eq. 8 to get thermal surface radiance, Rc, from C2 Real time band 10
     # (Eq. 7 of Malakar et al. but without the emissivity to produce actual radiance)
-    Rc = (
-        coll2RT.select(['thermal'])
-        .multiply(ee.Number(coll2RT.get('RADIANCE_MULT_BAND_thermal')))
-        .add(ee.Number(coll2RT.get('RADIANCE_ADD_BAND_thermal')))
-        .subtract(coll2.select(['ST_URAD']).multiply(0.001))
-        .divide(coll2.select(['ST_ATRAN']).multiply(0.0001))
-        .subtract(LS_EM.multiply(-1).add(1).multiply(coll2.select(['ST_DRAD']).multiply(0.001)))
+    rc = (
+        c02_rad_img.select(['thermal'])
+        .multiply(ee.Number(c02_rad_img.get('RADIANCE_MULT_BAND_thermal')))
+        .add(ee.Number(c02_rad_img.get('RADIANCE_ADD_BAND_thermal')))
+        .subtract(c02_lvl2_img.select(['ST_URAD']).multiply(0.001))
+        .divide(c02_lvl2_img.select(['ST_ATRAN']).multiply(0.0001))
+        .subtract(ls_em.multiply(-1).add(1).multiply(c02_lvl2_img.select(['ST_DRAD']).multiply(0.001)))
     )
 
     # Apply Eq. 7 to convert Rs to LST (similar to Malakar et al., but with emissivity)
     return (
-        LS_EM.multiply(ee.Number(k1.get(spacecraft_id)))
-        .divide(Rc).add(1.0).log().pow(-1)
+        ls_em.multiply(ee.Number(k1.get(spacecraft_id)))
+        .divide(rc).add(1.0).log().pow(-1)
         .multiply(ee.Number(k2.get(spacecraft_id)))
         .rename('lst')
     )
diff --git a/openet/core/export.py b/openet/core/export.py
new file mode 100644
index 0000000..fcf6b87
--- /dev/null
+++ b/openet/core/export.py
@@ -0,0 +1,144 @@
+import logging
+
+import ee
+
+from . import utils
+
+
+def mgrs_export_tiles(
+        study_area_coll_id,
+        mgrs_coll_id,
+        study_area_property=None,
+        study_area_features=[],
+        mgrs_tiles=[],
+        mgrs_skip_list=[],
+        utm_zones=[],
+        wrs2_tiles=[],
+        mgrs_property='mgrs',
+        utm_property='utm',
+        wrs2_property='wrs2',
+        cell_size=30,
+):
+    """Select MGRS tiles and metadata that intersect the study area geometry
+
+    Parameters
+    ----------
+    study_area_coll_id : str
+        Study area feature collection asset ID.
+    mgrs_coll_id : str
+        MGRS feature collection asset ID.
+    study_area_property : str, optional
+        Property name to use for inList() filter call of study area collection.
+        Filter will only be applied if both the 'study_area_property' and
+        'study_area_features' parameters are set.
+    study_area_features : list, optional
+        List of study area feature property values to filter on.
+    mgrs_tiles : list, optional
+        User defined MGRS tile subset.
+    mgrs_skip_list : list, optional
+        User defined list of MGRS tiles to skip.
+    utm_zones : list, optional
+        User defined UTM zone subset.
+    wrs2_tiles : list, optional
+        User defined WRS2 tile subset.
+    mgrs_property : str, optional
+        MGRS property in the MGRS feature collection (the default is 'mgrs').
+    utm_property : str, optional
+        UTM zone property in the MGRS feature collection (the default is 'utm').
+    wrs2_property : str, optional
+        WRS2 property in the MGRS feature collection (the default is 'wrs2').
+    cell_size : float, optional
+        Cell size for transform and shape calculation (the default is 30).
+
+    Returns
+    -------
+    list of dicts: export information
+
+    """
+    # Build and filter the study area feature collection
+    logging.debug('Building study area collection')
+    logging.debug(f'  {study_area_coll_id}')
+    study_area_coll = ee.FeatureCollection(study_area_coll_id)
+    if ((study_area_property == 'STUSPS') and
+            ('CONUS' in [x.upper() for x in study_area_features])):
+        # Exclude AK, HI, AS, GU, PR, MP, VI, (but keep DC)
+        study_area_features = [
+            'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA',
+            'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME',
+            'MI', 'MN', 'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ',
+            'NM', 'NV', 'NY', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC', 'SD',
+            'TN', 'TX', 'UT', 'VA', 'VT', 'WA', 'WI', 'WV', 'WY',
+        ]
+    study_area_features = sorted(list(set(study_area_features)))
+
+    if study_area_property and study_area_features:
+        logging.debug('  Filtering study area collection')
+        logging.debug(f'  Property: {study_area_property}')
+        logging.debug(f'  Features: {",".join(study_area_features)}')
+        study_area_coll = study_area_coll.filter(
+            ee.Filter.inList(study_area_property, study_area_features)
+        )
+
+    logging.debug('Building MGRS tile list')
+    tiles_coll = ee.FeatureCollection(mgrs_coll_id).filterBounds(study_area_coll.geometry())
+
+    # Filter collection by user defined lists
+    if utm_zones:
+        logging.debug(f'  Filter user UTM Zones: {utm_zones}')
+        tiles_coll = tiles_coll.filter(ee.Filter.inList(utm_property, utm_zones))
+    if mgrs_skip_list:
+        logging.debug(f'  Filter MGRS skip list: {mgrs_skip_list}')
+        tiles_coll = tiles_coll.filter(ee.Filter.inList(mgrs_property, mgrs_skip_list).Not())
+    if mgrs_tiles:
+        logging.debug(f'  Filter MGRS tiles/zones: {mgrs_tiles}')
+        # Allow MGRS tiles to be subsets of the full tile code
+        # i.e. 
mgrs_tiles = 10TE, 10TF + mgrs_filters = [ + ee.Filter.stringStartsWith(mgrs_property, mgrs_id.upper()) + for mgrs_id in mgrs_tiles + ] + tiles_coll = tiles_coll.filter(ee.call('Filter.or', mgrs_filters)) + + # Drop the MGRS tile geometry to simplify the getInfo call + def drop_geometry(ftr): + return ee.Feature(None).copyProperties(ftr) + + logging.debug(' Requesting tile/zone info') + tiles_info = utils.get_info(tiles_coll.map(drop_geometry)) + + # Constructed as a list of dicts to mimic other interpolation/export tools + tiles_list = [] + for tile_ftr in tiles_info['features']: + mgrs_id = tile_ftr['properties']['mgrs'].upper() + tile_extent = [ + int(tile_ftr['properties']['xmin']), + int(tile_ftr['properties']['ymin']), + int(tile_ftr['properties']['xmax']), + int(tile_ftr['properties']['ymax']) + ] + tile_geo = [cell_size, 0, tile_extent[0], 0, -cell_size, tile_extent[3]] + tile_shape = [ + int((tile_extent[2] - tile_extent[0]) / cell_size), + int((tile_extent[3] - tile_extent[1]) / cell_size) + ] + tiles_list.append({ + 'crs': 'EPSG:{:d}'.format(int(tile_ftr['properties']['epsg'])), + 'extent': tile_extent, + 'geo': tile_geo, + 'index': tile_ftr['properties']['mgrs'].upper(), + 'maxpixels': tile_shape[0] * tile_shape[1] + 1, + 'shape': tile_shape, + 'utm': int(mgrs_id[:2]), + 'wrs2_tiles': sorted(utils.wrs2_str_2_set(tile_ftr['properties'][wrs2_property])), + }) + + # Apply the user defined WRS2 tile list + if wrs2_tiles: + logging.debug(f' Filter WRS2 tiles: {wrs2_tiles}') + for tile in tiles_list: + tile['wrs2_tiles'] = sorted(list(set(tile['wrs2_tiles']) & set(wrs2_tiles))) + + # Only return export tiles that have intersecting WRS2 tiles + export_list = [t for t in sorted(tiles_list, key=lambda k: k['index']) if t['wrs2_tiles']] + + return export_list diff --git a/openet/core/interpolate.py b/openet/core/interpolate.py index 0f20a2f..9f7a27e 100644 --- a/openet/core/interpolate.py +++ b/openet/core/interpolate.py @@ -1,4 +1,4 @@ -import datetime +from datetime import datetime, timedelta import logging from dateutil.relativedelta import relativedelta @@ -444,20 +444,20 @@ def from_scene_et_fraction( # Adjust start/end dates based on t_interval # Increase the date range to fully include the time interval - start_dt = datetime.datetime.strptime(start_date, '%Y-%m-%d') - end_dt = datetime.datetime.strptime(end_date, '%Y-%m-%d') + start_dt = datetime.strptime(start_date, '%Y-%m-%d') + end_dt = datetime.strptime(end_date, '%Y-%m-%d') if t_interval.lower() == 'monthly': - start_dt = datetime.datetime(start_dt.year, start_dt.month, 1) + start_dt = datetime(start_dt.year, start_dt.month, 1) end_dt -= relativedelta(days=+1) - end_dt = datetime.datetime(end_dt.year, end_dt.month, 1) + end_dt = datetime(end_dt.year, end_dt.month, 1) end_dt += relativedelta(months=+1) start_date = start_dt.strftime('%Y-%m-%d') end_date = end_dt.strftime('%Y-%m-%d') # The start/end date for the interpolation include more days # (+/- interp_days) than are included in the ETr collection - interp_start_dt = start_dt - datetime.timedelta(days=interp_days) - interp_end_dt = end_dt + datetime.timedelta(days=interp_days) + interp_start_dt = start_dt - timedelta(days=interp_days) + interp_end_dt = end_dt + timedelta(days=interp_days) interp_start_date = interp_start_dt.date().isoformat() interp_end_date = interp_end_dt.date().isoformat() @@ -914,20 +914,20 @@ def from_scene_et_actual( # Adjust start/end dates based on t_interval # Increase the date range to fully include the time interval - start_dt = 
datetime.datetime.strptime(start_date, '%Y-%m-%d')
-    end_dt = datetime.datetime.strptime(end_date, '%Y-%m-%d')
+    start_dt = datetime.strptime(start_date, '%Y-%m-%d')
+    end_dt = datetime.strptime(end_date, '%Y-%m-%d')
     if t_interval.lower() == 'monthly':
-        start_dt = datetime.datetime(start_dt.year, start_dt.month, 1)
+        start_dt = datetime(start_dt.year, start_dt.month, 1)
         end_dt -= relativedelta(days=+1)
-        end_dt = datetime.datetime(end_dt.year, end_dt.month, 1)
+        end_dt = datetime(end_dt.year, end_dt.month, 1)
         end_dt += relativedelta(months=+1)
     start_date = start_dt.strftime('%Y-%m-%d')
     end_date = end_dt.strftime('%Y-%m-%d')
 
     # The start/end date for the interpolation include more days
     # (+/- interp_days) than are included in the ETr collection
-    interp_start_dt = start_dt - datetime.timedelta(days=interp_days)
-    interp_end_dt = end_dt + datetime.timedelta(days=interp_days)
+    interp_start_dt = start_dt - timedelta(days=interp_days)
+    interp_end_dt = end_dt + timedelta(days=interp_days)
     interp_start_date = interp_start_dt.date().isoformat()
     interp_end_date = interp_end_dt.date().isoformat()
diff --git a/openet/core/landsat.py b/openet/core/landsat.py
index 86a8561..1758b32 100644
--- a/openet/core/landsat.py
+++ b/openet/core/landsat.py
@@ -1,6 +1,100 @@
 import ee
 
 
+def c02_l2_sr(input_img):
+    """Prepare a Collection 2 Level 2 image to surface reflectance [0-1] and LST [K] values
+
+    Parameters
+    ----------
+    input_img : ee.Image
+        Image from a Landsat Collection 2 Level 2 image collection with SPACECRAFT_ID property
+        (e.g. LANDSAT/LC08/C02/T1_L2).
+
+    Returns
+    -------
+    ee.Image
+
+    """
+    # Use the SPACECRAFT_ID property to identify each Landsat type
+    spacecraft_id = ee.String(input_img.get('SPACECRAFT_ID'))
+
+    # Rename bands to generic names
+    input_bands = ee.Dictionary({
+        'LANDSAT_4': ['SR_B1', 'SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B7', 'ST_B6', 'QA_PIXEL', 'QA_RADSAT'],
+        'LANDSAT_5': ['SR_B1', 'SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B7', 'ST_B6', 'QA_PIXEL', 'QA_RADSAT'],
+        'LANDSAT_7': ['SR_B1', 'SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B7', 'ST_B6', 'QA_PIXEL', 'QA_RADSAT'],
+        'LANDSAT_8': ['SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B6', 'SR_B7', 'ST_B10', 'QA_PIXEL', 'QA_RADSAT'],
+        'LANDSAT_9': ['SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B6', 'SR_B7', 'ST_B10', 'QA_PIXEL', 'QA_RADSAT'],
+    })
+    output_bands = ['blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'lst', 'QA_PIXEL', 'QA_RADSAT']
+
+    return (
+        input_img
+        .select(input_bands.get(spacecraft_id), output_bands)
+        .multiply([0.0000275, 0.0000275, 0.0000275, 0.0000275, 0.0000275, 0.0000275, 0.00341802, 1, 1])
+        .add([-0.2, -0.2, -0.2, -0.2, -0.2, -0.2, 149.0, 0, 0])
+        .set({
+            'system:time_start': input_img.get('system:time_start'),
+            'system:index': input_img.get('system:index'),
+            'SPACECRAFT_ID': spacecraft_id,
+            'LANDSAT_PRODUCT_ID': input_img.get('LANDSAT_PRODUCT_ID'),
+            'LANDSAT_SCENE_ID': input_img.get('LANDSAT_SCENE_ID'),
+            # 'CLOUD_COVER_LAND': input_img.get('CLOUD_COVER_LAND'),
+        })
+    )
+
+
+def c02_sr_ndvi(sr_img, water_mask=None, gsw_extent_flag=False):
+    """Landsat Collection 2 normalized difference vegetation index (NDVI)
+
+    A specialized function is needed for Collection 2 since the reflectance values can be both
+    negative and greater than 1, which causes problems in the EE .normalizedDifference() function.
+
+    Parameters
+    ----------
+    sr_img : ee.Image
+        "Prepped" Landsat image with standardized band names of "nir" and "red".
+    water_mask : ee.Image, optional
+        Mask used to identify pixels with negative or very low reflectance that will be set to -0.1.
+    gsw_extent_flag : bool, optional
+        If True, apply the global surface water extent mask to the QA_PIXEL water mask
+        to help avoid misclassified shadows being included in the water mask.
+
+    Returns
+    -------
+    ee.Image
+
+    """
+    # Force the input values to be greater than or equal to zero
+    # since C02 surface reflectance values can be negative
+    # but the normalizedDifference function will return nodata
+    ndvi_img = sr_img.max(0).normalizedDifference(['nir', 'red'])
+
+    b1 = sr_img.select(['nir'])
+    b2 = sr_img.select(['red'])
+
+    # Assume that very high reflectance values are unreliable for computing the index
+    # and set the output value to 0
+    # Threshold value could be set lower, but for now only trying to catch saturated pixels
+    ndvi_img = ndvi_img.where(b1.gte(1).Or(b2.gte(1)), 0)
+
+    # Assume that low reflectance values are unreliable for computing the index and set to 0
+    ndvi_img = ndvi_img.where(b1.lt(0.01).And(b2.lt(0.01)), 0)
+
+    # If both reflectance values are below the threshold, and if the pixel is flagged as water,
+    # set the output to -0.1 (should this be -1?)
+    if water_mask:
+        if gsw_extent_flag:
+            gsw_extent_mask = ee.Image('JRC/GSW1_4/GlobalSurfaceWater').select(['max_extent']).gte(1)
+            water_mask = water_mask.And(gsw_extent_mask)
+        ndvi_img = ndvi_img.where(b1.lt(0.01).And(b2.lt(0.01)).And(water_mask), -0.1)
+
+    # Should there be an additional check for if either value was negative?
+    # ndvi_img = ndvi_img.where(b1.lt(0).Or(b2.lt(0)), 0)
+
+    return ndvi_img.clamp(-1.0, 1.0).rename(['ndvi'])
+
+
 def c02_qa_pixel_mask(
         input_img,
         cirrus_flag=False,
@@ -99,6 +193,24 @@ def c02_qa_pixel_mask(
     return mask_img.rename(['mask'])
 
 
+def c02_qa_water_mask(input_img):
+    """Landsat Collection 2 QA_PIXEL band water mask
+
+    Parameters
+    ----------
+    input_img : ee.Image
+        Image from a Landsat Collection 2 SR image collection
+        with the QA_PIXEL band and the SPACECRAFT_ID property
+        (e.g. LANDSAT/LC08/C02/T1_L2).
+
+    Returns
+    -------
+    ee.Image
+
+    """
+    return input_img.select(['QA_PIXEL']).rightShift(7).bitwiseAnd(1).neq(0).rename('qa_water_mask')
+
+
 def c02_cloud_score_mask(input_img, cloud_score_pct=100):
     """Landsat Collection 2 TOA simple cloud score based cloud mask
 
@@ -235,7 +347,7 @@ def c02_matched_toa_coll(
     input_img : ee.Image
         Image with the "image_property" metadata property.
     image_property : str
-        The metedata property name in input_img to use as a match criteria
+        The metadata property name in input_img to use as a match criteria
         (the default is "LANDSAT_SCENE_ID"). 
match_property : str The metadata property name in the Landsat Collection 2 TOA collections diff --git a/openet/core/tests/test_a_utils.py b/openet/core/tests/test_a_utils.py index ff6bc5d..d0e2ff0 100644 --- a/openet/core/tests/test_a_utils.py +++ b/openet/core/tests/test_a_utils.py @@ -1,4 +1,4 @@ -import datetime +from datetime import datetime import types import ee @@ -8,7 +8,7 @@ def arg_valid_date(): - assert utils.arg_valid_date('2020-03-10') == datetime.datetime(2020, 3, 10) + assert utils.arg_valid_date('2020-03-10') == datetime(2020, 3, 10) def arg_valid_date_exception(): @@ -38,6 +38,11 @@ def test_affine_transform(): assert output == [0.0002777777777777778, 0, -179.0001388888889, 0, -0.0002777777777777778, 61.00013888888889] +# TODO: Write this test +# def test_build_parent_folders(): +# assert False + + def test_date_0utc(date='2015-07-13'): assert utils.get_info(utils.date_0utc( ee.Date(date).advance(2, 'hour')).format('yyyy-MM-dd')) == date @@ -45,17 +50,17 @@ def test_date_0utc(date='2015-07-13'): def test_date_range_type(): output = utils.date_range( - datetime.datetime(2020, 1, 1), datetime.datetime(2020, 1, 3)) + datetime(2020, 1, 1), datetime(2020, 1, 3)) assert isinstance(output, types.GeneratorType) @pytest.mark.parametrize( 'start_dt, end_dt, expected', [ - [datetime.datetime(2020, 1, 1), datetime.datetime(2020, 1, 3), 3], - [datetime.datetime(2003, 12, 30), datetime.datetime(2004, 1, 3), 5], - [datetime.datetime(2004, 2, 28), datetime.datetime(2004, 3, 1), 3], - [datetime.datetime(2001, 1, 1), datetime.datetime(2002, 1, 1), 366], + [datetime(2020, 1, 1), datetime(2020, 1, 3), 3], + [datetime(2003, 12, 30), datetime(2004, 1, 3), 5], + [datetime(2004, 2, 28), datetime(2004, 3, 1), 3], + [datetime(2001, 1, 1), datetime(2002, 1, 1), 366], ] ) def test_date_range_defaults(start_dt, end_dt, expected): @@ -66,10 +71,10 @@ def test_date_range_defaults(start_dt, end_dt, expected): @pytest.mark.parametrize( 'start_dt, end_dt, days, expected', [ - [datetime.datetime(2001, 1, 1), datetime.datetime(2001, 1, 1), 2, 1], - [datetime.datetime(2001, 1, 1), datetime.datetime(2001, 1, 2), 2, 1], - [datetime.datetime(2001, 1, 1), datetime.datetime(2001, 1, 3), 2, 2], - [datetime.datetime(2001, 1, 1), datetime.datetime(2001, 1, 4), 2, 2], + [datetime(2001, 1, 1), datetime(2001, 1, 1), 2, 1], + [datetime(2001, 1, 1), datetime(2001, 1, 2), 2, 1], + [datetime(2001, 1, 1), datetime(2001, 1, 3), 2, 2], + [datetime(2001, 1, 1), datetime(2001, 1, 4), 2, 2], ] ) def test_date_range_days(start_dt, end_dt, days, expected): @@ -80,8 +85,7 @@ def test_date_range_days(start_dt, end_dt, days, expected): @pytest.mark.parametrize( 'start_dt, end_dt, skip_leap_days, expected', [ - [datetime.datetime(2004, 2, 28), datetime.datetime(2004, 3, 1), True, 2], - # [datetime.datetime(2000, 2, 28), datetime.datetime(2000, 3, 1), True, 2], + [datetime(2004, 2, 28), datetime(2004, 3, 1), True, 2], ] ) def test_date_range_skip_leap_days(start_dt, end_dt, skip_leap_days, expected): @@ -89,6 +93,41 @@ def test_date_range_skip_leap_days(start_dt, end_dt, skip_leap_days, expected): start_dt, end_dt, skip_leap_days=skip_leap_days))) == expected +@pytest.mark.parametrize( + 'start_dt, end_dt, exclusive_end_dates, expected', + [ + [ + datetime(2004, 2, 1), datetime(2004, 2, 28), False, + [(datetime(2004, 2, 1), datetime(2004, 2, 28))] + ], + [ + datetime(2004, 2, 1), datetime(2004, 2, 28), True, + [(datetime(2004, 2, 1), datetime(2004, 2, 29))] + ], + [ + datetime(2005, 2, 1), datetime(2005, 2, 28), True, + 
[(datetime(2005, 2, 1), datetime(2005, 3, 1))]
+        ],
+        [
+            datetime(2004, 2, 1), datetime(2005, 2, 28), False,
+            [
+                (datetime(2004, 2, 1), datetime(2004, 12, 31)),
+                (datetime(2005, 1, 1), datetime(2005, 2, 28))
+            ]
+        ],
+        [
+            datetime(2005, 2, 1), datetime(2006, 2, 28), True,
+            [
+                (datetime(2005, 2, 1), datetime(2006, 1, 1)),
+                (datetime(2006, 1, 1), datetime(2006, 3, 1))
+            ]
+        ],
+    ]
+)
+def test_date_years(start_dt, end_dt, exclusive_end_dates, expected):
+    assert list(utils.date_years(start_dt, end_dt, exclusive_end_dates)) == expected
+
+
 # TODO: Write this test
 # def test_delay_task():
 #     assert False
@@ -115,7 +154,6 @@ def test_get_ee_assets_exception():
 
 
 @pytest.mark.parametrize(
-    # Note: These are made up values
     'input_value, expected',
     [
         [300, True],
@@ -130,7 +168,11 @@ def test_is_number(input_value, expected):
 
 
 def test_millis():
-    assert utils.millis(datetime.datetime(2015, 7, 13)) == 1436745600000
+    assert utils.millis(datetime(2015, 7, 13)) == 1436745600000
+
+
+def test_parse_landsat_id():
+    assert utils.parse_landsat_id('LC08_030036_20210725') == ('LC08', 30, 36, 2021, 7, 25)
 
 
 @pytest.mark.parametrize(
@@ -199,6 +241,10 @@ def test_list_2_str_ranges(input_value, expected):
     assert utils.list_2_str_ranges(input_value) == expected
 
 
+def test_ver_str_2_num():
+    assert utils.ver_str_2_num('0.20.6') == [0, 20, 6]
+
+
 def test_constant_image_value(expected=10.123456789, tol=0.000001):
     output = utils.constant_image_value(ee.Image.constant(expected))
     assert abs(output['constant'] - expected) <= tol
diff --git a/openet/core/tests/test_b_landsat.py b/openet/core/tests/test_b_landsat.py
index 7c3e3c8..ede70a6 100644
--- a/openet/core/tests/test_b_landsat.py
+++ b/openet/core/tests/test_b_landsat.py
@@ -5,6 +5,21 @@ import openet.core.utils as utils
 
 
+# TODO: Add additional tests and/or rebuild these as a constant image test
+def test_c02_l2_sr_band_names():
+    input_img = ee.Image('LANDSAT/LT05/C02/T1_L2/LT05_042034_20091016')
+    output = utils.get_info(landsat.c02_l2_sr(input_img).bandNames())
+    assert output == ['blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'lst', 'QA_PIXEL', 'QA_RADSAT']
+
+
+# TODO: Add additional tests and/or rebuild as a constant image test
+def test_c02_ndvi_band_name():
+    input_img = ee.Image('LANDSAT/LT05/C02/T1_L2/LT05_042034_20091016')
+    sr_img = landsat.c02_l2_sr(input_img).select(['red', 'nir'])
+    output = utils.get_info(landsat.c02_sr_ndvi(sr_img).bandNames())
+    assert output == ['ndvi']
+
+
 @pytest.mark.parametrize(
     "qa_pixel, expected",
     [
@@ -76,6 +91,20 @@ def test_c02_qa_pixel_mask_flags(img_value, arg_name, flag_value, expected):
     assert utils.constant_image_value(output_img)['mask'] == expected
 
 
+@pytest.mark.parametrize(
+    "qa_pixel, expected",
+    [
+        ['0000000000000000', 0],  # Designated Fill
+        ['0000000000000001', 0],
+        ['0000000010000000', 1],  # Water
+    ]
+)
+def test_c02_qa_mask_defaults(qa_pixel, expected):
+    input_img = ee.Image.constant([int(qa_pixel, 2)]).rename(['QA_PIXEL'])
+    output_img = landsat.c02_qa_water_mask(input_img)
+    assert utils.constant_image_value(output_img)['qa_water_mask'] == expected
+
+
 @pytest.mark.parametrize(
     "scene_id, time_start",
     [
diff --git a/openet/core/tests/test_common.py b/openet/core/tests/test_common.py
index ce7f1d7..af530ea 100644
--- a/openet/core/tests/test_common.py
+++ b/openet/core/tests/test_common.py
@@ -121,18 +121,18 @@ def test_sentinel2_sr_cloud_mask(img_value, expected):
 
 
 def test_landsat_c2_sr_lst_correct():
     # Basic function test with default inputs
-    sr_img = 
ee.Image('LANDSAT/LC08/C02/T1_L2/LC08_030036_20210725') - ndvi_img = sr_img.multiply(0.0000275).add(-0.2).normalizedDifference(['SR_B5', 'SR_B4']) - output_img = common.landsat_c2_sr_lst_correct(sr_img, ndvi_img) + input_img = ee.Image('LANDSAT/LC08/C02/T1_L2/LC08_030036_20210725') + ndvi_img = input_img.multiply(0.0000275).add(-0.2).normalizedDifference(['SR_B5', 'SR_B4']) + output_img = common.landsat_c2_sr_lst_correct(input_img, ndvi_img) output = utils.get_info(output_img) assert output['bands'][0]['id'] == 'lst' def test_landsat_c2_sr_lst_parameter_keywords(): # Check that the function parameter keywords all work - sr_img = ee.Image('LANDSAT/LC08/C02/T1_L2/LC08_030036_20210725') - ndvi_img = sr_img.multiply(0.0000275).add(-0.2).normalizedDifference(['SR_B5', 'SR_B4']) - output_img = common.landsat_c2_sr_lst_correct(ndvi=ndvi_img, sr_image=sr_img) + input_img = ee.Image('LANDSAT/LC08/C02/T1_L2/LC08_030036_20210725') + ndvi_img = input_img.multiply(0.0000275).add(-0.2).normalizedDifference(['SR_B5', 'SR_B4']) + output_img = common.landsat_c2_sr_lst_correct(ndvi=ndvi_img, input_img=input_img) output = utils.get_info(output_img) assert output['bands'][0]['id'] == 'lst' diff --git a/openet/core/tests/test_export.py b/openet/core/tests/test_export.py new file mode 100644 index 0000000..a996a66 --- /dev/null +++ b/openet/core/tests/test_export.py @@ -0,0 +1,75 @@ +import pprint + +import openet.core.export as export + + +def test_mgrs_export_tiles(): + output = export.mgrs_export_tiles( + study_area_coll_id='TIGER/2018/States', + mgrs_coll_id='projects/openet/assets/mgrs/conus/gridmet/zones', + ) + # Hardcoding these values for now, but they could be changed to conditionals + assert list(output[0].keys()) == ['crs', 'extent', 'geo', 'index', 'maxpixels', 'shape', 'utm', 'wrs2_tiles'] + assert output[0]['crs'] == 'EPSG:32610' + assert output[0]['extent'] == [399975, 3699975, 779985, 4399995] + assert output[0]['geo'] == [30, 0, 399975, 0, -30, 4399995] + assert output[0]['index'] == '10S' + assert output[0]['maxpixels'] == 295571779 + assert output[0]['shape'] == [12667, 23334] + assert output[0]['utm'] == 10 + assert 'p042r035' in output[0]['wrs2_tiles'] + + +def test_mgrs_export_tiles_study_area_features_param(): + output = export.mgrs_export_tiles( + study_area_coll_id='TIGER/2018/States', + mgrs_coll_id='projects/openet/assets/mgrs/conus/gridmet/zones', + study_area_property='STUSPS', + study_area_features=['CA'], + ) + assert ['10S', '10T', '11S', '11T'] == [tile['index'] for tile in output] + + +def test_mgrs_export_tiles_mgrs_keep_list_param(): + output = export.mgrs_export_tiles( + study_area_coll_id='TIGER/2018/States', + mgrs_coll_id='projects/openet/assets/mgrs/conus/gridmet/zones', + study_area_property='STUSPS', + study_area_features=['CA', 'NV'], + mgrs_tiles=['10S'], + ) + assert ['10S'] == [tile['index'] for tile in output] + + +def test_mgrs_export_tiles_mgrs_skip_list_param(): + output = export.mgrs_export_tiles( + study_area_coll_id='TIGER/2018/States', + mgrs_coll_id='projects/openet/assets/mgrs/conus/gridmet/zones', + study_area_property='STUSPS', + study_area_features=['CA', 'NV'], + mgrs_skip_list=['10S'], + ) + assert ['10T', '11S', '11T'] == [tile['index'] for tile in output] + + +def test_mgrs_export_tiles_utm_zones_param(): + output = export.mgrs_export_tiles( + study_area_coll_id='TIGER/2018/States', + mgrs_coll_id='projects/openet/assets/mgrs/conus/gridmet/zones', + study_area_property='STUSPS', + study_area_features=['CA', 'NV'], + utm_zones=[11], + ) + assert 
['11S', '11T'] == [tile['index'] for tile in output] + + +def test_mgrs_export_tiles_wrs2_tiles_param(): + output = export.mgrs_export_tiles( + study_area_coll_id='TIGER/2018/States', + mgrs_coll_id='projects/openet/assets/mgrs/conus/gridmet/zones', + study_area_property='STUSPS', + study_area_features=['CA'], + mgrs_tiles=['10S'], + wrs2_tiles=['p042r035'] + ) + assert ['p042r035'] == output[0]['wrs2_tiles'] diff --git a/openet/core/tests/test_interpolate.py b/openet/core/tests/test_interpolate.py index 476c707..f334aaa 100644 --- a/openet/core/tests/test_interpolate.py +++ b/openet/core/tests/test_interpolate.py @@ -1,4 +1,4 @@ -import datetime +from datetime import datetime, timezone import logging import ee @@ -18,8 +18,7 @@ def tgt_image(tgt_value, tgt_time): return ( ee.Image.constant(tgt_value).rename(['tgt']) .set({'system:time_start': tgt_time, - 'system:index': datetime.datetime.utcfromtimestamp( - tgt_time / 1000.0).strftime('%Y%m%d')}) + 'system:index': datetime.fromtimestamp(tgt_time / 1000.0, tz=timezone.utc).strftime('%Y%m%d')}) ) diff --git a/openet/core/utils.py b/openet/core/utils.py index 7cf3399..a1e428c 100644 --- a/openet/core/utils.py +++ b/openet/core/utils.py @@ -1,6 +1,6 @@ import argparse import calendar -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone import itertools import json import logging @@ -64,6 +64,23 @@ def arg_valid_file(file_path): raise argparse.ArgumentTypeError(f'{file_path} does not exist') +def build_parent_folders(folder_id, set_public=False): + """Build the asset folder including parents""" + # Build any parent folders above the "3rd" level + # i.e. after "projects/openet/assets" or "projects/openet/folder" + public_policy = {'bindings': [{'role': 'roles/viewer', 'members': ['allUsers']}]} + folder_id_split = folder_id.replace('projects/earthengine-legacy/assets/', '').split('/') + for i in range(len(folder_id_split)): + if i <= 3: + continue + folder_id = '/'.join(folder_id_split[:i]) + if not ee.data.getInfo(folder_id): + print(f' Building folder: {folder_id}') + ee.data.createAsset({'type': 'FOLDER'}, folder_id) + if set_public: + ee.data.setIamPolicy(folder_id, public_policy) + + def date_0utc(date): """Get the 0 UTC date for a date @@ -85,9 +102,9 @@ def date_range(start_dt, end_dt, days=1, skip_leap_days=False): Parameters ---------- start_dt : datetime - start date. + Start date. end_dt : datetime - end date. + End date. days : int, optional Step size (the default is 1). skip_leap_days : bool, optional @@ -106,59 +123,139 @@ def date_range(start_dt, end_dt, days=1, skip_leap_days=False): curr_dt += timedelta(days=days) -def delay_task(delay_time=0, max_ready=-1): - """Delay script execution based on number of RUNNING and READY tasks +def date_years(start_dt, end_dt, exclusive_end_dates=False): + """Generate separate start and end dates for each year in a date range + + Parameters + ---------- + start_dt : datetime + Start date. + end_dt : datetime + End date. + exclusive_end_dates : bool, optional + If True, set the end dates for each iteration range to be exclusive. 
+
+    Yields
+    ------
+    start and end datetimes for each year
+
+    """
+    if (end_dt - start_dt).days > 366:
+        for year in range(start_dt.year, end_dt.year + 1):
+            year_start_dt = max(datetime(year, 1, 1), start_dt)
+            year_end_dt = datetime(year + 1, 1, 1) - timedelta(days=1)
+            year_end_dt = min(year_end_dt, end_dt)
+            if exclusive_end_dates:
+                year_end_dt = year_end_dt + timedelta(days=1)
+            yield year_start_dt, year_end_dt
+    else:
+        if exclusive_end_dates:
+            yield start_dt, end_dt + timedelta(days=1)
+        else:
+            yield start_dt, end_dt
+
+
+def delay_task(delay_time=0, task_max=-1, task_count=0):
+    """Delay script execution based on number of READY tasks
 
     Parameters
     ----------
     delay_time : float, int
         Delay time in seconds between starting export tasks or checking the
-        number of queued tasks if "max_ready" is > 0. The default is 0.
-        The delay time will be set to a minimum of 10 seconds if max_ready > 0.
-    max_ready : int, optional
-        Maximum number of queued "READY" tasks. The default is -1 which
-        implies no limit to the number of tasks that will be submitted.
+        number of queued tasks if "task_max" is > 0. The default is 0.
+        The delay time will be set to a minimum of 10 seconds if
+        task_max > 0.
+    task_max : int, optional
+        Maximum number of queued "READY" tasks. The default is -1, which
+        will skip the task check and only pause for "delay_time" seconds.
+    task_count : int, optional
+        The current/previous/assumed number of ready tasks.
+        Value will only be updated if greater than or equal to "task_max".
 
     Returns
     -------
-    None
+    int : the current/assumed number of queued READY tasks
 
     """
+    if task_max > 3000:
+        raise ValueError('The maximum number of queued tasks cannot be greater than 3000')
+
-    # Force delay time to be a positive value
-    # (since parameter used to support negative values)
+    # Force delay time to be a positive value since the parameter used to
+    # support negative values
     if delay_time < 0:
         delay_time = abs(delay_time)
-    logging.debug(f'  Pausing {delay_time} seconds')
-
-    if max_ready <= 0:
+    if (task_max is None or task_max <= 0) and (delay_time >= 0):
+        # Assume task_max was not set and just wait the delay time
+        logging.debug(f'  Pausing {delay_time} seconds, not checking task list')
         time.sleep(delay_time)
-    elif max_ready > 0:
-        # Don't continue to the next export until the number of READY tasks
-        # is greater than or equal to "max_ready"
-
-        # Force delay_time to be at least 10 seconds if max_ready is set
-        # to avoid excessive EE calls
-        delay_time = max(delay_time, 10)
+        return 0
+    elif task_max and (task_count < task_max):
+        # Skip waiting or checking tasks if a maximum number of tasks was set
+        # and the current task count is below the max
+        logging.debug(f'  Ready tasks: {task_count}')
+        return task_count
+
+    # If checking tasks, force delay_time to be at least 10 seconds if
+    # task_max is set to avoid excessive EE calls
+    delay_time = max(delay_time, 10)
+
+    # Make an initial pause before checking the task list to allow
+    # for the previous export to start up
+    # CGM - I'm not sure what a good default first pause time should be,
+    # but capping it at 30 seconds is probably fine for now
+    logging.debug(f'  Pausing {min(delay_time, 30)} seconds for tasks to start')
+    time.sleep(min(delay_time, 30))
+
+    # If checking tasks, don't continue to the next export until the number
+    # of READY tasks has dropped below "task_max"
+    while True:
+        ready_task_count = len(get_ee_tasks(states=['READY']).keys())
+        logging.debug(f'  Ready tasks: {ready_task_count}')
+        if ready_task_count >= task_max:
+            logging.debug(f'  Pausing {delay_time} seconds')
+            time.sleep(delay_time)
+        else:
+            logging.debug(f'  
{task_max - ready_task_count} open task ' + f'slots, continuing processing') + break - # Make an initial pause before checking tasks lists to allow - # for previous export to start up. - time.sleep(delay_time) + return ready_task_count - while True: - ready_tasks = get_ee_tasks(states=['READY'], verbose=True) - ready_task_count = len(ready_tasks.keys()) - if ready_task_count >= max_ready: - logging.debug(' {} tasks queued, waiting {} seconds to start ' - 'more tasks'.format(ready_task_count, delay_time)) - time.sleep(delay_time) +def get_info(ee_obj, max_retries=4): + """Make an exponential back off getInfo call on an Earth Engine object""" + # output = ee_obj.getInfo() + output = None + for i in range(1, max_retries): + try: + output = ee_obj.getInfo() + except ee.ee_exception.EEException as e: + if ('Earth Engine memory capacity exceeded' in str(e) or + 'Earth Engine capacity exceeded' in str(e) or + 'Too many concurrent aggregations' in str(e) or + 'Computation timed out.' in str(e)): + # TODO: Maybe add 'Connection reset by peer' + logging.info(f' Resending query ({i}/{max_retries})') + logging.info(f' {e}') else: - logging.debug(' Continuing iteration') - break + # TODO: What should happen for unexpected EE exceptions? + # It might be better to reraise the exception and exit + logging.info(f' {e}') + logging.info(' Unhandled Earth Engine exception') + continue + except Exception as e: + logging.info(f' Resending query ({i}/{max_retries})') + logging.debug(f' {e}') + if output is not None: + break -def get_ee_assets(asset_id, start_dt=None, end_dt=None, retries=6): + time.sleep(i ** 3) + + return output + + +def get_ee_assets(asset_id, start_dt=None, end_dt=None, retries=4): """Return assets IDs in a collection Parameters @@ -177,23 +274,32 @@ def get_ee_assets(asset_id, start_dt=None, end_dt=None, retries=6): list : Asset IDs """ - params = {'parent': asset_id} - - # TODO: Add support or handling for case when only start or end is set + # # CGM - There was a bug in earthengine-api>=0.1.326 that caused listImages() + # # to return an empty list if the startTime and endTime parameters are set + # # Switching to a .aggregate_array(system:index).getInfo() approach below for now + # # since getList is flagged for deprecation + # # This may have been fixed in a later update and should be reviewed + coll = ee.ImageCollection(asset_id) if start_dt and end_dt: - params['startTime'] = start_dt.isoformat() + '.000000000Z' - params['endTime'] = end_dt.isoformat() + '.000000000Z' + coll = coll.filterDate(start_dt.strftime('%Y-%m-%d'), end_dt.strftime('%Y-%m-%d')) + # params = {'parent': asset_id} + # if start_dt and end_dt: + # # CGM - Do both start and end need to be set to apply filtering? 
+ # params['startTime'] = start_dt.isoformat() + '.000000000Z' + # params['endTime'] = end_dt.isoformat() + '.000000000Z' asset_id_list = None for i in range(retries): try: - asset_id_list = [x['id'] for x in ee.data.listImages(params)['images']] + asset_id_list = coll.aggregate_array('system:index').getInfo() + asset_id_list = [f'{asset_id}/{id}' for id in asset_id_list] + # asset_id_list = [x['id'] for x in ee.data.listImages(params)['images']] break except ValueError: raise Exception('\nThe collection or folder does not exist, exiting') except Exception as e: logging.warning(f' Error getting asset list, retrying ({i}/{retries})\n {e}') - time.sleep((i+1) ** 2) + time.sleep((i+1) ** 3) if asset_id_list is None: raise Exception('\nUnable to retrieve task list, exiting') @@ -201,16 +307,13 @@ def get_ee_assets(asset_id, start_dt=None, end_dt=None, retries=6): return asset_id_list -def get_ee_tasks(states=['RUNNING', 'READY'], verbose=False, retries=6): +def get_ee_tasks(states=['RUNNING', 'READY'], retries=4): """Return current active tasks Parameters ---------- states : list, optional List of task states to check (the default is ['RUNNING', 'READY']). - verbose : bool, optional - This parameter is deprecated and is no longer being used. - To get verbose logging of the active tasks use utils.print_ee_tasks(). retries : int, optional The number of times to retry getting the task list if there is an error. @@ -229,7 +332,7 @@ def get_ee_tasks(states=['RUNNING', 'READY'], verbose=False, retries=6): break except Exception as e: logging.warning(f' Error getting task list, retrying ({i}/{retries})\n {e}') - time.sleep((i+1) ** 2) + time.sleep((i+1) ** 3) if task_list is None: raise Exception('\nUnable to retrieve task list, exiting') @@ -265,8 +368,8 @@ def print_ee_tasks(tasks): for desc, task in tasks.items(): if task['state'] == 'RUNNING': - start_dt = datetime.utcfromtimestamp(task['start_timestamp_ms'] / 1000) - update_dt = datetime.utcfromtimestamp(task['update_timestamp_ms'] / 1000) + start_dt = datetime.fromtimestamp(task['start_timestamp_ms'] / 1000, tz=timezone.utc) + update_dt = datetime.fromtimestamp(task['update_timestamp_ms'] / 1000, tz=timezone.utc) logging.debug(' {:8s} {} {:0.2f} {}'.format( task['state'], task['description'], (update_dt - start_dt).total_seconds() / 3600, @@ -280,49 +383,16 @@ def print_ee_tasks(tasks): return tasks -def get_info(ee_obj, max_retries=4): - """Make an exponential back off getInfo call on an Earth Engine object""" - # output = ee_obj.getInfo() - output = None - for i in range(1, max_retries): - try: - output = ee_obj.getInfo() - except ee.ee_exception.EEException as e: - if ('Earth Engine memory capacity exceeded' in str(e) or - 'Earth Engine capacity exceeded' in str(e) or - 'Too many concurrent aggregations' in str(e) or - 'Computation timed out.' in str(e)): - # TODO: Maybe add 'Connection reset by peer' - logging.info(f' Resending query ({i}/{max_retries})') - logging.info(f' {e}') - else: - # TODO: What should happen for unexpected EE exceptions? 
- # It might be better to reraise the exception and exit - logging.info(f' {e}') - logging.info(' Unhandled Earth Engine exception') - continue - except Exception as e: - logging.info(f' Resending query ({i}/{max_retries})') - logging.debug(f' {e}') - - if output is not None: - break - - time.sleep(i ** 3) - - return output - - -def ee_task_start(task, n=6): +def ee_task_start(task, n=4): """Make an exponential backoff Earth Engine request""" for i in range(1, n): try: task.start() break except Exception as e: - logging.info(' Resending query ({}/{})'.format(i, n)) - logging.debug(' {}'.format(e)) - time.sleep(i ** 2) + logging.info(f' Resending query ({i}/{n})') + logging.debug(f' {e}') + time.sleep(i ** 3) return task @@ -350,6 +420,29 @@ def millis(input_dt): return 1000 * int(calendar.timegm(input_dt.timetuple())) +def parse_landsat_id(system_index): + """Return the components of an EE Landsat Collection 1 system:index + + Parameters + ---------- + system_index : str + + Notes + ----- + LXSS_PPPRRR_YYYYMMDD + LC08_030036_20210725 + + """ + sensor = system_index[0:4] + path = int(system_index[5:8]) + row = int(system_index[8:11]) + year = int(system_index[12:16]) + month = int(system_index[16:18]) + day = int(system_index[18:20]) + + return sensor, path, row, year, month, day + + def parse_int_set(nputstr=""): """Return list of numbers given a string of ranges @@ -392,8 +485,8 @@ def wrs2_set_2_str(tiles): # CGM - I don't think string of a list is exactly JSON, but it seems to work tile_dict = { k: '[{}]'.format(list_2_str_ranges(v)) - for k, v in tile_dict.items()} - # tile_dict = {k: sorted(v) for k, v in tile_dict.items()} + for k, v in tile_dict.items() + } tile_str = ( json.dumps(tile_dict, sort_keys=True) .replace('"', '').replace(' ', '') @@ -468,6 +561,25 @@ def list_2_str_ranges(i): return ','.join(output) +def ver_str_2_num(version_str): + """Return a version number string as a list of integers for sorting or comparison + + Parameters + ---------- + version_str : str + + Returns + ------- + list of integers + + Notes + ----- + 0.20.6 -> [0, 20, 6] + + """ + return list(map(int, version_str.split('.'))) + + def constant_image_value(image, crs='EPSG:32613', scale=1): """Extract the output value from a "constant" image""" rr_params = { @@ -501,26 +613,8 @@ def point_coll_value(coll, xy, scale=1): for row in output[1:]: # TODO: Add support for images that don't have a system:time_start - date = datetime.utcfromtimestamp(row[3] / 1000.0).strftime('%Y-%m-%d') + date = datetime.fromtimestamp(row[3] / 1000.0, tz=timezone.utc).strftime('%Y-%m-%d') for k, v in col_dict.items(): info_dict[k][date] = row[col_dict[k]] return info_dict - # return pd.DataFrame.from_dict(info_dict) - - -# def build_parent_folders(folder_id, set_public=False): -# """Build the asset folder including parents""" -# # Build any parent folders above the "3rd" level -# # i.e. 
after "projects/openet/assets" or "projects/openet/folder" -# public_policy = {'bindings': [{'role': 'roles/viewer', 'members': ['allUsers']}]} -# folder_id_split = folder_id.replace('projects/earthengine-legacy/assets/', '').split('/') -# for i in range(len(folder_id_split)): -# if i <= 3: -# continue -# folder_id = '/'.join(folder_id_split[:i]) -# if not ee.data.getInfo(folder_id): -# print(f' Building folder: {folder_id}') -# ee.data.createAsset({'type': 'FOLDER'}, folder_id) -# if set_public: -# ee.data.setIamPolicy(folder_id, public_policy) diff --git a/pyproject.toml b/pyproject.toml index 181332d..2abcdd9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,12 +1,12 @@ [project] name = "openet-core" -version = "0.6.0" +version = "0.7.0" authors = [ { name="Charles Morton", email="charles.morton@dri.edu" }, ] description = "OpenET Core Components" readme = "README.rst" -requires-python = ">=3.8" +requires-python = ">=3.9" keywords = ["OpenET", "Earth Engine", "Evapotranspiration", "Landsat"] license = {text = "Apache-2.0"} classifiers = [ @@ -15,7 +15,7 @@ classifiers = [ "Operating System :: OS Independent", ] dependencies = [ - "earthengine-api>=0.1.364", + "earthengine-api>=1.5.2", "python-dateutil", ]