From e6795b8aa9feb3eb9e23efd8190bd06f41810749 Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Tue, 23 Nov 2021 17:55:09 -0600 Subject: [PATCH 01/18] python dependencies for docker install --- requirements.txt | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 requirements.txt diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..95972181 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,22 @@ +Cython>=0.29.24 +Flask>=2.0.2 +mapscript>=7.6.0 +matplotlib>=3.5.0 +psycopg2-binary>=2.9.2 +requests>=2.26.0 + +//// Build manually +// mapserver/mapscript +// cctools +// gdal +// lmdata-* +// lmpy +// lmserver +// lmtest +// mod-wsgi? +// proj +// scripts in rocks-lifemapper +// solr +// specify-open-api-tools +// webclient +//// \ No newline at end of file From d11743fa484ccae912751a4054072fc45b0938af Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Tue, 23 Nov 2021 17:55:47 -0600 Subject: [PATCH 02/18] begin conversion to flask --- .gitignore | 3 + LmWebServer/flask_app/base.py | 68 +++++++++ LmWebServer/flask_app/occurrence.py | 228 ++++++++++++++++++++++++++++ LmWebServer/flask_app/routes.py | 39 +++++ 4 files changed, 338 insertions(+) create mode 100644 LmWebServer/flask_app/base.py create mode 100644 LmWebServer/flask_app/occurrence.py create mode 100644 LmWebServer/flask_app/routes.py diff --git a/.gitignore b/.gitignore index 5dc29ee4..613cb3ef 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# Virtual environment for testing +venv/ + # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] diff --git a/LmWebServer/flask_app/base.py b/LmWebServer/flask_app/base.py new file mode 100644 index 00000000..d3470cd7 --- /dev/null +++ b/LmWebServer/flask_app/base.py @@ -0,0 +1,68 @@ +"""The module provides a base Lifemapper service class +""" +from flask import Flask, session, request, json +from werkzeug.exceptions import HTTPException + +from LmCommon.common.lmconstants import DEFAULT_POST_USER +from LmServer.common.localconstants import PUBLIC_USER +from LmServer.common.log import WebLogger +from LmServer.db.borg_scribe import BorgScribe + +app = Flask(__name__) + + +# ............................................................................. +class LmService: + """This is the base Lifemapper service object + + This is the base Lifemapper service object that the services can inherit + from. It is responsible for getting a database connection and logger that + can be used for the service. + """ + + # .......................... + def __init__(self): + """Constructor + + The constructor is only responsible for getting a logger, user and a + scribe instance for the service. We do that here in a simple base + class in case we decide that we need to use a different mechanism (such + as a CherryPy Tool) + """ + log = WebLogger() + # self.scribe = cherrypy.thread_data.scribeRetriever.get_scribe() + self.scribe = BorgScribe(log) + self.scribe.open_connections() + # self.log = cherrypy.session.log + self.log = log + + # .......................... + @staticmethod + def get_user_id(url_user=None): + """Gets the user id for the service call. + + Gets the user id for the service call. If urlUser is provided, try + that first. 
Then try the session and finally fall back to the + PUBLIC_USER + + TODO: Save the username in the session + """ + # Check to see if we should use url user + if url_user is not None: + if url_user.lower() == 'public'.lower(): + return PUBLIC_USER + if url_user.lower() == DEFAULT_POST_USER.lower(): + return DEFAULT_POST_USER + # Try to get the user from the session + try: + return session['username'] + except Exception: + # Fall back to PUBLIC_USER + return PUBLIC_USER + + # .......................... + @staticmethod + def OPTIONS(): + """Common options request for all services (needed for CORS) + """ + return diff --git a/LmWebServer/flask_app/occurrence.py b/LmWebServer/flask_app/occurrence.py new file mode 100644 index 00000000..5bd9430e --- /dev/null +++ b/LmWebServer/flask_app/occurrence.py @@ -0,0 +1,228 @@ +"""This module provides REST services for Occurrence sets""" +from http import HTTPStatus +import json +from werkzeug.exceptions import (BadRequest, Forbidden, InternalServerError, NotFound) + + +from LmCommon.common.lmconstants import ( + DEFAULT_POST_USER, HTTPStatus, JobStatus) +from LmServer.base.atom import Atom +from LmServer.common.localconstants import PUBLIC_USER +from LmWebServer.common.lmconstants import HTTPMethod +from LmWebServer.services.api.v2.base import LmService +from LmWebServer.services.common.access_control import check_user_permission +from LmWebServer.services.common.boom_post import BoomPoster +from LmWebServer.services.cp_tools.lm_format import lm_formatter + + +# ............................................................................. +class OccurrenceLayerService(LmService): + """ + @summary: This class is for the occurrence sets service. The dispatcher is + responsible for calling the correct method + """ + + # # ................................ + # def DELETE(self, path_occset_id): + # """Attempts to delete an occurrence set + # + # Args: + # path_occset_id (int): The id of the occurrence set to delete. + # """ + # occ = self.scribe.get_occurrence_set(occ_id=int(path_occset_id)) + # + # if occ is None: + # raise cherrypy.HTTPError( + # HTTPStatus.NOT_FOUND, 'Occurrence set not found') + # + # # If allowed to, delete + # if check_user_permission(self.get_user_id(), occ, HTTPMethod.DELETE): + # success = self.scribe.delete_object(occ) + # if success: + # cherrypy.response.status = HTTPStatus.NO_CONTENT + # return + # + # # If unsuccessful, fail + # raise cherrypy.HTTPError( + # HTTPStatus.INTERNAL_SERVER_ERROR, + # 'Failed to delete occurrence set') + # + # # If no permission to delete, raise HTTP 403 + # raise cherrypy.HTTPError( + # HTTPStatus.FORBIDDEN, + # 'User does not have permission to delete this occurrence set') + + # ................................ + @lm_formatter + def filter_occurrence_sets(self, path_occset_id=None, after_time=None, before_time=None, + display_name=None, epsg_code=None, minimum_number_of_points=1, + limit=100, offset=0, url_user=None, status=None, gridset_id=None, + fill_points=False, **params): + """GET request. Either an occurrence set or list of them. 
+ """ + if path_occset_id is None: + return self._list_occurrence_sets( + self.get_user_id(url_user=url_user), after_time=after_time, + before_time=before_time, display_name=display_name, + epsg_code=epsg_code, + minimum_number_of_points=minimum_number_of_points, limit=limit, + offset=offset, gridset_id=gridset_id, status=status) + + if path_occset_id.lower() == 'count': + return self._count_occurrence_sets( + self.get_user_id(url_user=url_user), after_time=after_time, + before_time=before_time, display_name=display_name, + epsg_code=epsg_code, + minimum_number_of_points=minimum_number_of_points, + gridset_id=gridset_id, status=status) + + if path_occset_id.lower() == 'web': + return self._list_web_occurrence_sets( + self.get_user_id(url_user=url_user), after_time=after_time, + before_time=before_time, display_name=display_name, + epsg_code=epsg_code, + minimum_number_of_points=minimum_number_of_points, limit=limit, + offset=offset, gridset_id=gridset_id, status=status) + + # Fallback to just get an individual occurrence set + return self._get_occurrence_set( + path_occset_id, fill_points=fill_points) + + # # ................................ + # # @cherrypy.tools.json_out + # @lm_formatter + # def POST(self, **params): + # """Posts a new BOOM archive + # """ + # projection_data = json.loads(cherrypy.request.body.read()) + # + # if self.get_user_id() == PUBLIC_USER: + # usr = self.scribe.find_user(DEFAULT_POST_USER) + # else: + # usr = self.scribe.find_user(self.get_user_id()) + # + # boom_post = BoomPoster( + # usr.user_id, usr.email, projection_data, self.scribe) + # gridset = boom_post.init_boom() + # + # cherrypy.response.status = HTTPStatus.ACCEPTED + # return Atom( + # gridset.get_id(), gridset.name, gridset.metadata_url, + # gridset.mod_time, epsg=gridset.epsg_code) + + # ................................ + def _count_occurrence_sets(self, user_id, after_time=None, + before_time=None, display_name=None, + epsg_code=None, minimum_number_of_points=1, + status=None, gridset_id=None): + """Return a count of occurrence sets matching the specified criteria + """ + after_status = None + before_status = None + + # Process status parameter + if status: + if status < JobStatus.COMPLETE: + before_status = JobStatus.COMPLETE - 1 + elif status == JobStatus.COMPLETE: + before_status = JobStatus.COMPLETE + 1 + after_status = JobStatus.COMPLETE - 1 + else: + after_status = status - 1 + + occ_count = self.scribe.count_occurrence_sets( + user_id=user_id, min_occurrence_count=minimum_number_of_points, + display_name=display_name, after_time=after_time, + before_time=before_time, epsg=epsg_code, + before_status=before_status, after_status=after_status, + gridset_id=gridset_id) + return {'count': occ_count} + + # ................................ + def get_occurrence_set(self, path_occset_id, fill_points=False): + """Attempt to get an occurrence set""" + occ = self.scribe.get_occurrence_set(occ_id=int(path_occset_id)) + + if occ is None: + raise NotFound('Occurrence set not found') + + # If allowed to, return + if check_user_permission(self.get_user_id(), occ, HTTPMethod.GET): + if fill_points: + occ.read_shapefile() + return occ + + raise Forbidden('User {} does not have permission to GET occurrence set'.format( + self.get_user_id())) + + # ................................ 
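    # A possible refactoring sketch (the helper name is an assumption, not part
    # of this module): the status-to-bounds translation repeated in
    # _count_occurrence_sets above and _list_occurrence_sets below could be
    # centralized in one place.
    @staticmethod
    def _status_bounds(status):
        """Translate a status filter into (after_status, before_status) bounds.

        A status below JobStatus.COMPLETE matches sets still waiting or
        running, COMPLETE matches only finished sets, and anything above
        COMPLETE matches error statuses.
        """
        after_status = before_status = None
        if status:
            if status < JobStatus.COMPLETE:
                before_status = JobStatus.COMPLETE - 1
            elif status == JobStatus.COMPLETE:
                before_status = JobStatus.COMPLETE + 1
                after_status = JobStatus.COMPLETE - 1
            else:
                after_status = status - 1
        return after_status, before_status

    # ................................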
+ def _list_occurrence_sets(self, user_id, after_time=None, before_time=None, + display_name=None, epsg_code=None, + minimum_number_of_points=1, limit=100, offset=0, + status=None, gridset_id=None): + """Return a list of occurrence sets matching the specified criteria + """ + after_status = None + before_status = None + + # Process status parameter + if status: + if status < JobStatus.COMPLETE: + before_status = JobStatus.COMPLETE - 1 + elif status == JobStatus.COMPLETE: + before_status = JobStatus.COMPLETE + 1 + after_status = JobStatus.COMPLETE - 1 + else: + after_status = status - 1 + + occ_atoms = self.scribe.list_occurrence_sets( + offset, limit, user_id=user_id, + min_occurrence_count=minimum_number_of_points, + display_name=display_name, after_time=after_time, + before_time=before_time, epsg=epsg_code, + before_status=before_status, after_status=after_status, + gridset_id=gridset_id) + return occ_atoms + # + # # ................................ + # def _list_web_occurrence_sets( + # self, user_id, after_time=None, before_time=None, + # display_name=None, epsg_code=None, minimum_number_of_points=1, + # limit=100, offset=0, status=None, gridset_id=None): + # """Return a list of occurrence set web objects matching criteria + # """ + # after_status = None + # before_status = None + # + # # Process status parameter + # if status: + # if status < JobStatus.COMPLETE: + # before_status = JobStatus.COMPLETE - 1 + # elif status == JobStatus.COMPLETE: + # before_status = JobStatus.COMPLETE + 1 + # after_status = JobStatus.COMPLETE - 1 + # else: + # after_status = status - 1 + # + # occs = self.scribe.list_occurrence_sets( + # offset, limit, user_id=user_id, + # min_occurrence_count=minimum_number_of_points, + # display_name=display_name, after_time=after_time, + # before_time=before_time, epsg=epsg_code, + # before_status=before_status, after_status=after_status, + # gridset_id=gridset_id, atom=False) + # occ_objs = [] + # for occ in occs: + # occ_objs.append( + # { + # 'id': occ.get_id(), + # 'metadata_url': occ.metadata_url, + # 'name': occ.display_name, + # 'modification_time': occ.status_mod_time, + # 'epsg': occ.epsg_code, + # 'status': occ.status, + # 'count': occ.query_count + # } + # ) + # return occ_objs + diff --git a/LmWebServer/flask_app/routes.py b/LmWebServer/flask_app/routes.py new file mode 100644 index 00000000..bd269882 --- /dev/null +++ b/LmWebServer/flask_app/routes.py @@ -0,0 +1,39 @@ +from flask import Flask, request + +from LmWebServer.flask_app.occurrence import OccurrenceLayerService + +app = Flask(__name__) + + +# ..................................................................................... +@app.route('/api/v2/occ/', methods=['GET']) +def occ_get(identifier): + """Get an occurrence record from available providers. + + Args: + identifier (str): An occurrence identifier to search for among occurrence providers. + + Returns: + dict: A dictionary of metadata for the requested record. 
+ """ + api = OccurrenceLayerService() + + after_time = request.args.get('after_time', default = None, type = str) + before_time = request.args.get('before_time', default = None, type = str) + display_name = request.args.get('display_name', default = None, type = str) + epsg_code = request.args.get('epsg_code', default= None, type = str) + minimum_number_of_points = request.args.get('minimum_number_of_points', default = 1, type = int) + limit = request.args.get('limit', default = 100, type = int) + offset = request.args.get('offset', default = 0, type = int) + url_user = request.args.get('url_user', default = None, type = str) + status = request.args.get('status', default = None, type = str) + gridset_id = request.args.get('gridset_id', default = None, type = str) + fill_points = request.args.get('fill_points', default = False, type = bool) + + response = api.filter_occurrence_sets( + path_occset_id=identifier, after_time=after_time, before_time=before_time, + display_name=display_name, epsg_code=epsg_code, minimum_number_of_points=minimum_number_of_points, + limit=limit, offset=offset, url_user=url_user, status=status, gridset_id=gridset_id, + fill_points=fill_points) + return response + From c12013f38f32d7ba0e8631e51495801176833e0f Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Wed, 24 Nov 2021 10:55:31 -0600 Subject: [PATCH 03/18] rocks docs --- docs/admin_lm/installLifemapperSystem.rst | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/docs/admin_lm/installLifemapperSystem.rst b/docs/admin_lm/installLifemapperSystem.rst index b97b9e1b..70a7e046 100644 --- a/docs/admin_lm/installLifemapperSystem.rst +++ b/docs/admin_lm/installLifemapperSystem.rst @@ -25,24 +25,21 @@ Install both rolls on Frontend lmwriter$ $PYTHON /opt/lifemapper/LmServer/tools/matt_daemon.py stop + #. **Remove old roll(s)** without cleaning data and packages + :: + + rocks remove roll lifemapper-server lifemapper-compute + #. **Add and enable new roll(s)**. Replace the following roll name with the latest version:: rocks add roll lifemapper-server-*.iso clean=1 rocks add roll lifemapper-compute-*.iso clean=1 - rocks enable roll lifemapper-compute version=(new)yyyy.mm.dd - rocks enable roll lifemapper-server version=(new)yyyy.mm.dd - - #. **Disable old roll versions** - :: - rocks disable roll lifemapper-compute version=(old)yyyy.mm.dd - rocks disable roll lifemapper-server version=(old)yyyy.mm.dd - + rocks enable roll lifemapper-server lifemapper-compute + #. **Remove conflicting RPMs** - rpm -evl --quiet --nodeps lifemapper-server - rpm -evl --quiet --nodeps lifemapper-compute - rpm -evl --quiet --nodeps rocks-lifemapper - rpm -evl --quiet --nodeps rocks-lmcompute + rpm -evl --quiet --nodeps lifemapper-lmserver lifemapper-lmcompute + rpm -evl --quiet --nodeps rocks-lifemapper rocks-lmcompute #. Follow `Build and execute installation` instructions below From b5b61ec244f972754fce8f8ae9e6c863dc983e42 Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Wed, 24 Nov 2021 12:50:05 -0600 Subject: [PATCH 04/18] add scipy --- requirements.txt | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/requirements.txt b/requirements.txt index 95972181..f1b3b9f2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,22 +1,7 @@ Cython>=0.29.24 Flask>=2.0.2 mapscript>=7.6.0 +scipy>=1.7.2 matplotlib>=3.5.0 psycopg2-binary>=2.9.2 requests>=2.26.0 - -//// Build manually -// mapserver/mapscript -// cctools -// gdal -// lmdata-* -// lmpy -// lmserver -// lmtest -// mod-wsgi? 
-// proj -// scripts in rocks-lifemapper -// solr -// specify-open-api-tools -// webclient -//// \ No newline at end of file From 7ee9840a0a7d537606fe7e35ce36fa70446c4cf5 Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Tue, 30 Nov 2021 16:37:33 -0600 Subject: [PATCH 05/18] Initial API services for login, occurrence, layer; unfinished; untested --- LmWebServer/flask_app/base.py | 25 ++- LmWebServer/flask_app/layer.py | 142 +++++++++++++++++ LmWebServer/flask_app/occurrence.py | 237 +++++++++++----------------- LmWebServer/flask_app/routes.py | 171 +++++++++++++++++--- LmWebServer/public_html/login.html | 9 ++ 5 files changed, 412 insertions(+), 172 deletions(-) create mode 100644 LmWebServer/flask_app/layer.py create mode 100644 LmWebServer/public_html/login.html diff --git a/LmWebServer/flask_app/base.py b/LmWebServer/flask_app/base.py index d3470cd7..73e68b24 100644 --- a/LmWebServer/flask_app/base.py +++ b/LmWebServer/flask_app/base.py @@ -1,7 +1,6 @@ """The module provides a base Lifemapper service class """ -from flask import Flask, session, request, json -from werkzeug.exceptions import HTTPException +from flask import Flask, session from LmCommon.common.lmconstants import DEFAULT_POST_USER from LmServer.common.localconstants import PUBLIC_USER @@ -36,6 +35,24 @@ class in case we decide that we need to use a different mechanism (such # self.log = cherrypy.session.log self.log = log + # .......................... + @staticmethod + def get_user(self, user_id=None): + """Gets the user id for the service call. + + Gets the user id for the service call. If urlUser is provided, try + that first. Then try the session and finally fall back to the + PUBLIC_USER + + TODO: Save the username in the session + """ + svc = LmService() + if user_id is None: + svc.get_user_id() + # Check to see if we should use url user + usr = svc.scribe.find_user(user_id) + return usr + # .......................... @staticmethod def get_user_id(url_user=None): @@ -49,9 +66,9 @@ def get_user_id(url_user=None): """ # Check to see if we should use url user if url_user is not None: - if url_user.lower() == 'public'.lower(): + if url_user.lower() == 'public': return PUBLIC_USER - if url_user.lower() == DEFAULT_POST_USER.lower(): + if url_user.lower() == DEFAULT_POST_USER: return DEFAULT_POST_USER # Try to get the user from the session try: diff --git a/LmWebServer/flask_app/layer.py b/LmWebServer/flask_app/layer.py new file mode 100644 index 00000000..57350420 --- /dev/null +++ b/LmWebServer/flask_app/layer.py @@ -0,0 +1,142 @@ +"""This module provides REST services for Layers""" +import werkzeug.exceptions as WEXC + +from LmCommon.common.lmconstants import HTTPStatus +from LmWebServer.common.lmconstants import HTTPMethod +from LmWebServer.services.api.v2.base import LmService +from LmWebServer.services.common.access_control import check_user_permission +from LmWebServer.services.cp_tools.lm_format import lm_formatter + + +# ............................................................................. +class LayerService(LmService): + """Class for layers web service.""" + + + # ................................ + @lm_formatter + def count_env_layers( + self, user_id, after_time=None, before_time=None, alt_pred_code=None, date_code=None, + env_code=None, env_type_id=None, epsg_code=None, gcm_code=None, scenario_code=None): + """Count environmental layer objects matching the specified criteria + + Args: + user_id (str): The user authorized for this operation. 
Note that this may not be + the same user as is logged into the system + after_time (float): Time in MJD of the earliest modtime for filtering + before_time (float): Time in MJD of the latest modtime for filtering + alt_pred_code (str): Code of the GCM scenario for filtering predicted environmental layera + date_code (str): Code of the date for filtering predicted environmental layers (for past, present, future) + epsg_code (str): EPSG code for the SRS for filtering layers + env_code (str): Environmental type code for filtering environmental layers + env_type_id (int): Database key of the environmental type for filtering environmental layers + gcm_code (str) = GCM code for filtering environmental layers + scenario_code (str): Database key for filtering to environmental layers belonging to one scenario + """ + layer_count = self.scribe.count_env_layers( + user_id=user_id, after_time=after_time, before_time=before_time, env_code=env_code, gcm_code=gcm_code, + alt_pred_code=alt_pred_code, date_code=date_code, epsg=epsg_code, env_type_id=env_type_id, + scenario_code=scenario_code) + + return {'count': layer_count} + + # ................................ + @lm_formatter + def count_layers(self, user_id, after_time=None, before_time=None, epsg_code=None, squid=None): + """Return a count of layers matching the specified criteria + + Args: + user_id (str): The user authorized for this operation. Note that this may not be + the same user as is logged into the system + after_time (float): Time in MJD of the earliest modtime for filtering + before_time (float): Time in MJD of the latest modtime for filtering + alt_pred_code (str): Code of the GCM scenario for filtering predicted environmental layera + date_code (str): Code of the date for filtering predicted environmental layers (for past, present, future) + epsg_code (str): EPSG code for the SRS for filtering layers + squid (str): Unique taxon identifier for filtering layers + """ + layer_count = self.scribe.count_layers( + user_id=user_id, squid=squid, after_time=after_time, before_time=before_time, epsg=epsg_code) + + return {'count': layer_count} + + # ................................ + @lm_formatter + def get_layer(self, user_id, layer_id, env_layer=False): + """Return a layer + + Args: + user_id (str): The user authorized for this operation. Note that this may not be + the same user as is logged into the system + layer_id (int): A database identifier for a requested layer. + env_layer (bool): flag - True restricts the search to environmental layers; False returns non-specific layers + """ + if env_layer: + lyr = self.scribe.get_env_layer(lyr_id=layer_id) + else: + lyr = self.scribe.get_layer(lyr_id=layer_id) + + if lyr is None: + if env_layer: + return WEXC.NotFound('Environmental layer {} was not found'.format(layer_id)) + else: + return WEXC.NotFound('Layer {} was not found'.format(layer_id)) + + if check_user_permission(user_id, lyr, HTTPMethod.GET): + return lyr + else: + return WEXC.Forbidden('User {} does not have permission to access layer {}'.format( + user_id, layer_id)) + + # ................................ + @lm_formatter + def list_env_layers( + self, user_id, after_time=None, before_time=None, alt_pred_code=None, date_code=None, + env_code=None, env_type_id=None, epsg_code=None, gcm_code=None, scenario_code=None, + limit=100, offset=0): + """Return a list of environmental layers matching the specified criteria + + Args: + user_id (str): The user authorized for this operation. 
Note that this may not be + the same user as is logged into the system + after_time (float): Time in MJD of the earliest modtime for filtering + before_time (float): Time in MJD of the latest modtime for filtering + alt_pred_code (str): Code of the GCM scenario for filtering predicted environmental layera + date_code (str): Code of the date for filtering predicted environmental layers (for past, present, future) + epsg_code (str): EPSG code for the SRS for filtering layers + env_code (str): Environmental type code for filtering environmental layers + env_type_id (int): Database key of the environmental type for filtering environmental layers + gcm_code (str) = GCM code for filtering environmental layers + layer_type (int): Code for filtering on environmental or other layer type. + 0/None = all; 1 = environmental layer; 2 = Not yet implemented + scenario_code (str): Code for filtering to environmental layers belonging to one scenario + limit (int): Number of records to return + offset (int): Offset for starting record of records to return + """ + lyr_atoms = self.scribe.list_env_layers( + offset, limit, user_id=user_id, after_time=after_time, before_time=before_time, + env_code=env_code, gcm_code=gcm_code, alt_pred_code=alt_pred_code, + date_code=date_code, epsg=epsg_code, env_type_id=env_type_id, scen_code=scenario_code) + + return lyr_atoms + + # ................................ + def list_layers( + self, user_id, after_time=None, before_time=None, epsg_code=None, squid=None, + limit=100, offset=0): + """Return a list of layers matching the specified criteria + + Args: + user_id (str): The user authorized for this operation. Note that this may not be + the same user as is logged into the system + after_time (float): Time in MJD of the earliest modtime for filtering + before_time (float): Time in MJD of the latest modtime for filtering + epsg_code (str): EPSG code for the SRS for filtering layers + squid (str): Unique identifier or filtering to a taxon associated with the layer + limit (int): Number of records to return + offset (int): Offset for starting record of records to return + """ + layer_atoms = self.scribe.list_layers( + offset, limit, user_id=user_id, after_time=after_time, before_time=before_time, epsg=epsg_code, squid=squid) + + return layer_atoms diff --git a/LmWebServer/flask_app/occurrence.py b/LmWebServer/flask_app/occurrence.py index 5bd9430e..ede1c19c 100644 --- a/LmWebServer/flask_app/occurrence.py +++ b/LmWebServer/flask_app/occurrence.py @@ -1,13 +1,8 @@ """This module provides REST services for Occurrence sets""" -from http import HTTPStatus -import json -from werkzeug.exceptions import (BadRequest, Forbidden, InternalServerError, NotFound) +import werkzeug.exceptions as WEXC - -from LmCommon.common.lmconstants import ( - DEFAULT_POST_USER, HTTPStatus, JobStatus) +from LmCommon.common.lmconstants import (JobStatus) from LmServer.base.atom import Atom -from LmServer.common.localconstants import PUBLIC_USER from LmWebServer.common.lmconstants import HTTPMethod from LmWebServer.services.api.v2.base import LmService from LmWebServer.services.common.access_control import check_user_permission @@ -17,101 +12,57 @@ # ............................................................................. class OccurrenceLayerService(LmService): - """ - @summary: This class is for the occurrence sets service. The dispatcher is - responsible for calling the correct method - """ - - # # ................................ 
- # def DELETE(self, path_occset_id): - # """Attempts to delete an occurrence set - # - # Args: - # path_occset_id (int): The id of the occurrence set to delete. - # """ - # occ = self.scribe.get_occurrence_set(occ_id=int(path_occset_id)) - # - # if occ is None: - # raise cherrypy.HTTPError( - # HTTPStatus.NOT_FOUND, 'Occurrence set not found') - # - # # If allowed to, delete - # if check_user_permission(self.get_user_id(), occ, HTTPMethod.DELETE): - # success = self.scribe.delete_object(occ) - # if success: - # cherrypy.response.status = HTTPStatus.NO_CONTENT - # return - # - # # If unsuccessful, fail - # raise cherrypy.HTTPError( - # HTTPStatus.INTERNAL_SERVER_ERROR, - # 'Failed to delete occurrence set') - # - # # If no permission to delete, raise HTTP 403 - # raise cherrypy.HTTPError( - # HTTPStatus.FORBIDDEN, - # 'User does not have permission to delete this occurrence set') + """Class for the occurrence sets web service.""" # ................................ @lm_formatter - def filter_occurrence_sets(self, path_occset_id=None, after_time=None, before_time=None, - display_name=None, epsg_code=None, minimum_number_of_points=1, - limit=100, offset=0, url_user=None, status=None, gridset_id=None, - fill_points=False, **params): - """GET request. Either an occurrence set or list of them. + def delete_occurrence_set(self, user_id, occset_id): + """Delete an occurrence set + + Args: + user_id (str): The user authorized for this operation. Note that this may not be + the same user as is logged into the system + occset_id (int): The id of the occurrence set to delete. """ - if path_occset_id is None: - return self._list_occurrence_sets( - self.get_user_id(url_user=url_user), after_time=after_time, - before_time=before_time, display_name=display_name, - epsg_code=epsg_code, - minimum_number_of_points=minimum_number_of_points, limit=limit, - offset=offset, gridset_id=gridset_id, status=status) - - if path_occset_id.lower() == 'count': - return self._count_occurrence_sets( - self.get_user_id(url_user=url_user), after_time=after_time, - before_time=before_time, display_name=display_name, - epsg_code=epsg_code, - minimum_number_of_points=minimum_number_of_points, - gridset_id=gridset_id, status=status) - - if path_occset_id.lower() == 'web': - return self._list_web_occurrence_sets( - self.get_user_id(url_user=url_user), after_time=after_time, - before_time=before_time, display_name=display_name, - epsg_code=epsg_code, - minimum_number_of_points=minimum_number_of_points, limit=limit, - offset=offset, gridset_id=gridset_id, status=status) - - # Fallback to just get an individual occurrence set - return self._get_occurrence_set( - path_occset_id, fill_points=fill_points) + occ = self.scribe.get_occurrence_set(occ_id=int(occset_id)) + + if occ is None: + raise WEXC.NotFound('Occurrence set not found') + + # If allowed to, delete + if check_user_permission(user_id, occ, HTTPMethod.DELETE): + success = self.scribe.delete_object(occ) + if success: + return True + + # If unsuccessful, fail + raise WEXC.InternalServerError('Failed to delete occurrence set') + + # If no permission to delete, raise HTTP 403 + raise WEXC.Forbidden('User does not have permission to delete this occurrence set') - # # ................................ 
- # # @cherrypy.tools.json_out - # @lm_formatter - # def POST(self, **params): - # """Posts a new BOOM archive - # """ - # projection_data = json.loads(cherrypy.request.body.read()) - # - # if self.get_user_id() == PUBLIC_USER: - # usr = self.scribe.find_user(DEFAULT_POST_USER) - # else: - # usr = self.scribe.find_user(self.get_user_id()) - # - # boom_post = BoomPoster( - # usr.user_id, usr.email, projection_data, self.scribe) - # gridset = boom_post.init_boom() - # - # cherrypy.response.status = HTTPStatus.ACCEPTED - # return Atom( - # gridset.get_id(), gridset.name, gridset.metadata_url, - # gridset.mod_time, epsg=gridset.epsg_code) + # ................................ + @lm_formatter + def post_boom_data(self, user_id, user_email, boom_data, **params): + """Post occurrence data to seed a new BOOM archive + + Args: + user_id (str): The user authorized for this operation. Note that this may not be + the same user as is logged into the system + user_email (str): The user to be notified of results of this operation. + boom_data: JSON package of parameters to initialize a new gridset and workflow. + """ + boom_post = BoomPoster(user_id, user_email, boom_data, self.scribe) + gridset = boom_post.init_boom() + + # cherrypy.response.status = HTTPStatus.ACCEPTED + return Atom( + gridset.get_id(), gridset.name, gridset.metadata_url, + gridset.mod_time, epsg=gridset.epsg_code) # ................................ - def _count_occurrence_sets(self, user_id, after_time=None, + @lm_formatter + def count_occurrence_sets(self, user_id, after_time=None, before_time=None, display_name=None, epsg_code=None, minimum_number_of_points=1, status=None, gridset_id=None): @@ -139,12 +90,13 @@ def _count_occurrence_sets(self, user_id, after_time=None, return {'count': occ_count} # ................................ - def get_occurrence_set(self, path_occset_id, fill_points=False): + @lm_formatter + def get_occurrence_set(self, occset_id, fill_points=False): """Attempt to get an occurrence set""" - occ = self.scribe.get_occurrence_set(occ_id=int(path_occset_id)) + occ = self.scribe.get_occurrence_set(occ_id=int(occset_id)) if occ is None: - raise NotFound('Occurrence set not found') + raise WEXC.NotFound('Occurrence set not found') # If allowed to, return if check_user_permission(self.get_user_id(), occ, HTTPMethod.GET): @@ -152,11 +104,12 @@ def get_occurrence_set(self, path_occset_id, fill_points=False): occ.read_shapefile() return occ - raise Forbidden('User {} does not have permission to GET occurrence set'.format( + raise WEXC.Forbidden('User {} does not have permission to GET occurrence set'.format( self.get_user_id())) # ................................ - def _list_occurrence_sets(self, user_id, after_time=None, before_time=None, + @lm_formatter + def list_occurrence_sets(self, user_id, after_time=None, before_time=None, display_name=None, epsg_code=None, minimum_number_of_points=1, limit=100, offset=0, status=None, gridset_id=None): @@ -183,46 +136,46 @@ def _list_occurrence_sets(self, user_id, after_time=None, before_time=None, before_status=before_status, after_status=after_status, gridset_id=gridset_id) return occ_atoms - # - # # ................................ 
- # def _list_web_occurrence_sets( - # self, user_id, after_time=None, before_time=None, - # display_name=None, epsg_code=None, minimum_number_of_points=1, - # limit=100, offset=0, status=None, gridset_id=None): - # """Return a list of occurrence set web objects matching criteria - # """ - # after_status = None - # before_status = None - # - # # Process status parameter - # if status: - # if status < JobStatus.COMPLETE: - # before_status = JobStatus.COMPLETE - 1 - # elif status == JobStatus.COMPLETE: - # before_status = JobStatus.COMPLETE + 1 - # after_status = JobStatus.COMPLETE - 1 - # else: - # after_status = status - 1 - # - # occs = self.scribe.list_occurrence_sets( - # offset, limit, user_id=user_id, - # min_occurrence_count=minimum_number_of_points, - # display_name=display_name, after_time=after_time, - # before_time=before_time, epsg=epsg_code, - # before_status=before_status, after_status=after_status, - # gridset_id=gridset_id, atom=False) - # occ_objs = [] - # for occ in occs: - # occ_objs.append( - # { - # 'id': occ.get_id(), - # 'metadata_url': occ.metadata_url, - # 'name': occ.display_name, - # 'modification_time': occ.status_mod_time, - # 'epsg': occ.epsg_code, - # 'status': occ.status, - # 'count': occ.query_count - # } - # ) - # return occ_objs + + # ................................ + @lm_formatter + def list_web_occurrence_sets( + self, user_id, after_time=None, before_time=None, + display_name=None, epsg_code=None, minimum_number_of_points=1, + limit=100, offset=0, status=None, gridset_id=None): + """Return a list of occurrence set web objects matching criteria""" + after_status = None + before_status = None + + # Process status parameter + if status: + if status < JobStatus.COMPLETE: + before_status = JobStatus.COMPLETE - 1 + elif status == JobStatus.COMPLETE: + before_status = JobStatus.COMPLETE + 1 + after_status = JobStatus.COMPLETE - 1 + else: + after_status = status - 1 + + occs = self.scribe.list_occurrence_sets( + offset, limit, user_id=user_id, + min_occurrence_count=minimum_number_of_points, + display_name=display_name, after_time=after_time, + before_time=before_time, epsg=epsg_code, + before_status=before_status, after_status=after_status, + gridset_id=gridset_id, atom=False) + occ_objs = [] + for occ in occs: + occ_objs.append( + { + 'id': occ.get_id(), + 'metadata_url': occ.metadata_url, + 'name': occ.display_name, + 'modification_time': occ.status_mod_time, + 'epsg': occ.epsg_code, + 'status': occ.status, + 'count': occ.query_count + } + ) + return occ_objs diff --git a/LmWebServer/flask_app/routes.py b/LmWebServer/flask_app/routes.py index bd269882..550902d0 100644 --- a/LmWebServer/flask_app/routes.py +++ b/LmWebServer/flask_app/routes.py @@ -1,39 +1,158 @@ -from flask import Flask, request +from flask import Flask, redirect, render_template, request, session +from werkzeug.exceptions import BadRequest, NotFound from LmWebServer.flask_app.occurrence import OccurrenceLayerService +from LmWebServer.flask_app.base import LmService -app = Flask(__name__) +app = Flask(__name__.split('.')[0]) +# .......................... 
+@app.route('/api/v2/login', methods=['GET', 'POST']) +def login(): + if request.method == 'POST': + req = request.form + username = req.get('username') + password = req.get('password') + + user = LmService.get_user(username) + if user.check_password(password): + session['username'] = user.user_id + return user + else: + print('Incorrect password') + return redirect(request.url) + + return render_template('public_html/login.html') # ..................................................................................... -@app.route('/api/v2/occ/', methods=['GET']) -def occ_get(identifier): - """Get an occurrence record from available providers. +@app.route('/api/v2/occ/', methods=['GET', 'POST', 'DELETE']) +def occurrence(identifier): + """Occurrence API service for GET, POST, and DELETE operations on occurrences Args: - identifier (str): An occurrence identifier to search for among occurrence providers. + identifier (str): An occurrence identifier to search for. Returns: - dict: A dictionary of metadata for the requested record. + dict: For GET and POST operations, zero or more dictionaries of metadata for the requested or + posted record(s); for DELETE operations, True or False for success + + TODO: Why is boom post here? Create a different service for that. """ - api = OccurrenceLayerService() - - after_time = request.args.get('after_time', default = None, type = str) - before_time = request.args.get('before_time', default = None, type = str) - display_name = request.args.get('display_name', default = None, type = str) - epsg_code = request.args.get('epsg_code', default= None, type = str) - minimum_number_of_points = request.args.get('minimum_number_of_points', default = 1, type = int) - limit = request.args.get('limit', default = 100, type = int) - offset = request.args.get('offset', default = 0, type = int) - url_user = request.args.get('url_user', default = None, type = str) - status = request.args.get('status', default = None, type = str) - gridset_id = request.args.get('gridset_id', default = None, type = str) - fill_points = request.args.get('fill_points', default = False, type = bool) - - response = api.filter_occurrence_sets( - path_occset_id=identifier, after_time=after_time, before_time=before_time, - display_name=display_name, epsg_code=epsg_code, minimum_number_of_points=minimum_number_of_points, - limit=limit, offset=offset, url_user=url_user, status=status, gridset_id=gridset_id, - fill_points=fill_points) + svc = OccurrenceLayerService() + user = svc.get_user() + user_id = user.user_id + + if request.method == 'POST' and request.is_json: + boom_data = request.get_json() + svc.post_boom_data(user_id, user.email, boom_data) + + elif request.method == 'DELETE': + svc.delete_occurrence_set(user_id, identifier) + + elif request.method == 'GET': + after_time = request.args.get('after_time', default = None, type = str) + before_time = request.args.get('before_time', default = None, type = str) + display_name = request.args.get('display_name', default = None, type = str) + epsg_code = request.args.get('epsg_code', default= None, type = str) + minimum_number_of_points = request.args.get('minimum_number_of_points', default = 1, type = int) + limit = request.args.get('limit', default = 100, type = int) + offset = request.args.get('offset', default = 0, type = int) + # url_user = request.args.get('url_user', default = None, type = str) + status = request.args.get('status', default = None, type = str) + gridset_id = request.args.get('gridset_id', default = None, type = str) + fill_points 
= request.args.get('fill_points', default = False, type = bool) + + if identifier.lower() == 'count': + response = svc.count_occurrence_sets( + user_id, after_time=after_time, before_time=before_time, display_name=display_name, + epsg_code=epsg_code, minimum_number_of_points=minimum_number_of_points, + gridset_id=gridset_id, status=status) + + elif identifier.lower() == 'web': + response = svc.list_web_occurrence_sets( + user_id, after_time=after_time, before_time=before_time, display_name=display_name, + epsg_code=epsg_code, minimum_number_of_points=minimum_number_of_points, limit=limit, + offset=offset, gridset_id=gridset_id, status=status) + + elif identifier is None: + response = svc.list_occurrence_sets( + user_id, after_time=after_time, before_time=before_time, display_name=display_name, + epsg_code=epsg_code, minimum_number_of_points=minimum_number_of_points, limit=limit, + offset=offset, gridset_id=gridset_id, status=status) + + else: + try: + occid = int(identifier) + except: + return BadRequest('{} is not a valid layer ID'.format(identifier)) + else: + response = svc.get_occurrence_set(occid, fill_points=fill_points) + + return response +# ..................................................................................... +@app.route('/api/v2/layer/', methods=['GET', 'DELETE']) +def layer(identifier): + """Layer API service for GET and DELETE operations on layers + + Path parameter: + identifier (str): A layer identifier to search for. + + Returns: + dict: A dictionary of metadata for the requested record. + """ + svc = OccurrenceLayerService() + user = svc.get_user() + user_id = user.user_id + + if request.method == 'DELETE': + svc.delete_occurrence_set(user_id, identifier) + + elif request.method == 'GET': + after_time = request.args.get('after_time', default = None, type = str) + before_time = request.args.get('before_time', default = None, type = str) + alt_pred_code = request.args.get('alt_pred_code', default = None, type = str) + date_code = request.args.get('date_code', default = None, type = str) + epsg_code = request.args.get('epsg_code', default= None, type = str) + env_code = request.args.get('env_code', default = None, type = str) + env_type_id = request.args.get('env_type_id', default = None, type = int) + gcm_code = request.args.get('gcm_code', default = None, type = str) + # layer_type: + layer_type = request.args.get('layer_type', default = None, type = str) + scenario_code = request.args.get('scenario_code', default = None, type = int) + squid = request.args.get('squid', default = None, type = str) + limit = request.args.get('limit', default = 100, type = int) + offset = request.args.get('offset', default = 0, type = int) + + if identifier is None: + if layer_type == 1: + response = svc.list_env_layers( + user_id, after_time=after_time, before_time=before_time, alt_pred_code=alt_pred_code, + date_code=date_code, env_code=env_code, env_type_id=env_type_id, epsg_code=epsg_code, + gcm_code=gcm_code, scenario_code=scenario_code, limit=limit, offset=offset) + else: + response = svc.list_layers( + user_id, after_time=after_time, before_time=before_time, epsg_code=epsg_code, + squid=squid, limit=limit, offset=offset) + elif identifier.lower() == 'count': + if layer_type == 1: + response = svc.count_env_layers( + user_id, after_time=after_time, before_time=before_time, alt_pred_code=alt_pred_code, + date_code=date_code, env_code=env_code, env_type_id=env_type_id, epsg_code=epsg_code, + gcm_code=gcm_code, scenario_code=scenario_code) + else: + response = 
svc.count_layers( + offset, limit, user_id=user_id, after_time=after_time, before_time=before_time, + epsg=epsg_code, squid=squid) + + else: + try: + layer_id = int(identifier) + except: + return BadRequest('{} is not a valid occurrenceset ID'.format(identifier)) + else: + response = svc.get_layer(user_id, layer_id, env_layer=(layer_type == 1)) + + + return response diff --git a/LmWebServer/public_html/login.html b/LmWebServer/public_html/login.html new file mode 100644 index 00000000..2a4d0a28 --- /dev/null +++ b/LmWebServer/public_html/login.html @@ -0,0 +1,9 @@ +
+<body>
+<h2>Login to your account</h2>
+<form method="post">
+    <input type="text" name="username" placeholder="Username">
+    <input type="password" name="password" placeholder="Password">
+    <input type="submit" value="Login">
+    <a href="#">Forgot Username?</a>
+</form>
\ No newline at end of file From a32b809d9ee81e3207bf472d70dae701b630e7e0 Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Tue, 30 Nov 2021 17:39:14 -0600 Subject: [PATCH 06/18] add services; unfinished; untested --- LmWebServer/flask_app/biotaphy_names.py | 43 +++++++++++ LmWebServer/flask_app/biotaphy_points.py | 67 +++++++++++++++++ LmWebServer/flask_app/env_layer.py | 93 ++++++++++++++++++++++++ LmWebServer/flask_app/layer.py | 3 +- LmWebServer/flask_app/routes.py | 28 ++++++- 5 files changed, 231 insertions(+), 3 deletions(-) create mode 100644 LmWebServer/flask_app/biotaphy_names.py create mode 100644 LmWebServer/flask_app/biotaphy_points.py create mode 100644 LmWebServer/flask_app/env_layer.py diff --git a/LmWebServer/flask_app/biotaphy_names.py b/LmWebServer/flask_app/biotaphy_names.py new file mode 100644 index 00000000..19f8ed0e --- /dev/null +++ b/LmWebServer/flask_app/biotaphy_names.py @@ -0,0 +1,43 @@ +"""This module provides a wrapper around GBIF's names service for use in the Biotaphy web application""" +import werkzeug.exceptions as WEXC + +from LmCommon.common.api_query import GbifAPI +from LmWebServer.flask_app.base import LmService +from LmWebServer.services.cp_tools.lm_format import lm_formatter + + +# ............................................................................. +class GBIFTaxonService(LmService): + """Class to get and filter results from GBIF name-matching service.""" + + # ................................ + @lm_formatter + def get_gbif_results(self, names_obj): + """Queries GBIF for accepted names matching the provided list of names + + Args: + names_obj: a JSON list of name strings to match + """ + if not isinstance(names_obj, list): + return WEXC.BadRequest('Name data must be a JSON list') + + retval = [] + for name in names_obj: + try: + gbif_resp = GbifAPI.get_accepted_names(name)[0] + except Exception as e: + self.log.error('Could not get accepted name from GBIF for name {}: {}'.format(name, e)) + retval.append({ + GbifAPI.SEARCH_NAME_KEY: name, + GbifAPI.ACCEPTED_NAME_KEY: None, + GbifAPI.TAXON_ID_KEY: None + }) + else: + retval.append({ + GbifAPI.SEARCH_NAME_KEY: name, + GbifAPI.ACCEPTED_NAME_KEY: gbif_resp[ + GbifAPI.SPECIES_NAME_KEY], + GbifAPI.TAXON_ID_KEY: gbif_resp[ + GbifAPI.SPECIES_KEY_KEY] + }) + return retval diff --git a/LmWebServer/flask_app/biotaphy_points.py b/LmWebServer/flask_app/biotaphy_points.py new file mode 100644 index 00000000..444212c9 --- /dev/null +++ b/LmWebServer/flask_app/biotaphy_points.py @@ -0,0 +1,67 @@ +"""This module provides a wrapper around iDigBio's occurrence service to get an occurrence count for GBIF taxon keys""" +import os +import random +import werkzeug.exceptions as WEXC + +from LmCommon.common.api_query import IdigbioAPI, GbifAPI +from LmCommon.common.lmconstants import DEFAULT_POST_USER +from LmServer.common.data_locator import EarlJr +from LmServer.common.lmconstants import LMFileType, FileFix +from LmServer.common.localconstants import PUBLIC_USER +from LmWebServer.flask_app.base import LmService +from LmWebServer.services.cp_tools.lm_format import lm_formatter + + +# ............................................................................. +class IDigBioOccurrenceService(LmService): + """iDigBio occurrence data service""" + + # ................................ 
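    # A possible helper sketch (the method name is an assumption):
    # get_occurrence_counts_for_taxonids below converts the summary returned by
    # IdigbioAPI.query_idigbio_data into a list of per-taxon records, skipping
    # the missing-key bucket, and that step could be factored out like this.
    @staticmethod
    def _summary_to_response(summary):
        """Convert an iDigBio count summary into a list of per-taxon records."""
        return [
            {GbifAPI.TAXON_ID_KEY: key, IdigbioAPI.OCCURRENCE_COUNT_KEY: val}
            for key, val in summary.items()
            if key != GbifAPI.GBIF_MISSING_KEY
        ]

    # ................................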
+ def _get_data_targets(self): + earl = EarlJr() + user_id = self.get_user_id() + if user_id == PUBLIC_USER: + user_id = DEFAULT_POST_USER + # All results are temp files + out_dir = earl.create_data_path(user_id, LMFileType.TMP_JSON) + if not os.path.exists(out_dir): + os.makedirs(out_dir) + basename = earl.create_basename( + LMFileType.OCCURRENCE_RAW_FILE, obj_code=random.randint(0, 100000)) + + point_output_file = os.path.join( + out_dir, + basename + FileFix.EXTENSION[LMFileType.OCCURRENCE_RAW_FILE]) + meta_output_file = os.path.join( + out_dir, + basename + FileFix.EXTENSION[LMFileType.OCCURRENCE_META_FILE]) + return point_output_file, meta_output_file + + # ................................ + @lm_formatter + def get_occurrence_counts_for_taxonids(self, taxonids_obj): + """Queries iDigBio for the number of occurrence points for the provided GBIF taxon keys + + Args: + taxonids_obj: a JSON list of GBIF taxon_keys to count iDigBio occurrences for. + """ + + if not isinstance(taxonids_obj, list): + return WEXC.BadRequest('GBIF Taxon IDs must be a JSON list') + + _point_output_file, _meta_output_file = self._get_data_targets() + idig_api = IdigbioAPI() + response = [] + try: + # queryIdigbioData gets and returns counts + summary = idig_api.query_idigbio_data(taxonids_obj) + + except Exception as e: + self.log.error('Could not get iDigBio points for GBIF taxon IDs: {}'.format(e)) + + else: + for key, val in summary.items(): + if key != GbifAPI.GBIF_MISSING_KEY: + response.append({GbifAPI.TAXON_ID_KEY: key, IdigbioAPI.OCCURRENCE_COUNT_KEY: val}) + + return response diff --git a/LmWebServer/flask_app/env_layer.py b/LmWebServer/flask_app/env_layer.py new file mode 100644 index 00000000..1eb519b6 --- /dev/null +++ b/LmWebServer/flask_app/env_layer.py @@ -0,0 +1,93 @@ +"""This module provides REST services for environmental layers""" +import werkzeug.exceptions as WEXC + +from LmWebServer.common.lmconstants import HTTPMethod +from LmWebServer.services.api.v2.base import LmService +from LmWebServer.services.common.access_control import check_user_permission +from LmWebServer.services.cp_tools.lm_format import lm_formatter + + +# ............................................................................. +class EnvLayerService(LmService): + """Class for environmental layers service.""" + + # ................................ + @lm_formatter + def count_env_layers( + self, user_id, after_time=None, before_time=None, alt_pred_code=None, date_code=None, + env_code=None, env_type_id=None, epsg_code=None, gcm_code=None, scenario_code=None): + """Count environmental layer objects matching the specified criteria + + Args: + user_id (str): The user authorized for this operation. 
Note that this may not be + the same user as is logged into the system + after_time (float): Time in MJD of the earliest modtime for filtering + before_time (float): Time in MJD of the latest modtime for filtering + alt_pred_code (str): Code of the GCM scenario for filtering predicted environmental layera + date_code (str): Code of the date for filtering predicted environmental layers (for past, present, future) + epsg_code (str): EPSG code for the SRS for filtering layers + env_code (str): Environmental type code for filtering environmental layers + env_type_id (int): Database key of the environmental type for filtering environmental layers + gcm_code (str) = GCM code for filtering environmental layers + scenario_code (str): Database key for filtering to environmental layers belonging to one scenario + """ + layer_count = self.scribe.count_env_layers( + user_id=user_id, after_time=after_time, before_time=before_time, env_code=env_code, gcm_code=gcm_code, + alt_pred_code=alt_pred_code, date_code=date_code, epsg=epsg_code, env_type_id=env_type_id, + scenario_code=scenario_code) + + return {'count': layer_count} + + # ................................ + @lm_formatter + def get_env_layer(self, user_id, layer_id): + """Return an environmental layer + + Args: + user_id (str): The user authorized for this operation. Note that this may not be + the same user as is logged into the system + layer_id (int): A database identifier for a requested layer. + """ + lyr = self.scribe.get_env_layer(lyr_id=layer_id) + + if lyr is None: + return WEXC.NotFound('Environmental layer {} was not found'.format(layer_id)) + + if check_user_permission(user_id, lyr, HTTPMethod.GET): + return lyr + else: + return WEXC.Forbidden('User {} does not have permission to access layer {}'.format( + user_id, layer_id)) + + + # ................................ + @lm_formatter + def list_env_layers( + self, user_id, after_time=None, before_time=None, alt_pred_code=None, date_code=None, + env_code=None, env_type_id=None, epsg_code=None, gcm_code=None, scenario_code=None, + limit=100, offset=0): + """Return a list of environmental layers matching the specified criteria + + Args: + user_id (str): The user authorized for this operation. Note that this may not be + the same user as is logged into the system + after_time (float): Time in MJD of the earliest modtime for filtering + before_time (float): Time in MJD of the latest modtime for filtering + alt_pred_code (str): Code of the GCM scenario for filtering predicted environmental layera + date_code (str): Code of the date for filtering predicted environmental layers (for past, present, future) + epsg_code (str): EPSG code for the SRS for filtering layers + env_code (str): Environmental type code for filtering environmental layers + env_type_id (int): Database key of the environmental type for filtering environmental layers + gcm_code (str) = GCM code for filtering environmental layers + layer_type (int): Code for filtering on environmental or other layer type. 
+ 0/None = all; 1 = environmental layer; 2 = Not yet implemented + scenario_code (str): Code for filtering to environmental layers belonging to one scenario + limit (int): Number of records to return + offset (int): Offset for starting record of records to return + """ + lyr_atoms = self.scribe.list_env_layers( + offset, limit, user_id=user_id, after_time=after_time, before_time=before_time, + env_code=env_code, gcm_code=gcm_code, alt_pred_code=alt_pred_code, + date_code=date_code, epsg=epsg_code, env_type_id=env_type_id, scen_code=scenario_code) + + return lyr_atoms diff --git a/LmWebServer/flask_app/layer.py b/LmWebServer/flask_app/layer.py index 57350420..19aea028 100644 --- a/LmWebServer/flask_app/layer.py +++ b/LmWebServer/flask_app/layer.py @@ -1,9 +1,8 @@ """This module provides REST services for Layers""" import werkzeug.exceptions as WEXC -from LmCommon.common.lmconstants import HTTPStatus from LmWebServer.common.lmconstants import HTTPMethod -from LmWebServer.services.api.v2.base import LmService +from LmWebServer.flask_app.base import LmService from LmWebServer.services.common.access_control import check_user_permission from LmWebServer.services.cp_tools.lm_format import lm_formatter diff --git a/LmWebServer/flask_app/routes.py b/LmWebServer/flask_app/routes.py index 550902d0..9bd58093 100644 --- a/LmWebServer/flask_app/routes.py +++ b/LmWebServer/flask_app/routes.py @@ -3,6 +3,8 @@ from LmWebServer.flask_app.occurrence import OccurrenceLayerService from LmWebServer.flask_app.base import LmService +from LmWebServer.flask_app.biotaphy_names import GBIFTaxonService +from LmWebServer.flask_app.biotaphy_points import IDigBioOccurrenceService app = Flask(__name__.split('.')[0]) @@ -153,6 +155,30 @@ def layer(identifier): return BadRequest('{} is not a valid occurrenceset ID'.format(identifier)) else: response = svc.get_layer(user_id, layer_id, env_layer=(layer_type == 1)) - return response + +# ..................................................................................... +@app.route('/api/v2/biotaphynames', methods=['POST']) +def biotaphynames(): + try: + names_obj = request.get_json() + except: + return BadRequest('Name list must be in JSON format') + else: + svc = GBIFTaxonService() + response = svc.get_gbif_results(names_obj) + return response + +# ..................................................................................... 
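# A sketch of how a client might call the name-matching endpoint above, using
# the requests package already listed in requirements.txt; the host URL and
# the function name here are illustrative only, not part of the service.
def example_biotaphynames_call(names, host='http://localhost'):
    """POST a JSON list of scientific names and return the accepted matches."""
    import requests
    resp = requests.post('{}/api/v2/biotaphynames'.format(host), json=names)
    resp.raise_for_status()
    return resp.json()

# .....................................................................................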
+@app.route('/api/v2/biotaphypoints', methods=['POST']) +def biotaphypoints(): + try: + taxon_ids = request.get_json() + except: + return BadRequest('Taxon ID list must be in JSON format') + else: + svc = IDigBioOccurrenceService() + response = svc.get_occurrence_counts_for_taxonids(names_obj) + return response + From 507fad8c7297ab49d67442a4b435220a053d76cd Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Wed, 1 Dec 2021 17:32:09 -0600 Subject: [PATCH 07/18] more flask conversion; unfinished; untested --- LmWebServer/flask_app/gridset.py | 551 ++++++++++++++++++++++++++++ LmWebServer/flask_app/occurrence.py | 3 +- LmWebServer/flask_app/routes.py | 90 ++++- requirements.txt | 9 +- 4 files changed, 635 insertions(+), 18 deletions(-) create mode 100644 LmWebServer/flask_app/gridset.py diff --git a/LmWebServer/flask_app/gridset.py b/LmWebServer/flask_app/gridset.py new file mode 100644 index 00000000..6f72a5af --- /dev/null +++ b/LmWebServer/flask_app/gridset.py @@ -0,0 +1,551 @@ +"""This module provides REST services for grid sets +""" +import dendropy +from flask import Response +from http import HTTPStatus +import json +import os +import werkzeug.exceptions as WEXC +import zipfile + +from lmpy import Matrix + +from LmCommon.common.lmconstants import ( + DEFAULT_TREE_SCHEMA, JobStatus, LMFormat, MatrixType, ProcessType) +from LmCommon.common.time import gmt +from LmCommon.encoding.layer_encoder import LayerEncoder +from LmDbServer.boom.boom_collate import BoomCollate +from LmServer.base.atom import Atom +from LmServer.base.layer import Vector +from LmServer.base.service_object import ServiceObject +from LmServer.common.lmconstants import ARCHIVE_PATH +from LmServer.legion.lm_matrix import LMMatrix +from LmServer.legion.mtx_column import MatrixColumn +from LmServer.legion.tree import Tree +from LmWebServer.common.lmconstants import HTTPMethod +from LmWebServer.services.api.v2.base import LmService +from LmWebServer.services.api.v2.matrix import MatrixService +from LmWebServer.services.common.access_control import check_user_permission +from LmWebServer.services.common.boom_post import BoomPoster +from LmWebServer.services.cp_tools.lm_format import lm_formatter + +BG_REF_ID_KEY = 'identifier' +BG_REF_KEY = 'hypothesis_package_reference' +BG_REF_TYPE_KEY = 'reference_type' +EVENT_FIELD_KEY = 'event_field' +FILE_NAME_KEY = 'file_name' +HYPOTHESIS_NAME_KEY = 'hypothesis_name' +KEYWORD_KEY = 'keywords' +LAYERS_KEY = 'layers' + +# ............................................................................. +def get_gridset(user_id, gridset_id): + """Attempts to get a GridSet""" + gridset = self.scribe.get_gridset( + gridset_id=gridset_id, fill_matrices=True) + + if gridset is None: + raise WEXC.NotFound('GridSet {} was not found'.format(gridset_id)) + + if check_user_permission(user_id, gridset, HTTPMethod.GET): + return gridset + + raise WEXC.Forbidden('User {} does not have permission to access GridSet {}'.format( + user_id, gridset_id)) + +# ............................................................................. 
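# get_gridset above is a module-level function but still refers to self.scribe;
# a sketch of a variant that is handed the scribe explicitly (the function and
# parameter names are assumptions):
def get_gridset_with_scribe(scribe, user_id, gridset_id):
    """Fetch a gridset through the given scribe, enforcing user permission."""
    gridset = scribe.get_gridset(gridset_id=gridset_id, fill_matrices=True)
    if gridset is None:
        raise WEXC.NotFound('GridSet {} was not found'.format(gridset_id))
    if check_user_permission(user_id, gridset, HTTPMethod.GET):
        return gridset
    raise WEXC.Forbidden(
        'User {} does not have permission to access GridSet {}'.format(
            user_id, gridset_id))

# .............................................................................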
+def summarize_object_statuses(summary): + """Summarizes a summary + + Args: + summary (:obj:`list` of :obj:`tuple` of :obj:`int`, :obj:`int`): A list + of (status, count) tuples for an object type + """ + complete = 0 + waiting = 0 + running = 0 + error = 0 + total = 0 + for status, count in summary: + if status <= JobStatus.INITIALIZE: + waiting += count + elif status < JobStatus.COMPLETE: + running += count + elif status == JobStatus.COMPLETE: + complete += count + else: + error += count + total += count + return (waiting, running, complete, error, total) + + +# ............................................................................. +class GridsetAnalysisService(LmService): + """This class is for the service representing gridset analyses. + + Todo: + * Enable DELETE? Could remove all existing analysis matrices + * Enable GET? Could this just be the outputs? + """ + + # ................................ + @lm_formatter + def POST( + self, gridset_id, do_mcpa=False, num_permutations=500, do_calc=False, **params): + """Adds a set of biogeographic hypotheses to the gridset""" + # Get gridset + gridset = self.get_gridset(gridset_id) + + # Check status of all matrices + if not all([mtx.status == JobStatus.COMPLETE for mtx in gridset.get_matrices()]): + raise WEXC.Conflict( + 'The gridset is not ready for analysis. All matrices must be complete') + + if do_mcpa: + mcpa_possible = ( + len(gridset.get_biogeographic_hypotheses()) > 0 and gridset.tree is not None) + if not mcpa_possible: + raise WEXC.Conflict( + 'The gridset must have a tree and biogeographic hypotheses to perform MCPA') + + # If everything is ready and we have analyses to run, do so + if do_mcpa or do_calc: + boom_col = BoomCollate( + gridset, do_pam_stats=do_calc, do_mcpa=do_mcpa, + num_permutations=num_permutations) + boom_col.create_workflow() + boom_col.close() + + # cherrypy.response.status = HTTPStatus.ACCEPTED + return gridset + + raise WEXC.BadRequest('Must specify at least one analysis to perform') + + # ................................ + def _get_user_dir(self): + """Get the user's workspace directory + + Todo: + Change this to use something at a lower level. This is using the + same path construction as the getBoomPackage script + """ + return os.path.join( + ARCHIVE_PATH, self.get_user_id(), 'uploads', 'biogeo') + + +# ............................................................................. +class GridsetBioGeoService(LmService): + """Service class for gridset biogeographic hypotheses + """ + + # ................................ + @lm_formatter + def GET(self, user_id, gridset_id, path_biogeo_id=None, **params): + """There is not a true service for limiting the biogeographic + hypothesis matrices in a gridset, but return all when listing + """ + gridset = get_gridset(user_id, gridset_id) + + bg_hyps = gridset.get_biogeographic_hypotheses() + + if path_biogeo_id is None: + return bg_hyps + + for hyp in bg_hyps: + if hyp.get_id() == path_biogeo_id: + return hyp + + # If not found 404... + raise WEXC.NotFound('Biogeographic hypothesis mtx {} not found for gridset {}'.format( + path_biogeo_id, gridset_id)) + + # ................................ 
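
# Worked example for summarize_object_statuses() defined above, using only
# JobStatus constants imported in this module; any status greater than
# COMPLETE is counted as an error.
from LmCommon.common.lmconstants import JobStatus

demo_summary = [
    (JobStatus.INITIALIZE, 2),    # still waiting
    (JobStatus.COMPLETE, 5),      # finished successfully
    (JobStatus.COMPLETE + 1, 1),  # anything past COMPLETE counts as an error
]
waiting, running, complete, error, total = summarize_object_statuses(demo_summary)
# -> (2, 0, 5, 1, 8)
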
+ @lm_formatter + def POST(self, user_id, gridset_id, bio_geo_data, **params): + """Adds a set of biogeographic hypotheses to the gridset""" + # Get gridset + gridset = get_gridset(user_id, gridset_id) + + # Process JSON + hypothesis_json = json.loads(bio_geo_data) + + # Check reference to get file + ref_obj = hypothesis_json[BG_REF_KEY] + + # If gridset, + if ref_obj[BG_REF_TYPE_KEY].lower() == 'gridset': + # copy hypotheses from gridset + try: + ref_gridset_id = int(ref_obj[BG_REF_ID_KEY]) + except Exception: + # Probably not an integer or something + raise WEXC.BadRequest('Cannot get gridset for reference identfier {}'.format( + ref_obj[BG_REF_ID_KEY])) + ref_gridset = get_gridset(user_id, ref_gridset_id) + + # Get hypotheses from other gridset + ret = [] + for bg_hyp in ref_gridset.get_biogeographic_hypotheses(): + new_bg_mtx = LMMatrix( + None, matrix_type=MatrixType.BIOGEO_HYPOTHESES, + process_type=ProcessType.ENCODE_HYPOTHESES, + gcm_code=bg_hyp.gcm_code, + alt_pred_code=bg_hyp.alt_pred_code, + date_code=bg_hyp.date_code, metadata=bg_hyp.mtx_metadata, + user_id=gridset.get_user_id(), gridset=gridset, + status=JobStatus.INITIALIZE) + inserted_bg = self.scribe.find_or_insert_matrix(new_bg_mtx) + inserted_bg.update_status(JobStatus.COMPLETE) + self.scribe.update_object(inserted_bg) + # Save the original grim data into the new location + bg_mtx = Matrix.load(bg_hyp.get_dlocation()) + bg_mtx.write(inserted_bg.get_dlocation()) + ret.append(inserted_bg) + elif ref_obj[BG_REF_TYPE_KEY].lower() == 'upload': + curr_time = gmt().mjd + # Check for uploaded biogeo package + package_name = ref_obj[BG_REF_ID_KEY] + package_filename = os.path.join( + self._get_user_dir(), '{}{}'.format( + package_name, LMFormat.ZIP.ext)) + + encoder = LayerEncoder(gridset.get_shapegrid().get_dlocation()) + # TODO(CJ): Pull this from config somewhere + min_coverage = 0.25 + + if os.path.exists(package_filename): + with open(package_filename) as in_f: + with zipfile.ZipFile(in_f, allowZip64=True) as zip_f: + # Get file names in package + avail_files = zip_f.namelist() + + for hyp_lyr in ref_obj[LAYERS_KEY]: + hyp_filename = hyp_lyr[FILE_NAME_KEY] + + # Check to see if file is in zip package + if hyp_filename in avail_files or \ + '{}{}'.format( + hyp_filename, LMFormat.SHAPE.ext + ) in avail_files: + if HYPOTHESIS_NAME_KEY in hyp_lyr: + hyp_name = hyp_lyr[HYPOTHESIS_NAME_KEY] + else: + hyp_name = os.path.splitext( + os.path.basename(hyp_filename))[0] + + if EVENT_FIELD_KEY in hyp_lyr: + event_field = hyp_lyr[EVENT_FIELD_KEY] + column_name = '{} - {}'.format( + hyp_name, event_field) + else: + event_field = None + column_name = hyp_name + + int_param_val_key = \ + MatrixColumn.INTERSECT_PARAM_VAL_NAME + lyr_meta = { + 'name': hyp_name, + int_param_val_key.lower(): event_field, + ServiceObject.META_DESCRIPTION.lower(): + '{} based on layer {}'.format( + 'Biogeographic hypotheses', + hyp_filename), + ServiceObject.META_KEYWORDS.lower(): [ + 'biogeographic hypothesis' + ] + } + + if KEYWORD_KEY in hyp_lyr: + lyr_meta[ + ServiceObject.META_KEYWORDS.lower() + ].extend(hyp_lyr[KEYWORD_KEY]) + + lyr = Vector( + hyp_name, gridset.get_user_id(), + gridset.epsg, dlocation=None, + metadata=lyr_meta, + data_format=LMFormat.SHAPE.driver, + val_attribute=event_field, + mod_time=curr_time) + updated_lyr = self.scribe.find_or_insert_layer( + lyr) + + # Get dlocation + # Loop through files to write all matching + # (ext) to out location + base_out = os.path.splitext( + updated_lyr.get_dlocation())[0] + + for ext in 
LMFormat.SHAPE.get_extensions(): + z_fn = '{}{}'.format(hyp_filename, ext) + out_fn = '{}{}'.format(base_out, ext) + if z_fn in avail_files: + zip_f.extract(z_fn, out_fn) + + # Add it to the list of files to be encoded + encoder.encode_biogeographic_hypothesis( + updated_lyr.get_dlocation(), column_name, + min_coverage, event_field=event_field) + else: + raise WEXC.BadRequest('{} missing from package'.format(hyp_filename)) + + # Create biogeo matrix + # Add the matrix to contain biogeo hypotheses layer + # intersections + meta = { + ServiceObject.META_DESCRIPTION.lower(): + 'Biogeographic Hypotheses from package {}'.format( + package_name), + ServiceObject.META_KEYWORDS.lower(): [ + 'biogeographic hypotheses' + ] + } + + tmp_mtx = LMMatrix( + None, matrix_type=MatrixType.BIOGEO_HYPOTHESES, + process_type=ProcessType.ENCODE_HYPOTHESES, + user_id=self.get_user_id(), gridset=gridset, metadata=meta, + status=JobStatus.INITIALIZE, status_mod_time=curr_time) + bg_mtx = self.scribe.find_or_insert_matrix(tmp_mtx) + + # Encode the hypotheses + enc_mtx = encoder.get_encoded_matrix() + enc_mtx.write(bg_mtx.get_dlocation()) + + # We'll return the newly inserted biogeo matrix + ret = [bg_mtx] + else: + raise WEXC.NotFound('Biogeography package: {} was not found'.format( + package_name)) + else: + raise WEXC.BadRequest('Cannot add hypotheses with reference type: {}'.format( + ref_obj[BG_REF_TYPE_KEY])) + + # Return resulting list of matrices + return ret + + # ................................ + def _get_user_dir(self, user_id): + """Get the user's workspace directory + + Todo: + Change this to use something at a lower level. This is using the + same path construction as the getBoomPackage script + """ + return os.path.join( + ARCHIVE_PATH, user_id, 'uploads', 'biogeo') + + +# ............................................................................. +class GridsetProgressService(LmService): + """Service class for gridset progress + """ + + # ................................ + @lm_formatter + def get_gridset_progress(self, gridset_id, detail=False, **params): + """Get progress for a gridset""" + return ('gridset', gridset_id, detail) + + +# ............................................................................. +class GridsetTreeService(LmService): + """Service for the tree of a gridset + """ + + # ................................ + def delete_tree(self, user_id, tree_id): + """Attempts to delete a tree + + Args: + path_tree_id: The id of the tree to delete + """ + tree = self.scribe.get_tree(tree_id=tree_id) + + if tree is None: + raise WEXC.NotFound('Tree {} not found'.format(tree_id)) + + # If allowed to, delete + if check_user_permission(user_id, tree, HTTPMethod.DELETE): + success = self.scribe.delete_object(tree) + if success: + return Response(status=HTTPStatus.NO_CONTENT) + + # TODO: How can this happen? Make sure we catch those cases and + # respond appropriately. We don't want 500 errors + else: + raise WEXC.InternalServerError('Failed to delete tree') + + else: + raise WEXC.Forbidden('User does not have permission to delete this tree') + + # ................................ + @lm_formatter + def GET(self, user_id, gridset_id, tree_id=None, include_csv=None, include_sdms=None, **params): + """Just return the gridset tree, no listing at this time + + TODO: remove unused args. How is this called? + """ + gridset = get_gridset(user_id, gridset_id) + return gridset.tree + + # ................................ 
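
# Illustrative request body for the biogeographic-hypotheses POST handler of
# GridsetBioGeoService earlier in this module, spelled out with the literal
# values of the BG_REF_* / LAYERS_KEY constants defined at the top of the
# file. File, field, and id values are hypothetical.
example_upload_reference = {
    'hypothesis_package_reference': {        # BG_REF_KEY
        'reference_type': 'upload',          # BG_REF_TYPE_KEY: 'upload' or 'gridset'
        'identifier': 'my_biogeo_package',   # BG_REF_ID_KEY: base name of the uploaded zip
        'layers': [                          # LAYERS_KEY
            {
                'file_name': 'merriam_lines',       # FILE_NAME_KEY: shapefile base name in the zip
                'hypothesis_name': 'Merriam line',  # HYPOTHESIS_NAME_KEY (optional)
                'event_field': 'event',             # EVENT_FIELD_KEY (optional)
                'keywords': ['vicariance'],         # KEYWORD_KEY (optional)
            },
        ],
    },
}

example_gridset_reference = {
    'hypothesis_package_reference': {
        'reference_type': 'gridset',         # copy hypotheses from another gridset
        'identifier': 42,                    # hypothetical source gridset id
    },
}
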
+ @lm_formatter + def post_tree(self, user_id, gridset_id, tree_id=None, name=None, tree_data=None, tree_schema=DEFAULT_TREE_SCHEMA, **params): + """Posts a new tree and adds it to the gridset + + Note: Calling function in routes.py should retrieve tree_data with: + tree_data = request.get_json() + """ + if tree_id is not None: + tree = self.scribe.get_tree(tree_id=tree_id) + + if tree is None: + raise WEXC.NotFound('Tree {} was not found'.format(tree_id)) + + if not check_user_permission(user_id, tree, HTTPMethod.GET): + pass + + else: + # Raise exception if user does not have permission + raise WEXC.Forbidden('User {} cannot access tree {}'.format(user_id, tree_id)) + + else: + if name is None: + raise WEXC.BadRequest('Must provide name for tree') + + tree = dendropy.Tree.get(file=tree_data, schema=tree_schema) + new_tree = Tree(name, user_id=self.get_user_id()) + updated_tree = self.scribe.find_or_insert_tree(new_tree) + updated_tree.set_tree(tree) + updated_tree.write_tree() + updated_tree.mod_time = gmt().mjd + self.scribe.update_object(updated_tree) + + gridset = get_gridset(user_id, gridset_id) + gridset.add_tree(tree) + gridset.update_mod_time(gmt().mjd) + self.scribe.update_object(gridset) + + return updated_tree + + +# ............................................................................. +class GridsetService(LmService): + """Class for gridset services""" + analysis = GridsetAnalysisService() + biogeo = GridsetBioGeoService() + matrix = MatrixService() + progress = GridsetProgressService() + tree = GridsetTreeService() + + # ................................ + def delete_gridset(self, user_id, gridset_id): + """Attempts to delete a grid set + + Args: + user_id (str): The user authorized for this operation. + gridset_id (int): The id of the grid set to delete + """ + gridset = self.scribe.get_gridset(gridset_id=gridset_id) + + if gridset is None: + raise WEXC.NotFound('Gridset {} not found'.format(gridset_id)) + + # If allowed to, delete + if check_user_permission(user_id, gridset, HTTPMethod.DELETE): + success = self.scribe.delete_object(gridset) + if success: + return Response(status=HTTPStatus.NO_CONTENT) + + # TODO: How can this happen? Make sure we catch those cases and + # respond appropriately. We don't want 500 errors + raise WEXC.InternalServerError('Failed to delete grid set') + + raise WEXC.Forbidden('User does not have permission to delete this grid set') + + + # ................................ + def get_gridset_makeflow_status(self, gridset_id=None): + """Perform a HTTP HEAD request to get general status""" + if gridset_id is not None: + mf_summary = self.scribe.summarize_mf_chains_for_gridset(gridset_id) + (waiting_mfs, running_mfs, _, _, _) = summarize_object_statuses(mf_summary) + if waiting_mfs + running_mfs == 0: + response = Response(status=HTTPStatus.OK) + else: + response = Response(status=HTTPStatus.ACCEPTED) + else: + response = Response(status=HTTPStatus.OK) + return response + + # ................................ + @lm_formatter + def post_boom_data(self, user_id, user_email, gridset_data, **params): + """Posts a new grid set""" + boom_post = BoomPoster(user_id, user_email, gridset_data, self.scribe) + gridset = boom_post.init_boom() + + # Return atom of posted gridset + return Atom( + gridset.get_id(), gridset.name, gridset.metadata_url, + gridset.mod_time, epsg=gridset.epsg_code) + + # ................................ 
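
# Minimal sketch of the dendropy parsing step used by post_tree() in
# GridsetTreeService above, assuming the tree arrives as a Newick string; the
# service itself defaults to DEFAULT_TREE_SCHEMA and reads from `tree_data`.
import dendropy

newick_data = '((A:1.0,B:1.0):0.5,C:1.5);'  # hypothetical tree
demo_tree = dendropy.Tree.get(data=newick_data, schema='newick')
print(len(demo_tree.leaf_nodes()))          # -> 3
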
+ @lm_formatter + def count_gridsets( + self, user_id, after_time=None, before_time=None, epsg_code=None, meta_string=None, + shapegrid_id=None): + """Count GridSet objects matching the specified criteria + + Args: + user_id (str): The user authorized for this operation. Note that this may not be + the same user as is logged into the system + after_time (float): Time in MJD of the earliest modtime for filtering + before_time (float): Time in MJD of the latest modtime for filtering + epsg_code (str): EPSG code for the SRS for filtering layers + """ + gridset_count = self.scribe.count_gridsets( + user_id=user_id, shapegrid_layer_id=shapegrid_id, meta_string=meta_string, + after_time=after_time, before_time=before_time, epsg=epsg_code) + return {'count': gridset_count} + + # ................................ + @lm_formatter + def get_gridset(self, user_id, gridset_id): + """Attempt to get a GridSet + """ + gridset = self.scribe.get_gridset(gridset_id=gridset_id, fill_matrices=True) + if gridset is None: + raise WEXC.NotFound('Gridset {} was not found'.format(gridset_id)) + + if check_user_permission(user_id, gridset, HTTPMethod.GET): + return gridset + + raise WEXC.Forbidden('User {} does not have permission to access Gridset {}'.format( + user_id, gridset_id)) + + # ................................ + @lm_formatter + def list_gridsets( + self, user_id, after_time=None, before_time=None, epsg_code=None, meta_string=None, + shapegrid_id=None, limit=100, offset=0): + """List GridSet objects matching the specified criteria + + Args: + user_id: The user to count GridSets for. Note that this may not be + the same user logged into the system + after_time: (optional) Return GridSets modified after this time + (Modified Julian Day) + before_time: (optional) Return GridSets modified before this time + (Modified Julian Day) + epsg: (optional) Return GridSets with this EPSG code + limit: (optional) Return this number of GridSets, at most + offset: (optional) Offset the returned GridSets by this number + """ + gridset_atoms = self.scribe.list_gridsets( + offset, limit, user_id=user_id, shapegrid_layer_id=shapegrid_id, + meta_string=meta_string, after_time=after_time, + before_time=before_time, epsg=epsg_code) + + return gridset_atoms diff --git a/LmWebServer/flask_app/occurrence.py b/LmWebServer/flask_app/occurrence.py index ede1c19c..ea714f98 100644 --- a/LmWebServer/flask_app/occurrence.py +++ b/LmWebServer/flask_app/occurrence.py @@ -1,4 +1,5 @@ """This module provides REST services for Occurrence sets""" +from http import HTTPStatus import werkzeug.exceptions as WEXC from LmCommon.common.lmconstants import (JobStatus) @@ -33,7 +34,7 @@ def delete_occurrence_set(self, user_id, occset_id): if check_user_permission(user_id, occ, HTTPMethod.DELETE): success = self.scribe.delete_object(occ) if success: - return True + return HTTPStatus.NO_CONTENT # If unsuccessful, fail raise WEXC.InternalServerError('Failed to delete occurrence set') diff --git a/LmWebServer/flask_app/routes.py b/LmWebServer/flask_app/routes.py index 9bd58093..5d78cbec 100644 --- a/LmWebServer/flask_app/routes.py +++ b/LmWebServer/flask_app/routes.py @@ -1,10 +1,11 @@ from flask import Flask, redirect, render_template, request, session from werkzeug.exceptions import BadRequest, NotFound -from LmWebServer.flask_app.occurrence import OccurrenceLayerService from LmWebServer.flask_app.base import LmService from LmWebServer.flask_app.biotaphy_names import GBIFTaxonService from LmWebServer.flask_app.biotaphy_points import 
IDigBioOccurrenceService +from LmWebServer.flask_app.occurrence import OccurrenceLayerService +from LmWebServer.flask_app.gridset import GridsetService app = Flask(__name__.split('.')[0]) @@ -64,7 +65,13 @@ def occurrence(identifier): gridset_id = request.args.get('gridset_id', default = None, type = str) fill_points = request.args.get('fill_points', default = False, type = bool) - if identifier.lower() == 'count': + if identifier is None: + response = svc.list_occurrence_sets( + user_id, after_time=after_time, before_time=before_time, display_name=display_name, + epsg_code=epsg_code, minimum_number_of_points=minimum_number_of_points, limit=limit, + offset=offset, gridset_id=gridset_id, status=status) + + elif identifier.lower() == 'count': response = svc.count_occurrence_sets( user_id, after_time=after_time, before_time=before_time, display_name=display_name, epsg_code=epsg_code, minimum_number_of_points=minimum_number_of_points, @@ -75,13 +82,7 @@ def occurrence(identifier): user_id, after_time=after_time, before_time=before_time, display_name=display_name, epsg_code=epsg_code, minimum_number_of_points=minimum_number_of_points, limit=limit, offset=offset, gridset_id=gridset_id, status=status) - - elif identifier is None: - response = svc.list_occurrence_sets( - user_id, after_time=after_time, before_time=before_time, display_name=display_name, - epsg_code=epsg_code, minimum_number_of_points=minimum_number_of_points, limit=limit, - offset=offset, gridset_id=gridset_id, status=status) - + else: try: occid = int(identifier) @@ -89,8 +90,7 @@ def occurrence(identifier): return BadRequest('{} is not a valid layer ID'.format(identifier)) else: response = svc.get_occurrence_set(occid, fill_points=fill_points) - - + return response # ..................................................................................... @@ -174,11 +174,75 @@ def biotaphynames(): @app.route('/api/v2/biotaphypoints', methods=['POST']) def biotaphypoints(): try: - taxon_ids = request.get_json() + taxonids_obj = request.get_json() except: return BadRequest('Taxon ID list must be in JSON format') else: svc = IDigBioOccurrenceService() - response = svc.get_occurrence_counts_for_taxonids(names_obj) + response = svc.get_occurrence_counts_for_taxonids(taxonids_obj) + return response + +# ..................................................................................... 
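
# Illustrative client calls against the occurrence route reorganized above.
# The route path itself is not visible in this hunk, so '/api/v2/occurrence'
# is assumed here, as are the base URL and filter values.
import requests

BASE_URL = 'http://localhost:5000'  # assumed local Flask development server

# Count occurrence sets modified after a given time (MJD) with at least 30 points
count = requests.get(
    '{}/api/v2/occurrence/count'.format(BASE_URL),
    params={'after_time': 59000.0, 'minimum_number_of_points': 30}).json()

# Fetch one occurrence set by id, including its points
occ = requests.get(
    '{}/api/v2/occurrence/123'.format(BASE_URL), params={'fill_points': 1}).json()
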
+@app.route('/api/v2/gridset/', methods=['GET', 'POST', 'DELETE']) +def gridset(identifier): + svc = GridsetService() + user = svc.get_user() + user_id = user.user_id + + if request.method == 'POST' and request.is_json: + gridset_data = request.get_json() + svc.post_boom_data(user_id, user.email, gridset_data) + + elif request.method == 'DELETE': + svc.delete_gridset(user_id, identifier) + + elif request.method == 'GET': + after_time = request.args.get('after_time', default = None, type = str) + before_time = request.args.get('before_time', default = None, type = str) + epsg_code = request.args.get('epsg_code', default= None, type = str) + meta_string = request.args.get('meta_string', default= None, type = str) + shapegrid_id = request.args.get('shapegrid_id', default= None, type = int) + limit = request.args.get('limit', default = 100, type = int) + offset = request.args.get('offset', default = 0, type = int) + + if identifier is None: + response = svc.list_gridsets( + user_id, after_time=after_time, before_time=before_time, epsg_code=epsg_code, + meta_string=meta_string, shapegrid_id=shapegrid_id, limit=limit, offset=offset) + + elif identifier.lower() == 'count': + response = svc.count_gridsets( + user_id, after_time=after_time, before_time=before_time, epsg_code=epsg_code, + meta_string=meta_string, shapegrid_id=shapegrid_id) + + else: + try: + gridset_id = int(identifier) + except: + return BadRequest('{} is not a valid gridset ID'.format(identifier)) + else: + response = svc.get_gridset(user_id, gridset_id) + return response + # biotaphynames = GBIFTaxonService() + # biotaphypoints = IDigBioOccurrenceService() + # biotaphytree = OpenTreeService() + # envlayer = EnvLayerService() + # gbifparser = GBIFNamesService() + # globalpam = GlobalPAMService() + # gridset = GridsetService() + # hint = SpeciesHintService() + # layer = LayerService() + # occurrence = OccurrenceLayerService() + # opentree = OpenTreeService() + # scenario = ScenarioService() + # scenpackage = ScenarioPackageService() + # sdmproject = SdmProjectService() + # shapegrid = ShapegridService() + # snippet = SnippetService() + # rawsolr = RawSolrService() + # taxonomy = TaxonomyHintService() + # tree = TreeService() + # upload = UserUploadService() + diff --git a/requirements.txt b/requirements.txt index f1b3b9f2..66fcb8ca 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,8 @@ Cython>=0.29.24 -Flask>=2.0.2 -mapscript>=7.6.0 -scipy>=1.7.2 +Flask (brings Werkzeug) +mapscript +scipy (brings numpy) matplotlib>=3.5.0 -psycopg2-binary>=2.9.2 +psycopg2-binary requests>=2.26.0 +dendropy \ No newline at end of file From e40cabdef1a3a086fc9555253447b09d3c67e357 Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Thu, 2 Dec 2021 09:15:00 -0600 Subject: [PATCH 08/18] updates; unfinished, untested --- LmWebServer/flask_app/biotaphy_names.py | 2 +- LmWebServer/flask_app/env_layer.py | 2 +- LmWebServer/flask_app/gbif_parser.py | 50 ++++++++ LmWebServer/flask_app/global_pam.py | 152 ++++++++++++++++++++++++ LmWebServer/flask_app/layer.py | 2 +- LmWebServer/flask_app/occurrence.py | 19 ++- LmWebServer/flask_app/routes.py | 44 ++++++- 7 files changed, 261 insertions(+), 10 deletions(-) create mode 100644 LmWebServer/flask_app/gbif_parser.py create mode 100644 LmWebServer/flask_app/global_pam.py diff --git a/LmWebServer/flask_app/biotaphy_names.py b/LmWebServer/flask_app/biotaphy_names.py index 19f8ed0e..2132f3f3 100644 --- a/LmWebServer/flask_app/biotaphy_names.py +++ b/LmWebServer/flask_app/biotaphy_names.py @@ -16,7 +16,7 @@ def 
get_gbif_results(self, names_obj): """Queries GBIF for accepted names matching the provided list of names Args: - names_obj: a JSON list of name strings to match + names_obj(dict): a JSON list of name strings to match """ if not isinstance(names_obj, list): return WEXC.BadRequest('Name data must be a JSON list') diff --git a/LmWebServer/flask_app/env_layer.py b/LmWebServer/flask_app/env_layer.py index 1eb519b6..2f614d7b 100644 --- a/LmWebServer/flask_app/env_layer.py +++ b/LmWebServer/flask_app/env_layer.py @@ -29,7 +29,7 @@ def count_env_layers( env_code (str): Environmental type code for filtering environmental layers env_type_id (int): Database key of the environmental type for filtering environmental layers gcm_code (str) = GCM code for filtering environmental layers - scenario_code (str): Database key for filtering to environmental layers belonging to one scenario + scenario_code (str): Scenario code for filtering to environmental layers belonging to one scenario """ layer_count = self.scribe.count_env_layers( user_id=user_id, after_time=after_time, before_time=before_time, env_code=env_code, gcm_code=gcm_code, diff --git a/LmWebServer/flask_app/gbif_parser.py b/LmWebServer/flask_app/gbif_parser.py new file mode 100644 index 00000000..17871d6f --- /dev/null +++ b/LmWebServer/flask_app/gbif_parser.py @@ -0,0 +1,50 @@ +"""This module provides a wrapper around GBIF's names service + +TODO: Delete? This and biotaphy_names appear identical +""" +from werkzeug.exceptions import BadRequest + +from LmCommon.common.api_query import GbifAPI +from LmWebServer.services.api.v2.base import LmService +from LmWebServer.services.cp_tools.lm_format import lm_formatter + +# TODO: These need to go into a constants file +ACCEPTED_NAME_KEY = 'accepted_name' +SEARCH_NAME_KEY = 'search_name' +SPECIES_KEY_KEY = 'speciesKey' +SPECIES_NAME_KEY = 'species' +TAXON_ID_KEY = 'taxon_id' + + +# ............................................................................. +class GBIFNamesService(LmService): + """Service to get GBIF accepted names""" + + # ................................ 
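
# The get_gbif_names() method that follows returns one record per submitted
# name, keyed by the constants above. Illustrative output only; the values
# shown are hypothetical.
example_response = [
    {'search_name': 'Acer saccharum', 'accepted_name': 'Acer saccharum', 'taxon_id': 1234567},
    {'search_name': 'Not a real name', 'accepted_name': None, 'taxon_id': None},
]
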
+ @lm_formatter + def get_gbif_names(self, names_obj): + """Queries GBIF for accepted names matching the provided list of names + + Args: + names_obj: a JSON list of name strings to match + """ + if not isinstance(names_obj, list): + return BadRequest('Name data must be a JSON list') + + retval = [] + for name in names_obj: + try: + gbif_resp = GbifAPI.get_accepted_names(name)[0] + retval.append({ + SEARCH_NAME_KEY: name, + ACCEPTED_NAME_KEY: gbif_resp[SPECIES_NAME_KEY], + TAXON_ID_KEY: gbif_resp[SPECIES_KEY_KEY] + }) + except Exception as e: + self.log.error('Could not get accepted name from GBIF for name {}: {}'.format(name, e)) + retval.append({ + SEARCH_NAME_KEY: name, + ACCEPTED_NAME_KEY: None, + TAXON_ID_KEY: None + }) + return retval diff --git a/LmWebServer/flask_app/global_pam.py b/LmWebServer/flask_app/global_pam.py new file mode 100644 index 00000000..7fd96c3f --- /dev/null +++ b/LmWebServer/flask_app/global_pam.py @@ -0,0 +1,152 @@ +"""This module provides services for query and subsetting of global PAMs""" +from werkzeug.exceptions import BadRequest + +from LmServer.base.atom import Atom +from LmServer.common.lmconstants import SOLR_FIELDS, HTTPStatus +from LmServer.common.solr import facet_archive_on_gridset, query_archive_index +from LmServer.common.subset import subset_global_pam +from LmWebServer.services.api.v2.base import LmService +from LmWebServer.services.cp_tools.lm_format import lm_formatter + + +# ............................................................................. +class _GridsetFacetService(LmService): + """This service retrieves gridsets within the solr index for the user + """ + + # ................................ + @lm_formatter + def GET(self, user_id=None, **params): + """Queries the Global PAM for matching results + """ + facets = facet_archive_on_gridset(user_id=user_id) + # NOTE: Response is list of id, count but not separated + i = 0 + counts = [] + while i < len(facets): + counts.append({ + SOLR_FIELDS.GRIDSET_ID: str(facets[i]), + 'count': int(facets[i + 1]) + }) + i += 2 + + return { + SOLR_FIELDS.GRIDSET_ID: counts + } + + +# ............................................................................. +class GlobalPAMService(LmService): + """This class is responsible for the Global PAM services.""" + gridset = _GridsetFacetService() + + # ................................ + @lm_formatter + def subset_global_pam( + self, user_id, algorithm_code=None, bbox=None, display_name=None, gridset_id=None, + model_scenario_code=None, prj_scen_code=None, point_max=None, point_min=None, squid=None, + taxon_kingdom=None, taxon_phylum=None, taxon_class=None, taxon_order=None, taxon_family=None, + taxon_genus=None, taxon_species=None, **params): + """Queries the Global PAM and returns a subset of intersected layers (PAVs) matching the parameters + + Args: + user_id (str): The user authorized for this operation. 
Note that this may not be + the same user as is logged into the system + algorithm_code (str): Code for filtering SDM layers modeled with this algorithm to populate a PAM + bbox (str): Bounding box in format 'minx, miny, maxx, maxy' for subsetting layers to populate a PAM + display_name (str): Taxonomic name for filtering layers to populate a PAM + gridset_id (int): Database key for gridset to subset for a PAM + point_max (int): Maximum number of points for filtering layers to populate a PAM + point_min (int): Minimum number of points for filtering layers to populate a PAM + model_scenario_code (str): Code for filtering SDM layers modeled with this scenario to populate a PAM + prj_scen_code (str): Code for filtering SDM layers projected with this scenario to populate a PAM + squid (str): Lifemapper unique identifier for filtering layers to populate a PAM + taxon_kingdom (str): Kingdom for filtering layers to populate a PAM + taxon_phylum (str): Phylum for filtering layers to populate a PAM + taxon_class (str): Class for filtering layers to populate a PAM + taxon_order (str): Order for filtering layers to populate a PAM + taxon_family (str): Family for filtering layers to populate a PAM + taxon_genus (str): Genus for filtering layers to populate a PAM + taxon_species (str): Species for filtering layers to populate a PAM + """ + return self._make_solr_query( + algorithm_code=algorithm_code, bbox=bbox, display_name=display_name, gridset_id=gridset_id, + model_scenario_code=model_scenario_code, point_max=point_max, point_min=point_min, + user_id=user_id, projection_scenario_code=prj_scen_code, squid=squid, + tax_kingdom=taxon_kingdom, tax_phylum=taxon_phylum, tax_class=taxon_class, tax_order=taxon_order, + tax_family=taxon_family, tax_genus=taxon_genus, tax_species=taxon_species) + + # ................................ + @lm_formatter + def post_global_pam_subset(self, user_id, archive_name, gridset_id, algorithm_code=None, bbox=None, + cell_size=None, model_scenario_code=None, point_max=None, + point_min=None, user_id=None, prj_scen_code=None, squid=None, + taxon_kingdom=None, taxon_phylum=None, taxon_class=None, + taxon_order=None, taxon_family=None, taxon_genus=None, + taxon_species=None, display_name=None, **params): + """Queries the Global PAM, and creates a gridset initializing a new PAM from layers matching the parameters, + + Args: + user_id (str): The user authorized for this operation. 
Note that this may not be + the same user as is logged into the system + archive_name (str): Name to be associated with the new gridset + gridset_id (int): Database key for gridset to subset for a PAM + algorithm_code (str): Code for filtering SDM layers modeled with this algorithm to populate a PAM + bbox (str): Bounding box in format 'minx, miny, maxx, maxy' for subsetting layers to populate a PAM + cell_size (float): Size of cells (in map units) to be used for intersections when creating the new PAM + display_name (str): Taxonomic name for filtering layers to populate a PAM + point_max (int): Maximum number of points for filtering layers to populate a PAM + point_min (int): Minimum number of points for filtering layers to populate a PAM + model_scenario_code (str): Code for filtering SDM layers modeled with this scenario to populate a PAM + prj_scen_code (str): Code for filtering SDM layers projected with this scenario to populate a PAM + squid (str): Lifemapper unique identifier for filtering layers to populate a PAM + taxon_kingdom (str): Kingdom for filtering layers to populate a PAM + taxon_phylum (str): Phylum for filtering layers to populate a PAM + taxon_class (str): Class for filtering layers to populate a PAM + taxon_order (str): Order for filtering layers to populate a PAM + taxon_family (str): Family for filtering layers to populate a PAM + taxon_genus (str): Genus for filtering layers to populate a PAM + taxon_species (str): Species for filtering layers to populate a PAM + """ + matches = self._make_solr_query( + user_id, algorithm_code=algorithm_code, bbox=bbox, display_name=display_name, gridset_id=gridset_id, + model_scenario_code=model_scenario_code, projection_scenario_code=prj_scen_code, + point_max=point_max, point_min=point_min, squid=squid, tax_kingdom=taxon_kingdom, tax_phylum=taxon_phylum, + tax_class=taxon_class, tax_order=taxon_order, tax_family=taxon_family, tax_genus=taxon_genus, + tax_species=taxon_species) + # Make bbox tuple + if bbox: + bbox = tuple([float(i) for i in bbox.split(',')]) + + gridset = self._subset_global_pam(archive_name, matches, bbox=bbox, cell_size=cell_size) + HTTPStatus.ACCEPTED + return Atom( + gridset.get_id(), gridset.name, gridset.metadata_url, + gridset.mod_time, epsg=gridset.epsg_code) + + # ................................ + def _make_solr_query( + self, user_id, algorithm_code=None, bbox=None, display_name=None, gridset_id=None, + model_scenario_code=None, projection_scenario_code=None, point_max=None, point_min=None, + squid=None, tax_kingdom=None, tax_phylum=None, tax_class=None, tax_order=None, tax_family=None, + tax_genus=None, tax_species=None): + return query_archive_index( + algorithm_code=algorithm_code, bbox=bbox, display_name=display_name, gridset_id=gridset_id, + model_scenario_code=model_scenario_code, projection_scenario_code=projection_scenario_code, + point_max=point_max, point_min=point_min, squid=squid, tax_kingdom=tax_kingdom, + tax_phylum=tax_phylum, tax_class=tax_class, tax_order=tax_order, tax_family=tax_family, + tax_genus=tax_genus, tax_species=tax_species, user_id=user_id) + + # ................................ + def _subset_global_pam(self, user_id, archive_name, matches, bbox=None, cell_size=None): + """Creates a subset of a global PAM and create a new grid set + + Args: + user_id (str): The user authorized for this operation. 
+ archive_name (str) : The name of this new grid set + matches (list) : Solr hits to be used for subsetting + bbox (str): Bounding box in format 'minx, miny, maxx, maxy' for subsetting layers to populate a PAM + cell_size (float): Size of cells (in map units) to be used for intersections when creating the new PAM + """ + return subset_global_pam( + archive_name, matches, user_id, bbox=bbox, cell_size=cell_size, scribe=self.scribe) diff --git a/LmWebServer/flask_app/layer.py b/LmWebServer/flask_app/layer.py index 19aea028..7c6ac0b4 100644 --- a/LmWebServer/flask_app/layer.py +++ b/LmWebServer/flask_app/layer.py @@ -30,7 +30,7 @@ def count_env_layers( env_code (str): Environmental type code for filtering environmental layers env_type_id (int): Database key of the environmental type for filtering environmental layers gcm_code (str) = GCM code for filtering environmental layers - scenario_code (str): Database key for filtering to environmental layers belonging to one scenario + scenario_code (str): Code for filtering to environmental layers belonging to one scenario """ layer_count = self.scribe.count_env_layers( user_id=user_id, after_time=after_time, before_time=before_time, env_code=env_code, gcm_code=gcm_code, diff --git a/LmWebServer/flask_app/occurrence.py b/LmWebServer/flask_app/occurrence.py index ede1c19c..13ae37f8 100644 --- a/LmWebServer/flask_app/occurrence.py +++ b/LmWebServer/flask_app/occurrence.py @@ -62,11 +62,22 @@ def post_boom_data(self, user_id, user_email, boom_data, **params): # ................................ @lm_formatter - def count_occurrence_sets(self, user_id, after_time=None, - before_time=None, display_name=None, - epsg_code=None, minimum_number_of_points=1, - status=None, gridset_id=None): + def count_occurrence_sets( + self, user_id, after_time=None, before_time=None, display_name=None, epsg_code=None, + minimum_number_of_points=1, status=None, gridset_id=None): """Return a count of occurrence sets matching the specified criteria + + Args: + user_id (str): The user authorized for this operation. Note that this may not be + the same user as is logged into the system + after_time (float): Time in MJD of the earliest modtime for filtering + before_time (float): Time in MJD of the latest modtime for filtering + display_name (str): Taxonomic name for filtering + squid (str): Unique taxon identifier for filtering + minimum_number_of_points (int): Minimum number of points for filtering + status (int): Status code for filtering + gridset_id (int): Database key to filter occurrencesets within a gridset + """ after_status = None before_status = None diff --git a/LmWebServer/flask_app/routes.py b/LmWebServer/flask_app/routes.py index 9bd58093..a5cf4740 100644 --- a/LmWebServer/flask_app/routes.py +++ b/LmWebServer/flask_app/routes.py @@ -5,6 +5,9 @@ from LmWebServer.flask_app.base import LmService from LmWebServer.flask_app.biotaphy_names import GBIFTaxonService from LmWebServer.flask_app.biotaphy_points import IDigBioOccurrenceService +from LmWebServer.flask_app.gbif_parser import GBIFNamesService +from LmWebServer.flask_app.global_pam import GlobalPAMService +from LmWebServer.flask_app.layer import LayerService app = Flask(__name__.split('.')[0]) @@ -104,7 +107,7 @@ def layer(identifier): Returns: dict: A dictionary of metadata for the requested record. 
""" - svc = OccurrenceLayerService() + svc = LayerService() user = svc.get_user() user_id = user.user_id @@ -174,11 +177,46 @@ def biotaphynames(): @app.route('/api/v2/biotaphypoints', methods=['POST']) def biotaphypoints(): try: - taxon_ids = request.get_json() + taxonids_obj = request.get_json() except: return BadRequest('Taxon ID list must be in JSON format') else: svc = IDigBioOccurrenceService() - response = svc.get_occurrence_counts_for_taxonids(names_obj) + response = svc.get_occurrence_counts_for_taxonids(taxonids_obj) return response +# ..................................................................................... +@app.route('/api/v2/gbifparser', methods=['POST']) +def gbifparser(): + try: + names_obj = request.get_json() + except: + return BadRequest('Name list must be in JSON format') + else: + svc = GBIFNamesService() + response = svc.get_gbif_names(names_obj) + return response + +# ..................................................................................... +@app.route('/api/v2/globalpam', methods=['GET', 'POST']) +def globalpam(): + svc = GlobalPAMService()() + user = svc.get_user() + user_id = user.user_id + + if request.method == 'POST': + svc.post_boom_data(user_id, user.email, boom_data) + + elif request.method == 'GET': + after_time = request.args.get('after_time', default = None, type = str) + before_time = request.args.get('before_time', default = None, type = str) + display_name = request.args.get('display_name', default = None, type = str) + epsg_code = request.args.get('epsg_code', default= None, type = str) + minimum_number_of_points = request.args.get('minimum_number_of_points', default = 1, type = int) + limit = request.args.get('limit', default = 100, type = int) + offset = request.args.get('offset', default = 0, type = int) + # url_user = request.args.get('url_user', default = None, type = str) + status = request.args.get('status', default = None, type = str) + gridset_id = request.args.get('gridset_id', default = None, type = str) + fill_points = request.args.get('fill_points', default = False, type = bool) + From 25475f1be1cc2ce04b07551654381564af6d217e Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Thu, 2 Dec 2021 14:28:42 -0600 Subject: [PATCH 09/18] updates; unfinished, untested --- LmCommon/common/api_query.py | 4 +- LmWebServer/flask_app/global_pam.py | 87 ++++----- LmWebServer/flask_app/gridset.py | 267 +++++++++++--------------- LmWebServer/flask_app/routes.py | 72 +++++-- LmWebServer/flask_app/species_hint.py | 51 +++++ requirements.txt | 14 +- 6 files changed, 269 insertions(+), 226 deletions(-) create mode 100644 LmWebServer/flask_app/species_hint.py diff --git a/LmCommon/common/api_query.py b/LmCommon/common/api_query.py index 0bd57f6b..4403ce76 100644 --- a/LmCommon/common/api_query.py +++ b/LmCommon/common/api_query.py @@ -2,6 +2,7 @@ """ from copy import copy import csv +from http import HTTPStatus import json import os import urllib @@ -12,8 +13,7 @@ from LmCommon.common.lm_xml import fromstring, deserialize from LmCommon.common.lmconstants import ( - BISON, BisonQuery, DwcNames, GBIF, HTTPStatus, Idigbio, IdigbioQuery, - Itis, URL_ESCAPES, ENCODING) + BISON, BisonQuery, DwcNames, GBIF, Idigbio, IdigbioQuery, Itis, URL_ESCAPES, ENCODING) from LmCommon.common.occ_parse import OccDataParser from LmCommon.common.ready_file import ready_filename diff --git a/LmWebServer/flask_app/global_pam.py b/LmWebServer/flask_app/global_pam.py index 7fd96c3f..86da8141 100644 --- a/LmWebServer/flask_app/global_pam.py +++ 
b/LmWebServer/flask_app/global_pam.py @@ -1,8 +1,9 @@ """This module provides services for query and subsetting of global PAMs""" -from werkzeug.exceptions import BadRequest +from flask import make_response +from http import HTTPStatus from LmServer.base.atom import Atom -from LmServer.common.lmconstants import SOLR_FIELDS, HTTPStatus +from LmServer.common.lmconstants import SOLR_FIELDS from LmServer.common.solr import facet_archive_on_gridset, query_archive_index from LmServer.common.subset import subset_global_pam from LmWebServer.services.api.v2.base import LmService @@ -11,28 +12,23 @@ # ............................................................................. class _GridsetFacetService(LmService): - """This service retrieves gridsets within the solr index for the user - """ + """This service retrieves gridsets within the solr index for the user""" # ................................ @lm_formatter - def GET(self, user_id=None, **params): - """Queries the Global PAM for matching results - """ + def list_gridsets(self, user_id=None, **params): + """Queries the Global PAM for matching results""" facets = facet_archive_on_gridset(user_id=user_id) # NOTE: Response is list of id, count but not separated i = 0 counts = [] while i < len(facets): - counts.append({ - SOLR_FIELDS.GRIDSET_ID: str(facets[i]), - 'count': int(facets[i + 1]) - }) + counts.append( + {SOLR_FIELDS.GRIDSET_ID: str(facets[i]), + 'count': int(facets[i + 1])}) i += 2 - return { - SOLR_FIELDS.GRIDSET_ID: counts - } + return {SOLR_FIELDS.GRIDSET_ID: counts} # ............................................................................. @@ -42,12 +38,12 @@ class GlobalPAMService(LmService): # ................................ @lm_formatter - def subset_global_pam( + def retrieve_pam_subset( self, user_id, algorithm_code=None, bbox=None, display_name=None, gridset_id=None, model_scenario_code=None, prj_scen_code=None, point_max=None, point_min=None, squid=None, taxon_kingdom=None, taxon_phylum=None, taxon_class=None, taxon_order=None, taxon_family=None, taxon_genus=None, taxon_species=None, **params): - """Queries the Global PAM and returns a subset of intersected layers (PAVs) matching the parameters + """Queries the Global PAM and returns a subset of intersected layers (PAVs) from Solr, matching the parameters Args: user_id (str): The user authorized for this operation. Note that this may not be @@ -69,22 +65,22 @@ def subset_global_pam( taxon_genus (str): Genus for filtering layers to populate a PAM taxon_species (str): Species for filtering layers to populate a PAM """ - return self._make_solr_query( + solr_matches = self._make_solr_query( algorithm_code=algorithm_code, bbox=bbox, display_name=display_name, gridset_id=gridset_id, model_scenario_code=model_scenario_code, point_max=point_max, point_min=point_min, user_id=user_id, projection_scenario_code=prj_scen_code, squid=squid, tax_kingdom=taxon_kingdom, tax_phylum=taxon_phylum, tax_class=taxon_class, tax_order=taxon_order, tax_family=taxon_family, tax_genus=taxon_genus, tax_species=taxon_species) + return solr_matches # ................................ 
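
# The Solr facet response handled by list_gridsets() above is a flat list that
# alternates gridset id and count. A compact, equivalent pairing, shown with
# hypothetical values ('gridset_id' stands in for SOLR_FIELDS.GRIDSET_ID):
demo_facets = ['101', 24, '102', 3]
demo_counts = [
    {'gridset_id': str(gridset_id), 'count': int(count)}
    for gridset_id, count in zip(demo_facets[::2], demo_facets[1::2])
]
# -> [{'gridset_id': '101', 'count': 24}, {'gridset_id': '102', 'count': 3}]
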
@lm_formatter - def post_global_pam_subset(self, user_id, archive_name, gridset_id, algorithm_code=None, bbox=None, - cell_size=None, model_scenario_code=None, point_max=None, - point_min=None, user_id=None, prj_scen_code=None, squid=None, - taxon_kingdom=None, taxon_phylum=None, taxon_class=None, - taxon_order=None, taxon_family=None, taxon_genus=None, - taxon_species=None, display_name=None, **params): - """Queries the Global PAM, and creates a gridset initializing a new PAM from layers matching the parameters, + def post_pam_subset( + self, user_id, archive_name, cell_size=None, algorithm_code=None, bbox=None, display_name=None, + gridset_id=None, model_scenario_code=None, prj_scen_code=None, point_max=None, point_min=None, + squid=None, taxon_kingdom=None, taxon_phylum=None, taxon_class=None, taxon_order=None, + taxon_family=None, taxon_genus=None, taxon_species=None, **params): + """Queries the Global PAM, and creates a gridset initializing a new PAM from the subset of layers matching the parameters, Args: user_id (str): The user authorized for this operation. Note that this may not be @@ -108,21 +104,22 @@ def post_global_pam_subset(self, user_id, archive_name, gridset_id, algorithm_co taxon_genus (str): Genus for filtering layers to populate a PAM taxon_species (str): Species for filtering layers to populate a PAM """ - matches = self._make_solr_query( + solr_matches = self._make_solr_query( user_id, algorithm_code=algorithm_code, bbox=bbox, display_name=display_name, gridset_id=gridset_id, model_scenario_code=model_scenario_code, projection_scenario_code=prj_scen_code, point_max=point_max, point_min=point_min, squid=squid, tax_kingdom=taxon_kingdom, tax_phylum=taxon_phylum, tax_class=taxon_class, tax_order=taxon_order, tax_family=taxon_family, tax_genus=taxon_genus, tax_species=taxon_species) - # Make bbox tuple - if bbox: + # Make bbox tuple from string + if bbox is not None: bbox = tuple([float(i) for i in bbox.split(',')]) - gridset = self._subset_global_pam(archive_name, matches, bbox=bbox, cell_size=cell_size) - HTTPStatus.ACCEPTED - return Atom( - gridset.get_id(), gridset.name, gridset.metadata_url, - gridset.mod_time, epsg=gridset.epsg_code) + gridset = subset_global_pam( + archive_name, solr_matches, user_id, bbox=bbox, cell_size=cell_size, scribe=self.scribe) + gatom = Atom( + gridset.get_id(), gridset.name, gridset.metadata_url, gridset.mod_time, epsg=gridset.epsg_code) + return make_response(gatom, HTTPStatus.ACCEPTED) + # ................................ def _make_solr_query( @@ -137,16 +134,16 @@ def _make_solr_query( tax_phylum=tax_phylum, tax_class=tax_class, tax_order=tax_order, tax_family=tax_family, tax_genus=tax_genus, tax_species=tax_species, user_id=user_id) - # ................................ - def _subset_global_pam(self, user_id, archive_name, matches, bbox=None, cell_size=None): - """Creates a subset of a global PAM and create a new grid set - - Args: - user_id (str): The user authorized for this operation. - archive_name (str) : The name of this new grid set - matches (list) : Solr hits to be used for subsetting - bbox (str): Bounding box in format 'minx, miny, maxx, maxy' for subsetting layers to populate a PAM - cell_size (float): Size of cells (in map units) to be used for intersections when creating the new PAM - """ - return subset_global_pam( - archive_name, matches, user_id, bbox=bbox, cell_size=cell_size, scribe=self.scribe) + # # ................................ 
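
# Worked example of the bbox handling in post_pam_subset() above: the
# 'minx,miny,maxx,maxy' query string becomes a tuple of floats before the
# global PAM is subset.
demo_bbox = '-180,-90,180,90'
demo_bbox_tuple = tuple(float(coord) for coord in demo_bbox.split(','))
# -> (-180.0, -90.0, 180.0, 90.0)
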
+ # def _subset_global_pam(self, user_id, archive_name, matches, bbox=None, cell_size=None): + # """Creates a subset of a global PAM and create a new grid set + # + # Args: + # user_id (str): The user authorized for this operation. + # archive_name (str) : The name of this new grid set + # matches (list) : Solr hits to be used for subsetting + # bbox (str): Bounding box in format 'minx, miny, maxx, maxy' for subsetting layers to populate a PAM + # cell_size (float): Size of cells (in map units) to be used for intersections when creating the new PAM + # """ + # return subset_global_pam( + # archive_name, matches, user_id, bbox=bbox, cell_size=cell_size, scribe=self.scribe) diff --git a/LmWebServer/flask_app/gridset.py b/LmWebServer/flask_app/gridset.py index 6f72a5af..abb896be 100644 --- a/LmWebServer/flask_app/gridset.py +++ b/LmWebServer/flask_app/gridset.py @@ -1,7 +1,6 @@ -"""This module provides REST services for grid sets -""" +"""This module provides REST services for grid sets""" import dendropy -from flask import Response +from flask import Response, make_response from http import HTTPStatus import json import os @@ -53,6 +52,16 @@ def get_gridset(user_id, gridset_id): raise WEXC.Forbidden('User {} does not have permission to access GridSet {}'.format( user_id, gridset_id)) +# ................................ +def get_user_dir(user_id): + """Get the user's workspace directory + + Todo: + Change this to use something at a lower level. This is using the + same path construction as the getBoomPackage script + """ + return os.path.join(ARCHIVE_PATH, user_id, 'uploads', 'biogeo') + # ............................................................................. def summarize_object_statuses(summary): """Summarizes a summary @@ -90,8 +99,7 @@ class GridsetAnalysisService(LmService): # ................................ @lm_formatter - def POST( - self, gridset_id, do_mcpa=False, num_permutations=500, do_calc=False, **params): + def request_analysis(self, gridset_id, do_mcpa=False, num_permutations=500, do_calc=False, **params): """Adds a set of biogeographic hypotheses to the gridset""" # Get gridset gridset = self.get_gridset(gridset_id) @@ -116,84 +124,116 @@ def POST( boom_col.create_workflow() boom_col.close() - # cherrypy.response.status = HTTPStatus.ACCEPTED - return gridset - - raise WEXC.BadRequest('Must specify at least one analysis to perform') - - # ................................ - def _get_user_dir(self): - """Get the user's workspace directory - - Todo: - Change this to use something at a lower level. This is using the - same path construction as the getBoomPackage script - """ - return os.path.join( - ARCHIVE_PATH, self.get_user_id(), 'uploads', 'biogeo') + response = make_response(gridset, HTTPStatus.ACCEPTED) + return response + else: + raise WEXC.BadRequest('Must specify at least one analysis to perform') # ............................................................................. class GridsetBioGeoService(LmService): - """Service class for gridset biogeographic hypotheses - """ + """Service class for gridset biogeographic hypotheses""" # ................................ 
@lm_formatter - def GET(self, user_id, gridset_id, path_biogeo_id=None, **params): + def get_biogeo_hypotheses(self, user_id, gridset_id, biogeo_id=None, **params): """There is not a true service for limiting the biogeographic hypothesis matrices in a gridset, but return all when listing """ gridset = get_gridset(user_id, gridset_id) - bg_hyps = gridset.get_biogeographic_hypotheses() - if path_biogeo_id is None: + if biogeo_id is None: return bg_hyps for hyp in bg_hyps: - if hyp.get_id() == path_biogeo_id: + if hyp.get_id() == biogeo_id: return hyp # If not found 404... - raise WEXC.NotFound('Biogeographic hypothesis mtx {} not found for gridset {}'.format( - path_biogeo_id, gridset_id)) + raise WEXC.NotFound( + 'Biogeographic hypothesis mtx {} not found for gridset {}'.format(biogeo_id, gridset_id)) + + # ................................ + @lm_formatter + def _encode_insert_biogeo(self, zip_f, hyp_lyr, gridset, encoder): + curr_time = gmt().mjd + min_coverage = 0.25 + hyp_filename = hyp_lyr[FILE_NAME_KEY] + # Check to see if file is in zip package + if HYPOTHESIS_NAME_KEY in hyp_lyr: + hyp_name = hyp_lyr[HYPOTHESIS_NAME_KEY] + else: + hyp_name = os.path.splitext(os.path.basename(hyp_filename))[0] + + if EVENT_FIELD_KEY in hyp_lyr: + event_field = hyp_lyr[EVENT_FIELD_KEY] + column_name = '{} - {}'.format( + hyp_name, event_field) + else: + event_field = None + column_name = hyp_name + + int_param_val_key = MatrixColumn.INTERSECT_PARAM_VAL_NAME + lyr_meta = { + 'name': hyp_name, + int_param_val_key.lower(): event_field, + ServiceObject.META_DESCRIPTION.lower(): + 'Biogeographic hypotheses based on layer {}'.format(hyp_filename), + ServiceObject.META_KEYWORDS.lower(): ['biogeographic hypothesis'] + } + + if KEYWORD_KEY in hyp_lyr: + lyr_meta[ServiceObject.META_KEYWORDS.lower()].extend(hyp_lyr[KEYWORD_KEY]) + + lyr = Vector( + hyp_name, gridset.get_user_id(), gridset.epsg, dlocation=None, metadata=lyr_meta, + data_format=LMFormat.SHAPE.driver, val_attribute=event_field, mod_time=curr_time) + updated_lyr = self.scribe.find_or_insert_layer(lyr) + + # Loop through files to write all matching + # (ext) to out location + base_out = os.path.splitext(updated_lyr.get_dlocation())[0] + + for ext in LMFormat.SHAPE.get_extensions(): + z_fn = '{}{}'.format(hyp_filename, ext) + out_fn = '{}{}'.format(base_out, ext) + if z_fn in zip_f.namelist(): + zip_f.extract(z_fn, out_fn) + + # Add it to the list of files to be encoded + encoder.encode_biogeographic_hypothesis( + updated_lyr.get_dlocation(), column_name, min_coverage, event_field=event_field) + # ................................ 
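
# Condensed sketch of the encoding flow that _encode_insert_biogeo() above
# feeds into, using only the LayerEncoder calls that appear in this module;
# every file path here is hypothetical.
from LmCommon.encoding.layer_encoder import LayerEncoder

encoder = LayerEncoder('path/to/shapegrid.shp')        # gridset shapegrid dlocation
encoder.encode_biogeographic_hypothesis(
    'path/to/merriam_lines.shp',                       # hypothesis layer dlocation
    'Merriam line - event',                            # column name
    0.25,                                              # min_coverage value used in this module
    event_field='event')
encoded = encoder.get_encoded_matrix()
encoded.write('path/to/biogeo_matrix_dlocation')       # matrix dlocation (assumed)
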
@lm_formatter - def POST(self, user_id, gridset_id, bio_geo_data, **params): + def post_biogeo_hypotheses(self, user_id, gridset_id, biogeo_data, **params): """Adds a set of biogeographic hypotheses to the gridset""" # Get gridset gridset = get_gridset(user_id, gridset_id) - - # Process JSON - hypothesis_json = json.loads(bio_geo_data) - # Check reference to get file - ref_obj = hypothesis_json[BG_REF_KEY] - + hypothesis_reference_obj = biogeo_data[BG_REF_KEY] # If gridset, - if ref_obj[BG_REF_TYPE_KEY].lower() == 'gridset': - # copy hypotheses from gridset + if hypothesis_reference_obj[BG_REF_TYPE_KEY].lower() == 'gridset': + # copy hypotheses from gridset try: - ref_gridset_id = int(ref_obj[BG_REF_ID_KEY]) + ref_gridset_id = int(hypothesis_reference_obj[BG_REF_ID_KEY]) except Exception: # Probably not an integer or something raise WEXC.BadRequest('Cannot get gridset for reference identfier {}'.format( - ref_obj[BG_REF_ID_KEY])) + hypothesis_reference_obj[BG_REF_ID_KEY])) + ref_gridset = get_gridset(user_id, ref_gridset_id) # Get hypotheses from other gridset ret = [] for bg_hyp in ref_gridset.get_biogeographic_hypotheses(): new_bg_mtx = LMMatrix( - None, matrix_type=MatrixType.BIOGEO_HYPOTHESES, - process_type=ProcessType.ENCODE_HYPOTHESES, - gcm_code=bg_hyp.gcm_code, - alt_pred_code=bg_hyp.alt_pred_code, - date_code=bg_hyp.date_code, metadata=bg_hyp.mtx_metadata, - user_id=gridset.get_user_id(), gridset=gridset, - status=JobStatus.INITIALIZE) + None, matrix_type=MatrixType.BIOGEO_HYPOTHESES, process_type=ProcessType.ENCODE_HYPOTHESES, + gcm_code=bg_hyp.gcm_code, alt_pred_code=bg_hyp.alt_pred_code, date_code=bg_hyp.date_code, + metadata=bg_hyp.mtx_metadata, user_id=user_id, gridset=gridset, status=JobStatus.INITIALIZE) + inserted_bg = self.scribe.find_or_insert_matrix(new_bg_mtx) inserted_bg.update_status(JobStatus.COMPLETE) self.scribe.update_object(inserted_bg) @@ -201,111 +241,47 @@ def POST(self, user_id, gridset_id, bio_geo_data, **params): bg_mtx = Matrix.load(bg_hyp.get_dlocation()) bg_mtx.write(inserted_bg.get_dlocation()) ret.append(inserted_bg) - elif ref_obj[BG_REF_TYPE_KEY].lower() == 'upload': + + elif hypothesis_reference_obj[BG_REF_TYPE_KEY].lower() == 'upload': curr_time = gmt().mjd - # Check for uploaded biogeo package - package_name = ref_obj[BG_REF_ID_KEY] - package_filename = os.path.join( - self._get_user_dir(), '{}{}'.format( - package_name, LMFormat.ZIP.ext)) - + # # Check for uploaded biogeo package + package_name = hypothesis_reference_obj[BG_REF_ID_KEY] + package_filename = os.path.join(get_user_dir(), '{}{}'.format(package_name, LMFormat.ZIP.ext)) + # encoder = LayerEncoder(gridset.get_shapegrid().get_dlocation()) + self._encode_insert_biogeo(gridset, hypothesis_reference_obj, encoder, package_filename) # TODO(CJ): Pull this from config somewhere - min_coverage = 0.25 - + + if os.path.exists(package_filename): with open(package_filename) as in_f: with zipfile.ZipFile(in_f, allowZip64=True) as zip_f: # Get file names in package avail_files = zip_f.namelist() - - for hyp_lyr in ref_obj[LAYERS_KEY]: + + for hyp_lyr in hypothesis_reference_obj[LAYERS_KEY]: hyp_filename = hyp_lyr[FILE_NAME_KEY] - + self._encode_insert_biogeo(hyp_lyr, gridset, hypothesis_reference_obj, encoder, package_filename) + # Check to see if file is in zip package - if hyp_filename in avail_files or \ - '{}{}'.format( - hyp_filename, LMFormat.SHAPE.ext - ) in avail_files: - if HYPOTHESIS_NAME_KEY in hyp_lyr: - hyp_name = hyp_lyr[HYPOTHESIS_NAME_KEY] - else: - hyp_name = os.path.splitext( - 
os.path.basename(hyp_filename))[0] - - if EVENT_FIELD_KEY in hyp_lyr: - event_field = hyp_lyr[EVENT_FIELD_KEY] - column_name = '{} - {}'.format( - hyp_name, event_field) - else: - event_field = None - column_name = hyp_name - - int_param_val_key = \ - MatrixColumn.INTERSECT_PARAM_VAL_NAME - lyr_meta = { - 'name': hyp_name, - int_param_val_key.lower(): event_field, - ServiceObject.META_DESCRIPTION.lower(): - '{} based on layer {}'.format( - 'Biogeographic hypotheses', - hyp_filename), - ServiceObject.META_KEYWORDS.lower(): [ - 'biogeographic hypothesis' - ] - } - - if KEYWORD_KEY in hyp_lyr: - lyr_meta[ - ServiceObject.META_KEYWORDS.lower() - ].extend(hyp_lyr[KEYWORD_KEY]) - - lyr = Vector( - hyp_name, gridset.get_user_id(), - gridset.epsg, dlocation=None, - metadata=lyr_meta, - data_format=LMFormat.SHAPE.driver, - val_attribute=event_field, - mod_time=curr_time) - updated_lyr = self.scribe.find_or_insert_layer( - lyr) - - # Get dlocation - # Loop through files to write all matching - # (ext) to out location - base_out = os.path.splitext( - updated_lyr.get_dlocation())[0] - - for ext in LMFormat.SHAPE.get_extensions(): - z_fn = '{}{}'.format(hyp_filename, ext) - out_fn = '{}{}'.format(base_out, ext) - if z_fn in avail_files: - zip_f.extract(z_fn, out_fn) - - # Add it to the list of files to be encoded - encoder.encode_biogeographic_hypothesis( - updated_lyr.get_dlocation(), column_name, - min_coverage, event_field=event_field) - else: + if not ( + hyp_filename in avail_files or '{}{}'.format(hyp_filename, LMFormat.SHAPE.ext) in avail_files): raise WEXC.BadRequest('{} missing from package'.format(hyp_filename)) + else: + self._encode_insert_biogeo(zip_f, hyp_lyr, gridset, encoder) # Create biogeo matrix # Add the matrix to contain biogeo hypotheses layer # intersections meta = { ServiceObject.META_DESCRIPTION.lower(): - 'Biogeographic Hypotheses from package {}'.format( - package_name), - ServiceObject.META_KEYWORDS.lower(): [ - 'biogeographic hypotheses' - ] - } + 'Biogeographic Hypotheses from package {}'.format(package_name), + ServiceObject.META_KEYWORDS.lower(): ['biogeographic hypotheses']} tmp_mtx = LMMatrix( - None, matrix_type=MatrixType.BIOGEO_HYPOTHESES, - process_type=ProcessType.ENCODE_HYPOTHESES, - user_id=self.get_user_id(), gridset=gridset, metadata=meta, - status=JobStatus.INITIALIZE, status_mod_time=curr_time) + None, matrix_type=MatrixType.BIOGEO_HYPOTHESES, process_type=ProcessType.ENCODE_HYPOTHESES, + user_id=self.get_user_id(), gridset=gridset, metadata=meta, status=JobStatus.INITIALIZE, + status_mod_time=curr_time) bg_mtx = self.scribe.find_or_insert_matrix(tmp_mtx) # Encode the hypotheses @@ -314,32 +290,17 @@ def POST(self, user_id, gridset_id, bio_geo_data, **params): # We'll return the newly inserted biogeo matrix ret = [bg_mtx] - else: - raise WEXC.NotFound('Biogeography package: {} was not found'.format( - package_name)) else: raise WEXC.BadRequest('Cannot add hypotheses with reference type: {}'.format( - ref_obj[BG_REF_TYPE_KEY])) + hypothesis_reference_obj[BG_REF_TYPE_KEY])) # Return resulting list of matrices return ret - # ................................ - def _get_user_dir(self, user_id): - """Get the user's workspace directory - - Todo: - Change this to use something at a lower level. This is using the - same path construction as the getBoomPackage script - """ - return os.path.join( - ARCHIVE_PATH, user_id, 'uploads', 'biogeo') - # ............................................................................. 
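
# Sketch of the matrix copy step used when hypotheses are pulled from another
# gridset (see the hypothesis POST handler above): the source data is loaded
# with lmpy and written to the new matrix's dlocation. Paths are hypothetical.
from lmpy import Matrix

source_matrix = Matrix.load('path/to/source_biogeo_matrix')
source_matrix.write('path/to/new_biogeo_matrix')
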
class GridsetProgressService(LmService): - """Service class for gridset progress - """ + """Service class for gridset progress""" # ................................ @lm_formatter @@ -350,8 +311,7 @@ def get_gridset_progress(self, gridset_id, detail=False, **params): # ............................................................................. class GridsetTreeService(LmService): - """Service for the tree of a gridset - """ + """Service for the tree of a gridset""" # ................................ def delete_tree(self, user_id, tree_id): @@ -371,8 +331,7 @@ def delete_tree(self, user_id, tree_id): if success: return Response(status=HTTPStatus.NO_CONTENT) - # TODO: How can this happen? Make sure we catch those cases and - # respond appropriately. We don't want 500 errors + # TODO: How can this happen? Catch and respond appropriately, avoid 500 errors else: raise WEXC.InternalServerError('Failed to delete tree') @@ -381,7 +340,7 @@ def delete_tree(self, user_id, tree_id): # ................................ @lm_formatter - def GET(self, user_id, gridset_id, tree_id=None, include_csv=None, include_sdms=None, **params): + def get_tree(self, user_id, gridset_id, tree_id=None, include_csv=None, include_sdms=None, **params): """Just return the gridset tree, no listing at this time TODO: remove unused args. How is this called? diff --git a/LmWebServer/flask_app/routes.py b/LmWebServer/flask_app/routes.py index edc31a5b..6378f485 100644 --- a/LmWebServer/flask_app/routes.py +++ b/LmWebServer/flask_app/routes.py @@ -8,7 +8,8 @@ from LmWebServer.flask_app.global_pam import GlobalPAMService from LmWebServer.flask_app.layer import LayerService from LmWebServer.flask_app.occurrence import OccurrenceLayerService -# from LmWebServer.flask_app.gridset import GridsetService +from LmWebServer.flask_app.gridset import GridsetService +from LmWebServer.flask_app.species_hint import SpeciesHintService app = Flask(__name__.split('.')[0]) @@ -56,8 +57,8 @@ def occurrence(identifier): svc.delete_occurrence_set(user_id, identifier) elif request.method == 'GET': - after_time = request.args.get('after_time', default = None, type = str) - before_time = request.args.get('before_time', default = None, type = str) + after_time = request.args.get('after_time', default = None, type = float) + before_time = request.args.get('before_time', default = None, type = float) display_name = request.args.get('display_name', default = None, type = str) epsg_code = request.args.get('epsg_code', default= None, type = str) minimum_number_of_points = request.args.get('minimum_number_of_points', default = 1, type = int) @@ -115,8 +116,8 @@ def layer(identifier): svc.delete_occurrence_set(user_id, identifier) elif request.method == 'GET': - after_time = request.args.get('after_time', default = None, type = str) - before_time = request.args.get('before_time', default = None, type = str) + after_time = request.args.get('after_time', default = None, type = float) + before_time = request.args.get('before_time', default = None, type = float) alt_pred_code = request.args.get('alt_pred_code', default = None, type = str) date_code = request.args.get('date_code', default = None, type = str) epsg_code = request.args.get('epsg_code', default= None, type = str) @@ -204,21 +205,42 @@ def globalpam(): user = svc.get_user() user_id = user.user_id + archive_name = request.args.get('display_name', default = None, type = str) + cell_size = request.args.get('cell_size', default = None, type = float) + algorithm_code = request.args.get('algorithm_code', 
default = None, type = str) + bbox = request.args.get('bbox', default = None, type = str) + display_name = request.args.get('display_name', default = None, type = str) + gridset_id = request.args.get('gridset_id', default = None, type = int) + model_scenario_code = request.args.get('model_scenario_code', default = None, type = str) + prj_scen_code = request.args.get('prj_scenario_code', default = None, type = str) + point_max = request.args.get('point_max', default = None, type = int) + point_min = request.args.get('point_min', default = None, type = int) + squid = request.args.get('squid', default = None, type = str) + taxon_kingdom = request.args.get('taxon_kingdom', default = None, type = str) + taxon_phylum = request.args.get('taxon_phylum', default = None, type = str) + taxon_class = request.args.get('taxon_class', default = None, type = str) + taxon_order = request.args.get('taxon_order', default = None, type = str) + taxon_family = request.args.get('taxon_family', default = None, type = str) + taxon_genus = request.args.get('taxon_genus', default = None, type = str) + taxon_species = request.args.get('taxon_species', default = None, type = str) + if request.method == 'POST': - svc.post_boom_data(user_id, user.email, boom_data) + response = svc.post_pam_subset( + user_id, archive_name, gridset_id, algorithm_code=algorithm_code, bbox=bbox, + display_name=display_name, gridset_id=gridset_id, model_scenario_code=model_scenario_code, + prj_scen_code=prj_scen_code, point_max=point_max, point_min=point_min, squid=squid, + taxon_kingdom=taxon_kingdom, taxon_phylum=taxon_phylum, taxon_class=taxon_class, + taxon_order=taxon_order, taxon_family=taxon_family, taxon_genus=taxon_genus, + taxon_species=taxon_species) elif request.method == 'GET': - after_time = request.args.get('after_time', default = None, type = str) - before_time = request.args.get('before_time', default = None, type = str) - display_name = request.args.get('display_name', default = None, type = str) - epsg_code = request.args.get('epsg_code', default= None, type = str) - minimum_number_of_points = request.args.get('minimum_number_of_points', default = 1, type = int) - limit = request.args.get('limit', default = 100, type = int) - offset = request.args.get('offset', default = 0, type = int) - # url_user = request.args.get('url_user', default = None, type = str) - status = request.args.get('status', default = None, type = str) - gridset_id = request.args.get('gridset_id', default = None, type = str) - fill_points = request.args.get('fill_points', default = False, type = bool) + response = svc.post_pam_subset( + user_id, archive_name, cell_size=cell_size, algorithm_code=algorithm_code, bbox=bbox, + display_name=display_name, gridset_id=gridset_id, model_scenario_code=model_scenario_code, + prj_scen_code=prj_scen_code, point_max=point_max, point_min=point_min, squid=squid, + taxon_kingdom=taxon_kingdom, taxon_phylum=taxon_phylum, taxon_class=taxon_class, + taxon_order=taxon_order, taxon_family=taxon_family, taxon_genus=taxon_genus, + taxon_species=taxon_species) return response @@ -237,8 +259,8 @@ def gridset(identifier): svc.delete_gridset(user_id, identifier) elif request.method == 'GET': - after_time = request.args.get('after_time', default = None, type = str) - before_time = request.args.get('before_time', default = None, type = str) + after_time = request.args.get('after_time', default = None, type = float) + before_time = request.args.get('before_time', default = None, type = float) epsg_code = request.args.get('epsg_code', 
default= None, type = str) meta_string = request.args.get('meta_string', default= None, type = str) shapegrid_id = request.args.get('shapegrid_id', default= None, type = int) @@ -265,6 +287,18 @@ def gridset(identifier): return response +# ..................................................................................... +@app.route('/api/v2/hint', methods=['GET']) +def hint(): + svc = SpeciesHintService() + user_id = svc.get_user() + + search_string = request.args.get('search_string', default= None, type = str) + return svc.get_hint(user_id, search_string) + + + + # biotaphynames = GBIFTaxonService() # biotaphypoints = IDigBioOccurrenceService() # biotaphytree = OpenTreeService() diff --git a/LmWebServer/flask_app/species_hint.py b/LmWebServer/flask_app/species_hint.py new file mode 100644 index 00000000..70cec2ae --- /dev/null +++ b/LmWebServer/flask_app/species_hint.py @@ -0,0 +1,51 @@ +"""This module provides services for fuzzy search for occurrence sets""" +import werkzeug.exceptions as WEXC + +from LmServer.common.lmconstants import SOLR_FIELDS +from LmServer.common.solr import query_archive_index +from LmWebServer.services.api.v2.base import LmService +from LmWebServer.services.cp_tools.lm_format import lm_formatter + + +# ............................................................................. +class SpeciesHintService(LmService): + """This class is responsible for the species hint services""" + + # ................................ + @lm_formatter + def get_hint(self, user_id, search_string, limit=20, **params): + """Search the index for occurrence sets matching the search string""" + if len(search_string) < 3: + raise WEXC.BadRequest('Need to provide at least 3 characters for search string') + + # Split on a space if exists + parts = search_string.replace('%20', '_').split(' ') + if len(parts) > 1: + genus = parts[0] + species_search = '{}*'.format(parts[1]) + else: + genus = '{}*'.format(parts[0]) + species_search = None + + matches = query_archive_index( + tax_genus=genus.title(), tax_species=species_search, user_id=user_id) + + occ_ids = [] + ret = [] + + for match in matches: + occ_id = match[SOLR_FIELDS.OCCURRENCE_ID] + point_count = match[SOLR_FIELDS.POINT_COUNT] + display_name = match[SOLR_FIELDS.DISPLAY_NAME] + binomial = '{} {}'.format( + match[SOLR_FIELDS.TAXON_GENUS], + match[SOLR_FIELDS.TAXON_SPECIES]) + if occ_id not in occ_ids: + occ_ids.append(occ_id) + ret.append({ + 'binomial': binomial, + 'name': display_name, + 'numPoints': point_count, + 'occurrenceSet': occ_id + }) + return ret[:limit] diff --git a/requirements.txt b/requirements.txt index 66fcb8ca..8d139aad 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,10 @@ -Cython>=0.29.24 -Flask (brings Werkzeug) +Cython +Flask # brings Werkzeug mapscript -scipy (brings numpy) -matplotlib>=3.5.0 +scipy # brings numpy +matplotlib psycopg2-binary -requests>=2.26.0 -dendropy \ No newline at end of file +requests +dendropy +lmpy # https://github.com/lifemapper/lifemapper-server/blob/main/src/lmpy/lmpy-2.9.99-py3-none-any.whl +idigbio # https://github.com/lifemapper/lifemapper-server/blob/main/src/lmserver/idigbio-0.8.5.tar.gz \ No newline at end of file From c71e382a0fda467c427cc5468738100059ed57a7 Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Thu, 2 Dec 2021 16:46:11 -0600 Subject: [PATCH 10/18] updates; unfinished, untested --- LmWebServer/flask_app/base.py | 25 ++++++--- LmWebServer/flask_app/global_pam.py | 7 ++- LmWebServer/flask_app/gridset.py | 18 ++----- LmWebServer/flask_app/occurrence.py | 7 
++- LmWebServer/flask_app/open_tree.py | 82 +++++++++++++++++++++++++++++ LmWebServer/flask_app/routes.py | 47 +++++++++++++---- 6 files changed, 150 insertions(+), 36 deletions(-) create mode 100644 LmWebServer/flask_app/open_tree.py diff --git a/LmWebServer/flask_app/base.py b/LmWebServer/flask_app/base.py index 73e68b24..8c43b44b 100644 --- a/LmWebServer/flask_app/base.py +++ b/LmWebServer/flask_app/base.py @@ -1,8 +1,10 @@ """The module provides a base Lifemapper service class """ from flask import Flask, session +import os from LmCommon.common.lmconstants import DEFAULT_POST_USER +from LmServer.common.lmconstants import ARCHIVE_PATH from LmServer.common.localconstants import PUBLIC_USER from LmServer.common.log import WebLogger from LmServer.db.borg_scribe import BorgScribe @@ -36,7 +38,6 @@ class in case we decide that we need to use a different mechanism (such self.log = log # .......................... - @staticmethod def get_user(self, user_id=None): """Gets the user id for the service call. @@ -46,16 +47,15 @@ def get_user(self, user_id=None): TODO: Save the username in the session """ - svc = LmService() if user_id is None: - svc.get_user_id() + self.get_user_id() # Check to see if we should use url user - usr = svc.scribe.find_user(user_id) + usr = self.scribe.find_user(user_id) return usr # .......................... - @staticmethod - def get_user_id(url_user=None): + @classmethod + def get_user_id(cls, url_user=None): """Gets the user id for the service call. Gets the user id for the service call. If urlUser is provided, try @@ -76,6 +76,19 @@ def get_user_id(url_user=None): except Exception: # Fall back to PUBLIC_USER return PUBLIC_USER + + # ................................ + @classmethod + def get_user_dir(cls, user_id): + """Get the user's workspace directory + + Todo: + Change this to use something at a lower level. This is using the + same path construction as the getBoomPackage script + """ + return os.path.join(ARCHIVE_PATH, user_id, 'uploads', 'biogeo') + + # .......................... @staticmethod diff --git a/LmWebServer/flask_app/global_pam.py b/LmWebServer/flask_app/global_pam.py index 86da8141..86bf1fa5 100644 --- a/LmWebServer/flask_app/global_pam.py +++ b/LmWebServer/flask_app/global_pam.py @@ -3,11 +3,14 @@ from http import HTTPStatus from LmServer.base.atom import Atom + from LmServer.common.lmconstants import SOLR_FIELDS from LmServer.common.solr import facet_archive_on_gridset, query_archive_index from LmServer.common.subset import subset_global_pam -from LmWebServer.services.api.v2.base import LmService -from LmWebServer.services.cp_tools.lm_format import lm_formatter + +from LmWebServer.flask_app.base import LmService + +from LmWebServer.flask_tools.lm_format import lm_formatter # ............................................................................. 
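In the reworked LmService.get_user above, the value returned by self.get_user_id() is never assigned, so scribe.find_user is still called with None when no user_id is supplied. A small sketch of the presumably intended behavior (an assumption, since the commit is marked unfinished and untested):

    # Sketch: fall back to the session/public user id before the lookup
    def get_user(self, user_id=None):
        """Get the database user record for the given or session user."""
        if user_id is None:
            user_id = self.get_user_id()
        return self.scribe.find_user(user_id)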
diff --git a/LmWebServer/flask_app/gridset.py b/LmWebServer/flask_app/gridset.py index abb896be..1305c26a 100644 --- a/LmWebServer/flask_app/gridset.py +++ b/LmWebServer/flask_app/gridset.py @@ -2,7 +2,6 @@ import dendropy from flask import Response, make_response from http import HTTPStatus -import json import os import werkzeug.exceptions as WEXC import zipfile @@ -17,7 +16,6 @@ from LmServer.base.atom import Atom from LmServer.base.layer import Vector from LmServer.base.service_object import ServiceObject -from LmServer.common.lmconstants import ARCHIVE_PATH from LmServer.legion.lm_matrix import LMMatrix from LmServer.legion.mtx_column import MatrixColumn from LmServer.legion.tree import Tree @@ -52,16 +50,6 @@ def get_gridset(user_id, gridset_id): raise WEXC.Forbidden('User {} does not have permission to access GridSet {}'.format( user_id, gridset_id)) -# ................................ -def get_user_dir(user_id): - """Get the user's workspace directory - - Todo: - Change this to use something at a lower level. This is using the - same path construction as the getBoomPackage script - """ - return os.path.join(ARCHIVE_PATH, user_id, 'uploads', 'biogeo') - # ............................................................................. def summarize_object_statuses(summary): """Summarizes a summary @@ -246,7 +234,7 @@ def post_biogeo_hypotheses(self, user_id, gridset_id, biogeo_data, **params): curr_time = gmt().mjd # # Check for uploaded biogeo package package_name = hypothesis_reference_obj[BG_REF_ID_KEY] - package_filename = os.path.join(get_user_dir(), '{}{}'.format(package_name, LMFormat.ZIP.ext)) + package_filename = os.path.join(self.get_user_dir(), '{}{}'.format(package_name, LMFormat.ZIP.ext)) # encoder = LayerEncoder(gridset.get_shapegrid().get_dlocation()) self._encode_insert_biogeo(gridset, hypothesis_reference_obj, encoder, package_filename) @@ -280,7 +268,7 @@ def post_biogeo_hypotheses(self, user_id, gridset_id, biogeo_data, **params): tmp_mtx = LMMatrix( None, matrix_type=MatrixType.BIOGEO_HYPOTHESES, process_type=ProcessType.ENCODE_HYPOTHESES, - user_id=self.get_user_id(), gridset=gridset, metadata=meta, status=JobStatus.INITIALIZE, + user_id=user_id, gridset=gridset, metadata=meta, status=JobStatus.INITIALIZE, status_mod_time=curr_time) bg_mtx = self.scribe.find_or_insert_matrix(tmp_mtx) @@ -374,7 +362,7 @@ def post_tree(self, user_id, gridset_id, tree_id=None, name=None, tree_data=None raise WEXC.BadRequest('Must provide name for tree') tree = dendropy.Tree.get(file=tree_data, schema=tree_schema) - new_tree = Tree(name, user_id=self.get_user_id()) + new_tree = Tree(name, user_id=user_id) updated_tree = self.scribe.find_or_insert_tree(new_tree) updated_tree.set_tree(tree) updated_tree.write_tree() diff --git a/LmWebServer/flask_app/occurrence.py b/LmWebServer/flask_app/occurrence.py index b8f169b4..1a27db14 100644 --- a/LmWebServer/flask_app/occurrence.py +++ b/LmWebServer/flask_app/occurrence.py @@ -103,7 +103,7 @@ def count_occurrence_sets( # ................................ 
@lm_formatter - def get_occurrence_set(self, occset_id, fill_points=False): + def get_occurrence_set(self, user_id, occset_id, fill_points=False): """Attempt to get an occurrence set""" occ = self.scribe.get_occurrence_set(occ_id=int(occset_id)) @@ -111,13 +111,12 @@ def get_occurrence_set(self, occset_id, fill_points=False): raise WEXC.NotFound('Occurrence set not found') # If allowed to, return - if check_user_permission(self.get_user_id(), occ, HTTPMethod.GET): + if check_user_permission(user_id, occ, HTTPMethod.GET): if fill_points: occ.read_shapefile() return occ - raise WEXC.Forbidden('User {} does not have permission to GET occurrence set'.format( - self.get_user_id())) + raise WEXC.Forbidden('User {} does not have permission to GET occurrence set'.format(user_id)) # ................................ @lm_formatter diff --git a/LmWebServer/flask_app/open_tree.py b/LmWebServer/flask_app/open_tree.py new file mode 100644 index 00000000..8e1c5573 --- /dev/null +++ b/LmWebServer/flask_app/open_tree.py @@ -0,0 +1,82 @@ +"""This module provides a wrapper around OpenTree's induce subtree service + +Todo: + * Use opentree wrapper code + * Catch service errors from OpenTree +""" +import hashlib +from http import HTTPStatus +import os +import werkzeug.exceptions as WEXC +from biotaphy.client.ot_service_wrapper.open_tree import (get_info_for_names, induced_subtree) + +from LmCommon.common.lmconstants import ENCODING +from LmCommon.common.ready_file import ready_filename +from LmCommon.common.time import gmt +from LmDbServer.tools.partner_data import Partners +from LmServer.common.lmconstants import ( + ARCHIVE_PATH, NONTREE_GBIF_IDS_KEY, TREE_DATA_KEY, TREE_FORMAT_KEY, + TREE_NAME_KEY, UNMATCHED_GBIF_IDS_KEY) +from LmWebServer.services.api.v2.base import LmService +from LmWebServer.services.cp_tools.lm_format import lm_formatter + + +# ............................................................................. +class OpenTreeService(LmService): + """Open Tree wrapper service for retrieving trees from taxon names.""" + + # ................................ + @lm_formatter + def get_tree_for_names(self, user_id, taxon_names_obj): + """Gets an Open Tree tree for a list of taxon names. + + Returns: + dict: A dictionary of tree information. 
+ """ + if not isinstance(taxon_names_obj, list): + raise WEXC.BadRequest('Taxon names must be a JSON list') + + try: + # Get information about taxon names + taxa_info, unmatched_gbif_ids = get_info_for_names(taxon_names_obj) + + # Get the Open Tree IDs + ott_ids = [] + for tax_info in taxa_info.values(): + if 'ott_id' in tax_info.keys(): + ott_ids.append(tax_info['ott_id']) + + if len(ott_ids) <= 1: + raise WEXC.BadRequest('Need more than one open tree ID to create a tree') + # Get the tree from Open Tree + output = induced_subtree(ott_ids) + tree_data = output['newick'] + # Get the list of GBIF IDs that matched to OTT IDs but were not in tree + nontree_ids = [] + + except Exception as e: + raise WEXC.ServiceUnavailable('We are having trouble connecting to Open Tree: {}'.format(e)) + + # Determine a name for the tree, use user id, 16 characters of hashed tree data, and mjd + tree_name = '{}-{}-{}.tre'.format( + user_id, hashlib.md5(tree_data.encode()).hexdigest()[:16], gmt().mjd) + + # Write the tree + out_filename = os.path.join(self.get_user_dir(), tree_name) + if not os.path.exists(out_filename): + ready_filename(out_filename) + with open(out_filename, 'w', encoding=ENCODING) as out_f: + out_f.write(tree_data) + else: + raise WEXC.Conflict('Tree with this name already exists in the user space') + + resp = { + NONTREE_GBIF_IDS_KEY: nontree_ids, + TREE_DATA_KEY: tree_data, + TREE_FORMAT_KEY: Partners.OTT_TREE_FORMAT, # Newick + TREE_NAME_KEY: tree_name, + UNMATCHED_GBIF_IDS_KEY: unmatched_gbif_ids, + } + + return resp + diff --git a/LmWebServer/flask_app/routes.py b/LmWebServer/flask_app/routes.py index 6378f485..ba1730ee 100644 --- a/LmWebServer/flask_app/routes.py +++ b/LmWebServer/flask_app/routes.py @@ -1,5 +1,7 @@ -from flask import Flask, redirect, render_template, request, session -from werkzeug.exceptions import BadRequest, NotFound +from flask import (Flask, redirect, render_template, request, session, url_for) +from flask_cors import CORS +import secrets +from werkzeug.exceptions import BadRequest from LmWebServer.flask_app.base import LmService from LmWebServer.flask_app.biotaphy_names import GBIFTaxonService @@ -10,16 +12,25 @@ from LmWebServer.flask_app.occurrence import OccurrenceLayerService from LmWebServer.flask_app.gridset import GridsetService from LmWebServer.flask_app.species_hint import SpeciesHintService +from LmWebServer.flask_app.open_tree import OpenTreeService app = Flask(__name__.split('.')[0]) +app.secret_key = str.encode(secrets.token_hex()) +CORS(app) + +# .......................... +@app.route('/') +def index(): + if 'username' in session: + return f'Logged in as {session["username"]}' + return 'You are not logged in' # .......................... @app.route('/api/v2/login', methods=['GET', 'POST']) def login(): if request.method == 'POST': - req = request.form - username = req.get('username') - password = req.get('password') + username = request.form.get('username') + password = request.form.get('password') user = LmService.get_user(username) if user.check_password(password): @@ -31,6 +42,13 @@ def login(): return render_template('public_html/login.html') +# ..................................................................................... +@app.route('/logout') +def logout(): + # remove the username from the session if it's there + session.pop('username', None) + return redirect(url_for('index')) + # ..................................................................................... 
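With the secret key, CORS, and session-based login above in place, a client script can authenticate once and reuse the session cookie for later calls. A rough usage sketch with the requests package (the host URL and credentials are placeholders, not values from this repository):

import requests

BASE_URL = 'http://localhost'  # assumed deployment host

with requests.Session() as http:
    # /api/v2/login expects form fields named username and password
    http.post(BASE_URL + '/api/v2/login', data={'username': 'someuser', 'password': 'secret'})
    # The Flask session cookie set above is reused automatically
    resp = http.get(BASE_URL + '/api/v2/hint', params={'search_string': 'Acer'})
    print(resp.json())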
@app.route('/api/v2/occ/', methods=['GET', 'POST', 'DELETE']) def occurrence(identifier): @@ -93,7 +111,7 @@ def occurrence(identifier): except: return BadRequest('{} is not a valid layer ID'.format(identifier)) else: - response = svc.get_occurrence_set(occid, fill_points=fill_points) + response = svc.get_occurrence_set(user_id, occid, fill_points=fill_points) return response @@ -168,7 +186,7 @@ def biotaphynames(): try: names_obj = request.get_json() except: - return BadRequest('Name list must be in JSON format') + return BadRequest('Names must be a JSON list') else: svc = GBIFTaxonService() response = svc.get_gbif_results(names_obj) @@ -180,12 +198,23 @@ def biotaphypoints(): try: taxonids_obj = request.get_json() except: - return BadRequest('Taxon ID list must be in JSON format') + return BadRequest('Taxon IDs must be a JSON list') else: svc = IDigBioOccurrenceService() response = svc.get_occurrence_counts_for_taxonids(taxonids_obj) return response +# ..................................................................................... +@app.route('/api/v2/biotaphytree', methods=['POST']) +def biotaphytree(): + try: + taxon_names_obj = request.get_json() + except: + return BadRequest('Taxon names must be a JSON list') + else: + svc = OpenTreeService() + svc.get_tree_for_names(taxon_names_obj) + # ..................................................................................... @app.route('/api/v2/gbifparser', methods=['POST']) def gbifparser(): @@ -297,7 +326,7 @@ def hint(): return svc.get_hint(user_id, search_string) - + # biotaphynames = GBIFTaxonService() # biotaphypoints = IDigBioOccurrenceService() From 76e83a1476cfcf1eb5af1b22103b243b4115489c Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Thu, 2 Dec 2021 16:48:18 -0600 Subject: [PATCH 11/18] flask helpers --- LmWebServer/formatters/json_formatter.py | 11 +++-------- requirements.txt | 7 +++---- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/LmWebServer/formatters/json_formatter.py b/LmWebServer/formatters/json_formatter.py index d88c368f..529e6a8f 100644 --- a/LmWebServer/formatters/json_formatter.py +++ b/LmWebServer/formatters/json_formatter.py @@ -3,12 +3,9 @@ Todo: Use constants """ - from hashlib import md5 import json -import cherrypy - from LmCommon.common.lmconstants import LMFormat from LmServer.base.atom import Atom from LmServer.base.layer import Raster, Vector @@ -29,8 +26,7 @@ # Provide methods for direct calls to formatters # ............................................................................. def format_atom(obj): - """Format an Atom object into a dictionary - """ + """Format an Atom object into a dictionary""" return { 'epsg': obj.epsg_code, 'id': obj.get_id(), @@ -374,9 +370,8 @@ def json_object_formatter(obj): # ............................................................................. 
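The biotaphytree route added earlier in this patch drops the service result and calls get_tree_for_names without the user_id argument that OpenTreeService expects. A possible corrected body, sketched on the assumption that the session user should be used:

@app.route('/api/v2/biotaphytree', methods=['POST'])
def biotaphytree():
    taxon_names_obj = request.get_json()
    if not isinstance(taxon_names_obj, list):
        return BadRequest('Taxon names must be a JSON list')
    svc = OpenTreeService()
    # Resolve the session (or public) user and return the formatted response
    return svc.get_tree_for_names(svc.get_user_id(), taxon_names_obj)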
def _format_object(obj): - """Helper method to format an individual object based on its type - """ - cherrypy.response.headers['Content-Type'] = LMFormat.JSON.get_mime_type() + """Helper method to format an individual object based on its type""" + # cherrypy.response.headers['Content-Type'] = LMFormat.JSON.get_mime_type() if isinstance(obj, dict): return obj if isinstance(obj, Atom): diff --git a/requirements.txt b/requirements.txt index 8d139aad..b37cf435 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,9 @@ -Cython Flask # brings Werkzeug +flask-cors mapscript -scipy # brings numpy -matplotlib psycopg2-binary requests dendropy +idigbio +biotaphypy # https://github.com/biotaphy/BiotaPhyPy/releases/download/1.3.1/biotaphypy-1.3.1-py3-none-any.whl, brings numpy, scipy, matplotlib lmpy # https://github.com/lifemapper/lifemapper-server/blob/main/src/lmpy/lmpy-2.9.99-py3-none-any.whl -idigbio # https://github.com/lifemapper/lifemapper-server/blob/main/src/lmserver/idigbio-0.8.5.tar.gz \ No newline at end of file From 9eab28e6d5f6514bb3a409fcbe4e6e847d3d9bc0 Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Thu, 2 Dec 2021 17:17:53 -0600 Subject: [PATCH 12/18] updates; unfinished, untested --- LmWebServer/flask_app/open_tree.py | 3 +- LmWebServer/flask_app/routes.py | 83 +++++++++++++++++++---- LmWebServer/flask_app/scenario.py | 77 +++++++++++++++++++++ LmWebServer/flask_app/scenario_package.py | 50 ++++++++++++++ 4 files changed, 198 insertions(+), 15 deletions(-) create mode 100644 LmWebServer/flask_app/scenario.py create mode 100644 LmWebServer/flask_app/scenario_package.py diff --git a/LmWebServer/flask_app/open_tree.py b/LmWebServer/flask_app/open_tree.py index 8e1c5573..51edbbfd 100644 --- a/LmWebServer/flask_app/open_tree.py +++ b/LmWebServer/flask_app/open_tree.py @@ -15,8 +15,7 @@ from LmCommon.common.time import gmt from LmDbServer.tools.partner_data import Partners from LmServer.common.lmconstants import ( - ARCHIVE_PATH, NONTREE_GBIF_IDS_KEY, TREE_DATA_KEY, TREE_FORMAT_KEY, - TREE_NAME_KEY, UNMATCHED_GBIF_IDS_KEY) + NONTREE_GBIF_IDS_KEY, TREE_DATA_KEY, TREE_FORMAT_KEY, TREE_NAME_KEY, UNMATCHED_GBIF_IDS_KEY) from LmWebServer.services.api.v2.base import LmService from LmWebServer.services.cp_tools.lm_format import lm_formatter diff --git a/LmWebServer/flask_app/routes.py b/LmWebServer/flask_app/routes.py index ba1730ee..322493c6 100644 --- a/LmWebServer/flask_app/routes.py +++ b/LmWebServer/flask_app/routes.py @@ -13,6 +13,9 @@ from LmWebServer.flask_app.gridset import GridsetService from LmWebServer.flask_app.species_hint import SpeciesHintService from LmWebServer.flask_app.open_tree import OpenTreeService +from LmWebServer.flask_app.scenario_package import ScenarioPackageService +from LmWebServer.flask_app.scenario import ScenarioService +from _datetime import date app = Flask(__name__.split('.')[0]) app.secret_key = str.encode(secrets.token_hex()) @@ -325,22 +328,76 @@ def hint(): search_string = request.args.get('search_string', default= None, type = str) return svc.get_hint(user_id, search_string) +# ..................................................................................... 
+@app.route('/api/v2/scenpackage/', methods=['GET']) +def scenpackage(identifier): + svc = ScenarioPackageService() + user_id = svc.get_user() + + scenario_package_id = request.args.get('scenario_package_id', default = None, type = int) + scenario_id = request.args.get('scenario_id', default = None, type = int) + after_time = request.args.get('after_time', default = None, type = float) + before_time = request.args.get('before_time', default = None, type = float) + epsg_code = request.args.get('epsg_code', default= None, type = str) + limit = request.args.get('limit', default = 100, type = int) + offset = request.args.get('offset', default = 0, type = int) + if identifier is None: + response = svc.list_scenario_packages( + user_id, after_time=after_time, before_time=before_time, epsg_code=epsg_code, + scenario_id=scenario_id, limit=limit, offset=offset) + elif identifier.lower() == 'count': + response = svc.count_scenario_packages( + user_id, after_time=after_time, before_time=before_time, epsg_code=epsg_code, scenario_id=scenario_id) + + else: + try: + scenario_package_id = int(identifier) + except: + return BadRequest('{} is not a valid layer ID'.format(identifier)) + else: + response = svc.get_scenario_package(user_id, scenario_package_id) + + return response - # biotaphynames = GBIFTaxonService() - # biotaphypoints = IDigBioOccurrenceService() - # biotaphytree = OpenTreeService() - # envlayer = EnvLayerService() - # gbifparser = GBIFNamesService() - # globalpam = GlobalPAMService() - # gridset = GridsetService() - # hint = SpeciesHintService() - # layer = LayerService() - # occurrence = OccurrenceLayerService() - # opentree = OpenTreeService() - # scenario = ScenarioService() - # scenpackage = ScenarioPackageService() +# ..................................................................................... 
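The handlers above branch on identifier is None, but a URL rule that captures an identifier segment never passes None to the view; reaching the list and count branches for the bare collection URL needs a second rule with a default. A sketch of that pattern for the scenario-package route (the rule strings are assumptions about the intended URLs):

# Sketch: register both the collection URL and the item URL against one view,
# so the existing `if identifier is None` branch can actually be reached.
@app.route('/api/v2/scenpackage/', defaults={'identifier': None}, methods=['GET'])
@app.route('/api/v2/scenpackage/<identifier>', methods=['GET'])
def scenpackage(identifier):
    # ... existing handler body unchanged ...
    ...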
+@app.route('/api/v2/scenario/', methods=['GET']) +def scenario(identifier): + svc = ScenarioPackageService() + user_id = svc.get_user_id() + + scenario_id = request.args.get('scenario_id', default = None, type = int) + after_time = request.args.get('after_time', default = None, type = float) + before_time = request.args.get('before_time', default = None, type = float) + alt_pred_code = request.args.get('alt_pred_code', default= None, type = str) + date_code = request.args.get('date_code', default= None, type = str) + gcm_code = request.args.get('gcm_code', default= None, type = str) + epsg_code = request.args.get('epsg_code', default= None, type = str) + limit = request.args.get('limit', default = 100, type = int) + offset = request.args.get('offset', default = 0, type = int) + + if identifier is None: + response = svc.list_scenarios( + user_id, after_time=after_time, before_time=before_time, alt_pred_code=alt_pred_code, + date_code=date_code, gcm_code=gcm_code, epsg_code=epsg_code, limit=limit, offset=offset) + + elif identifier.lower() == 'count': + response = svc.count_scenarios( + user_id, after_time=after_time, before_time=before_time, alt_pred_code=alt_pred_code, + date_code=date_code, gcm_code=gcm_code, epsg_code=epsg_code) + + else: + try: + scenario_id = int(identifier) + except: + return BadRequest('{} is not a valid layer ID'.format(identifier)) + else: + response = svc.get_scenario(user_id, scenario_id) + + return response + + # sdmproject = SdmProjectService() # shapegrid = ShapegridService() # snippet = SnippetService() diff --git a/LmWebServer/flask_app/scenario.py b/LmWebServer/flask_app/scenario.py new file mode 100644 index 00000000..1fe291b7 --- /dev/null +++ b/LmWebServer/flask_app/scenario.py @@ -0,0 +1,77 @@ +"""This module provides REST services for Scenario""" +import werkzeug.exceptions as WEXC + +from LmCommon.common.lmconstants import HTTPStatus +from LmWebServer.common.lmconstants import HTTPMethod +from LmWebServer.services.api.v2.base import LmService +from LmWebServer.services.common.access_control import check_user_permission +from LmWebServer.services.cp_tools.lm_format import lm_formatter + + +# ............................................................................. +class ScenarioService(LmService): + """Scenarios service class. + """ + + # ................................ + # @lm_formatter + # def GET(self, scenario_id=None, after_time=None, + # alt_pred_code=None, before_time=None, date_code=None, + # epsg_code=None, gcm_code=None, limit=100, offset=0, url_user=None, + # **params): + # """GET request. Individual, list, count + # """ + # if scenario_id is None: + # return self._list_scenarios( + # self.get_user_id(url_user=url_user), after_time=after_time, + # alt_pred_code=alt_pred_code, before_time=before_time, + # date_code=date_code, epsg_code=epsg_code, gcm_code=gcm_code, + # limit=limit, offset=offset) + # + # if scenario_id.lower() == 'count': + # return self._count_scenarios( + # self.get_user_id(url_user=url_user), after_time=after_time, + # alt_pred_code=alt_pred_code, before_time=before_time, + # date_code=date_code, epsg_code=epsg_code, gcm_code=gcm_code) + # + # return self._get_scenario(scenario_id) + + # ................................ 
+ @lm_formatter + def count_scenarios( + self, user_id, after_time=None, before_time=None, alt_pred_code=None, date_code=None, + gcm_code=None, epsg_code=None): + """Return a list of scenarios matching the specified criteria""" + scen_count = self.scribe.count_scenarios( + user_id=user_id, before_time=before_time, after_time=after_time, + epsg=epsg_code, gcm_code=gcm_code, alt_pred_code=alt_pred_code, + date_code=date_code) + return {'count': scen_count} + + # ................................ + @lm_formatter + def get_scenario(self, user_id, scenario_id): + """Return a scenario""" + scn = self.scribe.get_scenario(int(scenario_id), fill_layers=True) + + if scn is None: + raise WEXC.NotFound('Scenario {} not found'.format(scenario_id)) + + if check_user_permission(user_id, scn, HTTPMethod.GET): + return scn + else: + raise WEXC.Forbidden('User {} does not have permission to get scenario {}'.format( + user_id, scenario_id)) + + # ................................ + @lm_formatter + def list_scenarios( + self, user_id, after_time=None, before_time=None, alt_pred_code=None, date_code=None, + gcm_code=None, epsg_code=None, limit=100, offset=0): + """Return a list of scenarios matching the specified criteria""" + + scn_atoms = self.scribe.list_scenarios( + offset, limit, user_id=user_id, before_time=before_time, after_time=after_time, + epsg=epsg_code, gcm_code=gcm_code, alt_pred_code=alt_pred_code, date_code=date_code) + + return scn_atoms diff --git a/LmWebServer/flask_app/scenario_package.py b/LmWebServer/flask_app/scenario_package.py new file mode 100644 index 00000000..a96d9e4e --- /dev/null +++ b/LmWebServer/flask_app/scenario_package.py @@ -0,0 +1,50 @@ +"""This module provides REST services for Scenario packages""" +import werkzeug.exceptions as WEXC + +from LmWebServer.common.lmconstants import HTTPMethod +from LmWebServer.services.api.v2.base import LmService +from LmWebServer.services.common.access_control import check_user_permission +from LmWebServer.services.cp_tools.lm_format import lm_formatter + + +# ............................................................................. +class ScenarioPackageService(LmService): + """Class for scenario packages web services""" + + # ................................ + @lm_formatter + def count_scenario_packages( + self, user_id, after_time=None, before_time=None, epsg_code=None, scenario_id=None): + """Return the number of scenario packages that match the parameters""" + scen_package_count = self.scribe.count_scen_packages( + user_id=user_id, before_time=before_time, after_time=after_time, epsg=epsg_code, + scen_id=scenario_id) + return {'count': scen_package_count} + + # ................................ + @lm_formatter + def get_scenario_package(self, user_id, scenario_package_id): + """Attempt to get a scenario""" + scen_package = self.scribe.get_scen_package(scen_package_id=scenario_package_id) + + if scen_package is None: + raise WEXC.NotFound('Scenario package{} not found'.format(scenario_package_id)) + + if check_user_permission(user_id, scen_package, HTTPMethod.GET): + return scen_package + + # 403 if no permission + raise WEXC.Forbidden('User {} does not have permission for scenario package {}'.format( + user_id, scenario_package_id)) + + # ................................ 
+ @lm_formatter + def list_scenario_packages( + self, user_id, after_time=None, before_time=None, epsg_code=None, scenario_id=None, + limit=100, offset=0): + """Return a list of scenarios matching the specified criteria""" + scen_package_atoms = self.scribe.list_scen_packages( + offset, limit, user_id=user_id, before_time=before_time, after_time=after_time, + scen_id=scenario_id, epsg=epsg_code) + + return scen_package_atoms From 375bf15aeb36e481e665649ab8af288c8199ea82 Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Thu, 2 Dec 2021 17:18:36 -0600 Subject: [PATCH 13/18] helpers --- LmWebServer/flask_tools/__init__.py | 0 LmWebServer/flask_tools/basic_auth.py | 33 ++++++ LmWebServer/flask_tools/lm_format.py | 136 ++++++++++++++++++++++++ LmWebServer/flask_tools/param_caster.py | 42 ++++++++ 4 files changed, 211 insertions(+) create mode 100644 LmWebServer/flask_tools/__init__.py create mode 100644 LmWebServer/flask_tools/basic_auth.py create mode 100644 LmWebServer/flask_tools/lm_format.py create mode 100644 LmWebServer/flask_tools/param_caster.py diff --git a/LmWebServer/flask_tools/__init__.py b/LmWebServer/flask_tools/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/LmWebServer/flask_tools/basic_auth.py b/LmWebServer/flask_tools/basic_auth.py new file mode 100644 index 00000000..5a486832 --- /dev/null +++ b/LmWebServer/flask_tools/basic_auth.py @@ -0,0 +1,33 @@ +"""This module is for basic authentication for Lifemapper services. + +Note: + * We will probably want to switch our authentication mechanism, at least + for Lifemapper proper. We may want to keep basic authentication for + instances though, thus the name of this module is 'basicAuth' +""" +from flask import session +import os + +from LmServer.common.localconstants import PUBLIC_USER +from LmServer.common.log import WebLogger, UserLogger +from LmWebServer.common.lmconstants import SESSION_PATH + +# ............................................................................. 
+def get_user_name(): + """Attempt to get the session user name""" + user = PUBLIC_USER + log = WebLogger() + + try: + session_file_name = os.path.join(SESSION_PATH, 'session-{}'.format(session['username'])) + if os.path.isfile(session_file_name): + try: + user = session['username'] + except: + user = PUBLIC_USER + log = UserLogger(user) + except Exception as e: + log.error('Exception in get_user_name: {}'.format(str(e))) + + session['username'] = user + session['log'] = log diff --git a/LmWebServer/flask_tools/lm_format.py b/LmWebServer/flask_tools/lm_format.py new file mode 100644 index 00000000..b6f3ecac --- /dev/null +++ b/LmWebServer/flask_tools/lm_format.py @@ -0,0 +1,136 @@ +"""This tool provides output formatting for service calls based on headers + +This module provides a tool for formatting outputs of service calls based on +the accept headers of the request +""" +from flask import request +import werkzeug.exceptions as WEXC + +from LmCommon.common.lmconstants import (CSV_INTERFACE, ENCODING, JSON_INTERFACE, LMFormat, SHAPEFILE_INTERFACE) + +from LmServer.common.lmconstants import SnippetOperations +from LmServer.common.localconstants import PUBLIC_USER +from LmServer.common.snippet import SnippetShooter + +from LmWebServer.formatters.eml_formatter import eml_object_formatter +from LmWebServer.formatters.file_formatter import ( + csv_object_formatter, file_formatter, gtiff_object_formatter, shapefile_object_formatter) +from LmWebServer.formatters.geo_json_formatter import geo_json_object_formatter +from LmWebServer.formatters.json_formatter import json_object_formatter +from LmWebServer.formatters.kml_formatter import kml_object_formatter +from LmWebServer.formatters.package_formatter import gridset_package_formatter +from LmWebServer.formatters.progress_formatter import progress_object_formatter + + +# ............................................................................. 
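get_user_name above stores a UserLogger instance in the Flask session; with the default cookie-backed session only small, serializable values (such as the username) belong there. One alternative, sketched as an assumption rather than a required change, is to keep the request-scoped logger on flask.g:

from flask import g, session

# Hypothetical helper: persist only the username in the cookie session and
# park the logger on flask.g, which lives for the current request only.
def remember_user(user_name, log):
    session['username'] = user_name
    g.log = log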
+def lm_formatter(f): + """Wrapper method for formatting service objects + + Use this as a decorator for methods that return objects that should be sent + through formatting before being returned + """ + + def wrapper(*args, **kwargs): + """Wrapper function + """ + # Call the handler and get the object result + try: + handler_result = f(*args, **kwargs) + except TypeError: + raise WEXC.BadRequest + + accept_headers = request.headers['Accept'] + + try: + raw_headers = accept_headers.split(',') + valued_accepts = [] + for hdr in raw_headers: + if len(hdr.split(';')) > 1: + mime, val = hdr.split(';') + valued_accepts.append( + (mime.strip(), float(val.strip('q=')))) + else: + valued_accepts.append((hdr.strip(), 1.0)) + except Exception: + valued_accepts = [('*/*', 1.0)] + + sorted_accepts = sorted( + valued_accepts, key=lambda x: x[1], reverse=True) + + for accept_hdr, _ in sorted_accepts: + try: + if accept_hdr == LMFormat.GEO_JSON.get_mime_type(): + return geo_json_object_formatter( + handler_result).encode(ENCODING) + # If JSON or default + if accept_hdr in [LMFormat.JSON.get_mime_type(), '*/*']: + shoot_snippets( + handler_result, SnippetOperations.VIEWED, + JSON_INTERFACE) + return json_object_formatter( + handler_result).encode(ENCODING) + if accept_hdr == LMFormat.EML.get_mime_type(): + return eml_object_formatter( + handler_result) + if accept_hdr == LMFormat.KML.get_mime_type(): + return kml_object_formatter( + handler_result) + if accept_hdr == LMFormat.GTIFF.get_mime_type(): + return gtiff_object_formatter( + handler_result) + if accept_hdr == LMFormat.SHAPE.get_mime_type(): + shoot_snippets( + handler_result, SnippetOperations.DOWNLOADED, + SHAPEFILE_INTERFACE) + return shapefile_object_formatter( + handler_result) + if accept_hdr == LMFormat.CSV.get_mime_type(): + shoot_snippets( + handler_result, SnippetOperations.DOWNLOADED, + CSV_INTERFACE) + return csv_object_formatter( + handler_result).encode(ENCODING) + if accept_hdr == LMFormat.NEWICK.get_mime_type(): + raise WEXC.BadRequest('Newick response not enabled yet') + # TODO: Use dendropy to convert nexus to newick + # return file_formatter(handler_result.get_dlocation()) + if accept_hdr == LMFormat.NEXUS.get_mime_type(): + return file_formatter( + handler_result.get_dlocation()).encode(ENCODING) + if accept_hdr == LMFormat.ZIP.get_mime_type(): + csvs = True + sdms = True + return gridset_package_formatter( + handler_result, include_csv=csvs, include_sdm=sdms + ) + if accept_hdr == LMFormat.PROGRESS.get_mime_type(): + obj_type, obj_id, detail = handler_result + return progress_object_formatter( + obj_type, obj_id, detail=detail).encode(ENCODING) + except Exception as e: + # Ignore and try next accept header + raise WEXC.NotAcceptable('Failed: {}'.format(str(e))) + # If we cannot find an acceptable formatter, raise HTTP error + raise WEXC.NotAcceptable('Could not find an acceptable format') + + # return json_object_formatter(handler_result) + + return wrapper + + +# ............................................................................. 
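The wrapper above returns encoded bytes without setting a Content-Type (the cherrypy header call in json_formatter is now commented out). One way to carry the negotiated mimetype back to the client, sketched here for the JSON branch only, is to wrap the payload in a Flask Response:

from flask import Response

# Sketch: return a Response carrying the negotiated mimetype instead of bare bytes.
def _as_response(payload, mime_type):
    return Response(payload, mimetype=mime_type)

# e.g. in the JSON branch of the wrapper:
#     return _as_response(
#         json_object_formatter(handler_result).encode(ENCODING),
#         LMFormat.JSON.get_mime_type())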
+def shoot_snippets(obj, operation, format_string): + """Attempt to shoot snippets for downloads / viewings / etc + """ + # Only shoot public data snippets + try: + if obj.get_user_id() == PUBLIC_USER: + shooter = SnippetShooter() + shooter.add_snippets( + obj, operation, url='{}/{}'.format( + obj.metadata_url, format_string), + who='user', agent='webService', why='request') + shooter.shoot_snippets() + except Exception: + # TODO: Log exceptions for snippets + pass diff --git a/LmWebServer/flask_tools/param_caster.py b/LmWebServer/flask_tools/param_caster.py new file mode 100644 index 00000000..0fa0b63c --- /dev/null +++ b/LmWebServer/flask_tools/param_caster.py @@ -0,0 +1,42 @@ +"""This function ensures that query parameters are case-insensitive + +This dispatcher casts query parameters appropriately so that they can be used + directly by functions down the line. We handle them all here to prevent + code redundancy. +""" +from flask import request + +from LmWebServer.common.lmconstants import ( + QP_NAME_KEY, QP_PROCESS_KEY, QUERY_PARAMETERS) + + +# ............................................................................. +def cast_parameters(): + """Casts the provided parameters to match what we expect + + Cast the provided parameters and change the names to match what we expect. + This allows query parameter names to be case-insensitive and of the type we + expect for processing. + """ + new_parameters = {} + in_params = request.args.get() + + for key in in_params: + # Convert the key to lower case and remove any underscores + mod_key = key.replace('_', '').lower() + if mod_key in QUERY_PARAMETERS: + query_param = QUERY_PARAMETERS[mod_key] + if QP_PROCESS_KEY in query_param: + # If we have a processing instruction, do it + new_parameters[query_param[QP_NAME_KEY] + ] = query_param[QP_PROCESS_KEY](in_params[key]) + else: + # If not, just set to what was passed in but for new parameter + # name + new_parameters[query_param[QP_NAME_KEY]] = in_params[key] + + # Set the request parameters to the new values + for key, val in new_parameters.items(): + request.args.__setattr__(key, val) + + # cherrypy.request.params = new_parameters From 041e5271ba17ed008627e3565ff2b9ac7b3c59fa Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Fri, 3 Dec 2021 11:29:27 -0600 Subject: [PATCH 14/18] updates; unfinished, untested --- LmWebServer/flask_app/env_layer.py | 2 +- LmWebServer/flask_app/gbif_parser.py | 2 +- LmWebServer/flask_app/global_pam.py | 4 +- LmWebServer/flask_app/gridset.py | 8 +- LmWebServer/flask_app/occurrence.py | 12 +- LmWebServer/flask_app/open_tree.py | 9 +- LmWebServer/flask_app/routes.py | 228 +++++++++++++++------- LmWebServer/flask_app/scenario.py | 6 +- LmWebServer/flask_app/scenario_package.py | 2 +- LmWebServer/flask_app/sdm_project.py | 144 ++++++++++++++ LmWebServer/flask_app/shapegrid.py | 130 ++++++++++++ LmWebServer/flask_app/snippet.py | 41 ++++ LmWebServer/flask_app/species_hint.py | 2 +- 13 files changed, 501 insertions(+), 89 deletions(-) create mode 100644 LmWebServer/flask_app/sdm_project.py create mode 100644 LmWebServer/flask_app/shapegrid.py create mode 100644 LmWebServer/flask_app/snippet.py diff --git a/LmWebServer/flask_app/env_layer.py b/LmWebServer/flask_app/env_layer.py index 2f614d7b..f8030f75 100644 --- a/LmWebServer/flask_app/env_layer.py +++ b/LmWebServer/flask_app/env_layer.py @@ -2,7 +2,7 @@ import werkzeug.exceptions as WEXC from LmWebServer.common.lmconstants import HTTPMethod -from LmWebServer.services.api.v2.base import LmService +from 
LmWebServer.flask_app.base import LmService from LmWebServer.services.common.access_control import check_user_permission from LmWebServer.services.cp_tools.lm_format import lm_formatter diff --git a/LmWebServer/flask_app/gbif_parser.py b/LmWebServer/flask_app/gbif_parser.py index 17871d6f..64a38cb3 100644 --- a/LmWebServer/flask_app/gbif_parser.py +++ b/LmWebServer/flask_app/gbif_parser.py @@ -5,7 +5,7 @@ from werkzeug.exceptions import BadRequest from LmCommon.common.api_query import GbifAPI -from LmWebServer.services.api.v2.base import LmService +from LmWebServer.flask_app.base import LmService from LmWebServer.services.cp_tools.lm_format import lm_formatter # TODO: These need to go into a constants file diff --git a/LmWebServer/flask_app/global_pam.py b/LmWebServer/flask_app/global_pam.py index 86bf1fa5..ad9afc5c 100644 --- a/LmWebServer/flask_app/global_pam.py +++ b/LmWebServer/flask_app/global_pam.py @@ -119,9 +119,9 @@ def post_pam_subset( gridset = subset_global_pam( archive_name, solr_matches, user_id, bbox=bbox, cell_size=cell_size, scribe=self.scribe) - gatom = Atom( + atom = Atom( gridset.get_id(), gridset.name, gridset.metadata_url, gridset.mod_time, epsg=gridset.epsg_code) - return make_response(gatom, HTTPStatus.ACCEPTED) + return make_response(atom, HTTPStatus.ACCEPTED) # ................................ diff --git a/LmWebServer/flask_app/gridset.py b/LmWebServer/flask_app/gridset.py index 1305c26a..5ceb74f6 100644 --- a/LmWebServer/flask_app/gridset.py +++ b/LmWebServer/flask_app/gridset.py @@ -12,15 +12,18 @@ DEFAULT_TREE_SCHEMA, JobStatus, LMFormat, MatrixType, ProcessType) from LmCommon.common.time import gmt from LmCommon.encoding.layer_encoder import LayerEncoder + from LmDbServer.boom.boom_collate import BoomCollate + from LmServer.base.atom import Atom from LmServer.base.layer import Vector from LmServer.base.service_object import ServiceObject from LmServer.legion.lm_matrix import LMMatrix from LmServer.legion.mtx_column import MatrixColumn from LmServer.legion.tree import Tree + from LmWebServer.common.lmconstants import HTTPMethod -from LmWebServer.services.api.v2.base import LmService +from LmWebServer.flask_app.base import LmService from LmWebServer.services.api.v2.matrix import MatrixService from LmWebServer.services.common.access_control import check_user_permission from LmWebServer.services.common.boom_post import BoomPoster @@ -112,8 +115,7 @@ def request_analysis(self, gridset_id, do_mcpa=False, num_permutations=500, do_c boom_col.create_workflow() boom_col.close() - response = make_response(gridset, HTTPStatus.ACCEPTED) - return response + return make_response(gridset, HTTPStatus.ACCEPTED) else: raise WEXC.BadRequest('Must specify at least one analysis to perform') diff --git a/LmWebServer/flask_app/occurrence.py b/LmWebServer/flask_app/occurrence.py index 1a27db14..d106aa35 100644 --- a/LmWebServer/flask_app/occurrence.py +++ b/LmWebServer/flask_app/occurrence.py @@ -1,11 +1,12 @@ """This module provides REST services for Occurrence sets""" +from flask import make_response, Response from http import HTTPStatus import werkzeug.exceptions as WEXC from LmCommon.common.lmconstants import (JobStatus) from LmServer.base.atom import Atom from LmWebServer.common.lmconstants import HTTPMethod -from LmWebServer.services.api.v2.base import LmService +from LmWebServer.flask_app.base import LmService from LmWebServer.services.common.access_control import check_user_permission from LmWebServer.services.common.boom_post import BoomPoster from 
LmWebServer.services.cp_tools.lm_format import lm_formatter @@ -34,7 +35,7 @@ def delete_occurrence_set(self, user_id, occset_id): if check_user_permission(user_id, occ, HTTPMethod.DELETE): success = self.scribe.delete_object(occ) if success: - return HTTPStatus.NO_CONTENT + return Response(status=HTTPStatus.NO_CONTENT) # If unsuccessful, fail raise WEXC.InternalServerError('Failed to delete occurrence set') @@ -56,10 +57,9 @@ def post_boom_data(self, user_id, user_email, boom_data, **params): boom_post = BoomPoster(user_id, user_email, boom_data, self.scribe) gridset = boom_post.init_boom() - # cherrypy.response.status = HTTPStatus.ACCEPTED - return Atom( - gridset.get_id(), gridset.name, gridset.metadata_url, - gridset.mod_time, epsg=gridset.epsg_code) + atom = Atom( + gridset.get_id(), gridset.name, gridset.metadata_url, gridset.mod_time, epsg=gridset.epsg_code) + return make_response(atom, HTTPStatus.ACCEPTED) # ................................ @lm_formatter diff --git a/LmWebServer/flask_app/open_tree.py b/LmWebServer/flask_app/open_tree.py index 51edbbfd..f93cd23b 100644 --- a/LmWebServer/flask_app/open_tree.py +++ b/LmWebServer/flask_app/open_tree.py @@ -4,19 +4,22 @@ * Use opentree wrapper code * Catch service errors from OpenTree """ +from biotaphy.client.ot_service_wrapper.open_tree import (get_info_for_names, induced_subtree) + import hashlib -from http import HTTPStatus import os import werkzeug.exceptions as WEXC -from biotaphy.client.ot_service_wrapper.open_tree import (get_info_for_names, induced_subtree) from LmCommon.common.lmconstants import ENCODING from LmCommon.common.ready_file import ready_filename from LmCommon.common.time import gmt + from LmDbServer.tools.partner_data import Partners + from LmServer.common.lmconstants import ( NONTREE_GBIF_IDS_KEY, TREE_DATA_KEY, TREE_FORMAT_KEY, TREE_NAME_KEY, UNMATCHED_GBIF_IDS_KEY) -from LmWebServer.services.api.v2.base import LmService + +from LmWebServer.flask_app.base import LmService from LmWebServer.services.cp_tools.lm_format import lm_formatter diff --git a/LmWebServer/flask_app/routes.py b/LmWebServer/flask_app/routes.py index 322493c6..4fb4a5f4 100644 --- a/LmWebServer/flask_app/routes.py +++ b/LmWebServer/flask_app/routes.py @@ -15,7 +15,10 @@ from LmWebServer.flask_app.open_tree import OpenTreeService from LmWebServer.flask_app.scenario_package import ScenarioPackageService from LmWebServer.flask_app.scenario import ScenarioService -from _datetime import date +from LmWebServer.flask_app.sdm_project import SdmProjectService +from LmWebServer.flask_app.snippet import SnippetService + +from LmCommon.common.lmconstants import JobStatus app = Flask(__name__.split('.')[0]) app.secret_key = str.encode(secrets.token_hex()) @@ -52,71 +55,6 @@ def logout(): session.pop('username', None) return redirect(url_for('index')) -# ..................................................................................... -@app.route('/api/v2/occ/', methods=['GET', 'POST', 'DELETE']) -def occurrence(identifier): - """Occurrence API service for GET, POST, and DELETE operations on occurrences - - Args: - identifier (str): An occurrence identifier to search for. - - Returns: - dict: For GET and POST operations, zero or more dictionaries of metadata for the requested or - posted record(s); for DELETE operations, True or False for success - - TODO: Why is boom post here? Create a different service for that. 
- """ - svc = OccurrenceLayerService() - user = svc.get_user() - user_id = user.user_id - - if request.method == 'POST' and request.is_json: - boom_data = request.get_json() - svc.post_boom_data(user_id, user.email, boom_data) - - elif request.method == 'DELETE': - svc.delete_occurrence_set(user_id, identifier) - - elif request.method == 'GET': - after_time = request.args.get('after_time', default = None, type = float) - before_time = request.args.get('before_time', default = None, type = float) - display_name = request.args.get('display_name', default = None, type = str) - epsg_code = request.args.get('epsg_code', default= None, type = str) - minimum_number_of_points = request.args.get('minimum_number_of_points', default = 1, type = int) - limit = request.args.get('limit', default = 100, type = int) - offset = request.args.get('offset', default = 0, type = int) - # url_user = request.args.get('url_user', default = None, type = str) - status = request.args.get('status', default = None, type = str) - gridset_id = request.args.get('gridset_id', default = None, type = str) - fill_points = request.args.get('fill_points', default = False, type = bool) - - if identifier is None: - response = svc.list_occurrence_sets( - user_id, after_time=after_time, before_time=before_time, display_name=display_name, - epsg_code=epsg_code, minimum_number_of_points=minimum_number_of_points, limit=limit, - offset=offset, gridset_id=gridset_id, status=status) - - elif identifier.lower() == 'count': - response = svc.count_occurrence_sets( - user_id, after_time=after_time, before_time=before_time, display_name=display_name, - epsg_code=epsg_code, minimum_number_of_points=minimum_number_of_points, - gridset_id=gridset_id, status=status) - - elif identifier.lower() == 'web': - response = svc.list_web_occurrence_sets( - user_id, after_time=after_time, before_time=before_time, display_name=display_name, - epsg_code=epsg_code, minimum_number_of_points=minimum_number_of_points, limit=limit, - offset=offset, gridset_id=gridset_id, status=status) - - else: - try: - occid = int(identifier) - except: - return BadRequest('{} is not a valid layer ID'.format(identifier)) - else: - response = svc.get_occurrence_set(user_id, occid, fill_points=fill_points) - - return response # ..................................................................................... @app.route('/api/v2/layer/', methods=['GET', 'DELETE']) @@ -183,6 +121,71 @@ def layer(identifier): return response +# ..................................................................................... +@app.route('/api/v2/occ/', methods=['GET', 'POST', 'DELETE']) +def occurrence(identifier): + """Occurrence API service for GET, POST, and DELETE operations on occurrences + + Args: + identifier (str): An occurrence identifier to search for. + + Returns: + dict: For GET and POST operations, zero or more dictionaries of metadata for the requested or + posted record(s); for DELETE operations, True or False for success + + TODO: Why is boom post here? Create a different service for that. 
+ """ + svc = OccurrenceLayerService() + user = svc.get_user() + user_id = user.user_id + + if request.method == 'POST' and request.is_json: + boom_data = request.get_json() + svc.post_boom_data(user_id, user.email, boom_data) + + elif request.method == 'DELETE': + svc.delete_occurrence_set(user_id, identifier) + + elif request.method == 'GET': + after_time = request.args.get('after_time', default = None, type = float) + before_time = request.args.get('before_time', default = None, type = float) + display_name = request.args.get('display_name', default = None, type = str) + epsg_code = request.args.get('epsg_code', default= None, type = str) + minimum_number_of_points = request.args.get('minimum_number_of_points', default = 1, type = int) + limit = request.args.get('limit', default = 100, type = int) + offset = request.args.get('offset', default = 0, type = int) + status = request.args.get('status', default = None, type = int) + gridset_id = request.args.get('gridset_id', default = None, type = int) + fill_points = request.args.get('fill_points', default = False, type = bool) + + if identifier is None: + response = svc.list_occurrence_sets( + user_id, after_time=after_time, before_time=before_time, display_name=display_name, + epsg_code=epsg_code, minimum_number_of_points=minimum_number_of_points, limit=limit, + offset=offset, gridset_id=gridset_id, status=status) + + elif identifier.lower() == 'count': + response = svc.count_occurrence_sets( + user_id, after_time=after_time, before_time=before_time, display_name=display_name, + epsg_code=epsg_code, minimum_number_of_points=minimum_number_of_points, + gridset_id=gridset_id, status=status) + + elif identifier.lower() == 'web': + response = svc.list_web_occurrence_sets( + user_id, after_time=after_time, before_time=before_time, display_name=display_name, + epsg_code=epsg_code, minimum_number_of_points=minimum_number_of_points, limit=limit, + offset=offset, gridset_id=gridset_id, status=status) + + else: + try: + occid = int(identifier) + except: + return BadRequest('{} is not a valid layer ID'.format(identifier)) + else: + response = svc.get_occurrence_set(user_id, occid, fill_points=fill_points) + + return response + # ..................................................................................... @app.route('/api/v2/biotaphynames', methods=['POST']) def biotaphynames(): @@ -364,7 +367,7 @@ def scenpackage(identifier): # ..................................................................................... @app.route('/api/v2/scenario/', methods=['GET']) def scenario(identifier): - svc = ScenarioPackageService() + svc = ScenarioService() user_id = svc.get_user_id() scenario_id = request.args.get('scenario_id', default = None, type = int) @@ -397,6 +400,97 @@ def scenario(identifier): return response +# ..................................................................................... +@app.route('/api/v2/sdmproject/', methods=['GET', 'POST', 'DELETE']) +def sdmproject(identifier): + """SdmProject API service for GET, POST, and DELETE operations on SDM projections + + Args: + identifier (str): An sdmproject identifier to search for. 
+
+    Returns:
+        dict: For GET and POST operations, zero or more dictionaries of metadata for the requested or
+            posted record(s); for DELETE operations, True or False for success
+    """
+    svc = SdmProjectService()()
+    user = svc.get_user()
+    user_id = user.user_id
+
+    if request.method == 'POST' and request.is_json:
+        projection_data = request.get_json()
+        response = svc.post_boom_data(user_id, user.email, projection_data)
+
+    elif request.method == 'DELETE':
+        response = svc.delete_projection(user_id, identifier)
+
+    elif request.method == 'GET':
+        after_time = request.args.get('after_time', default = None, type = float)
+        before_time = request.args.get('before_time', default = None, type = float)
+        after_status = request.args.get('after_status', default = JobStatus.COMPLETE, type = int)
+        before_status = request.args.get('before_status', default = JobStatus.COMPLETE, type = int)
+        alg_code = request.args.get('alg_code', default = None, type = str)
+        display_name = request.args.get('display_name', default = None, type = str)
+        epsg_code = request.args.get('epsg_code', default = None, type = str)
+        occurrence_set_id = request.args.get('occurrence_set_id', default = None, type = int)
+        mdl_scenario_code = request.args.get('mdl_scenario_code', default = None, type = str)
+        prj_scenario_code = request.args.get('prj_scenario_code', default = None, type = str)
+        status = request.args.get('status', default = JobStatus.COMPLETE, type = int)
+        gridset_id = request.args.get('gridset_id', default = None, type = int)
+        limit = request.args.get('limit', default = 100, type = int)
+        offset = request.args.get('offset', default = 0, type = int)
+        atom = request.args.get('atom', default = True, type = bool)
+
+        if identifier is None:
+            response = svc.list_projections(
+                user_id, after_time=after_time, before_time=before_time, after_status=after_status,
+                before_status=before_status, alg_code=alg_code, display_name=display_name,
+                epsg_code=epsg_code, occurrence_set_id=occurrence_set_id, mdl_scenario_code=mdl_scenario_code,
+                prj_scenario_code=prj_scenario_code, status=status, gridset_id=gridset_id,
+                limit=limit, offset=offset, atom=atom)
+
+        elif identifier.lower() == 'count':
+            response = svc.count_projections(
+                user_id, after_time=after_time, before_time=before_time, after_status=after_status,
+                before_status=before_status, alg_code=alg_code, display_name=display_name,
+                epsg_code=epsg_code, occurrence_set_id=occurrence_set_id, mdl_scenario_code=mdl_scenario_code,
+                prj_scenario_code=prj_scenario_code, status=status, gridset_id=gridset_id)
+
+        else:
+            try:
+                projection_id = int(identifier)
+            except ValueError:
+                return BadRequest('{} is not a valid projection ID'.format(identifier))
+            else:
+                response = svc.get_projection(user_id, projection_id)
+
+    return response
+
+# .....................................................................................
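# A minimal usage sketch for the projection route above (hedged: the host name,
# gridset id, and projection id are invented for illustration, and the `requests`
# library is assumed to be available; the query parameter names mirror the GET
# branch of sdmproject()):
#
#     import requests
#
#     base = 'http://localhost/api/v2/sdmproject'
#     # List up to 10 projections for a gridset (status defaults to JobStatus.COMPLETE)
#     listing = requests.get(base, params={'gridset_id': 42, 'limit': 10})
#     # Count the matching projections instead of listing them
#     count = requests.get(base + '/count', params={'gridset_id': 42})
#     # Fetch a single projection by its integer identifier
#     single = requests.get(base + '/123')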
+@app.route('/api/v2/snippet', methods=['GET']) +def snippet(): + svc = SnippetService() + user_id = svc.get_user() + + ident1 = request.args.get('ident1', default = None, type = str) + ident2 = request.args.get('ident2', default = None, type = str) + provider = request.args.get('provider', default = None, type = str) + collection = request.args.get('collection', default = None, type = str) + catalog_number = request.args.get('catalog_number', default = None, type = str) + operation = request.args.get('operation', default = None, type = str) + after_time = request.args.get('after_time', default = None, type = float) + before_time = request.args.get('before_time', default = None, type = float) + url = request.args.get('url', default = None, type = str) + who = request.args.get('who', default = None, type = str) + agent = request.args.get('agent', default = None, type = str) + why = request.args.get('why', default = None, type = str) + + response = svc.get_snippet( + user_id, ident1=ident1, ident2=ident2, provider=provider, collection=collection, + catalog_number=catalog_number, operation=operation, after_time=after_time, before_time=before_time, + url=url, who=who, agent=agent, why=why) + + return response + # sdmproject = SdmProjectService() # shapegrid = ShapegridService() diff --git a/LmWebServer/flask_app/scenario.py b/LmWebServer/flask_app/scenario.py index 1fe291b7..a9b17725 100644 --- a/LmWebServer/flask_app/scenario.py +++ b/LmWebServer/flask_app/scenario.py @@ -1,17 +1,15 @@ """This module provides REST services for Scenario""" import werkzeug.exceptions as WEXC -from LmCommon.common.lmconstants import HTTPStatus from LmWebServer.common.lmconstants import HTTPMethod -from LmWebServer.services.api.v2.base import LmService +from LmWebServer.flask_app.base import LmService from LmWebServer.services.common.access_control import check_user_permission from LmWebServer.services.cp_tools.lm_format import lm_formatter # ............................................................................. class ScenarioService(LmService): - """Scenarios service class. - """ + """Scenarios service class.""" # ................................ 
# @lm_formatter diff --git a/LmWebServer/flask_app/scenario_package.py b/LmWebServer/flask_app/scenario_package.py index a96d9e4e..baab6b31 100644 --- a/LmWebServer/flask_app/scenario_package.py +++ b/LmWebServer/flask_app/scenario_package.py @@ -2,7 +2,7 @@ import werkzeug.exceptions as WEXC from LmWebServer.common.lmconstants import HTTPMethod -from LmWebServer.services.api.v2.base import LmService +from LmWebServer.flask_app.base import LmService from LmWebServer.services.common.access_control import check_user_permission from LmWebServer.services.cp_tools.lm_format import lm_formatter diff --git a/LmWebServer/flask_app/sdm_project.py b/LmWebServer/flask_app/sdm_project.py new file mode 100644 index 00000000..9eddd1b2 --- /dev/null +++ b/LmWebServer/flask_app/sdm_project.py @@ -0,0 +1,144 @@ +"""This module provides REST services for Projections""" +from flask import make_response, Response +from http import HTTPStatus +import werkzeug.exceptions as WEXC + +from LmServer.base.atom import Atom + +from LmWebServer.common.lmconstants import HTTPMethod +from LmWebServer.flask_app.base import LmService +from LmWebServer.services.common.access_control import check_user_permission +from LmWebServer.services.common.boom_post import BoomPoster +from LmWebServer.services.cp_tools.lm_format import lm_formatter + + +# ................................................................0............. +class SdmProjectService(LmService): + """Class responsible for SDM Projection services + """ + + # ................................ + def get_projection(self, user_id, projection_id): + """Retrieve a projection""" + prj = self.scribe.get_sdm_project(int(projection_id)) + + if prj is None: + raise WEXC.NotFound('Projection {} not found'.format(projection_id)) + + if check_user_permission(user_id, prj, HTTPMethod.GET): + return prj + + # If no permission, HTTP 403 + raise WEXC.Forbidden('User {} does not have permission to access projection {}'.format( + user_id, projection_id)) + + + # ................................ + def delete_projection(self, user_id, projection_id): + """Attempts to delete a projection + + Args: + projection_id: The id of the projection to delete + """ + prj = self.scribe.get_sdm_project(int(projection_id)) + + if prj is None: + raise WEXC.NotFound('Projection {} not found'.format(projection_id)) + + if not check_user_permission(user_id, prj, HTTPMethod.DELETE): + raise WEXC.Forbidden('User {} does not have permission to delete projection {}'.format( + user_id, projection_id)) + + else: + success = self.scribe.delete_object(prj) + if success: + return Response(status=HTTPStatus.NO_CONTENT) + else: + # If we have permission but cannot delete, error + raise WEXC.InternalServerError('Failed to delete projection {}'.format(projection_id)) + + # ................................ + @lm_formatter + def GET(self, projection_id=None, after_status=None, after_time=None, + algorithm_code=None, before_status=None, before_time=None, + display_name=None, epsg_code=None, limit=100, + model_scenario_code=None, occurrence_set_id=None, offset=0, + projection_scenario_code=None, url_user=None, scenario_id=None, + status=None, gridset_id=None, atom=True, **params): + """Perform a GET request. List, count, or get individual projection. 
+ """ + if projection_id is None: + return self._list_projections( + self.get_user_id(url_user=url_user), after_status=after_status, + after_time=after_time, alg_code=algorithm_code, + before_status=before_status, before_time=before_time, + display_name=display_name, epsg_code=epsg_code, limit=limit, + mdl_scenario_code=model_scenario_code, + occurrence_set_id=occurrence_set_id, offset=offset, + prj_scenario_code=projection_scenario_code, status=status, + gridset_id=gridset_id, atom=atom) + + if projection_id.lower() == 'count': + return self._count_projections( + self.get_user_id(url_user=url_user), after_status=after_status, + after_time=after_time, alg_code=algorithm_code, + before_status=before_status, before_time=before_time, + display_name=display_name, epsg_code=epsg_code, + mdl_scenario_code=model_scenario_code, + occurrence_set_id=occurrence_set_id, + prj_scenario_code=projection_scenario_code, status=status, + gridset_id=gridset_id) + + # Get individual as fall back + return self._get_projection(projection_id) + + # ................................ + @lm_formatter + def post_boom_data(self, user_id, user_email, projection_data, **params): + """Posts a new projection + """ + boom_post = BoomPoster(user_id, user_email, projection_data, self.scribe) + gridset = boom_post.init_boom() + + atom = Atom( + gridset.get_id(), gridset.name, gridset.metadata_url, gridset.mod_time, epsg=gridset.epsg_code) + return make_response(atom, HTTPStatus.ACCEPTED) + + # ................................ + def count_projections( + self, user_id, after_time=None, before_time=None, after_status=None, before_status=None, + alg_code=None, display_name=None, epsg_code=None, occurrence_set_id=None, + mdl_scenario_code=None, prj_scenario_code=None, status=None, gridset_id=None): + """Return a count of projections matching the specified criteria + """ + # Process status parameter + if status: + before_status = status + after_status = status + + prj_count = self.scribe.count_sdm_projects( + user_id=user_id, display_name=display_name, after_time=after_time, before_time=before_time, + epsg=epsg_code, after_status=after_status, before_status=before_status, + occ_set_id=occurrence_set_id, alg_code=alg_code, mdl_scen_code=mdl_scenario_code, + prj_scen_code=prj_scenario_code, gridset_id=gridset_id) + return {'count': prj_count} + + # ................................ 
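    # A small equivalence sketch for the status handling above (the user id is a
    # placeholder): passing `status` simply sets both bounds, so
    #
    #     svc.count_projections('someuser', status=JobStatus.COMPLETE)
    #
    # should count the same records as
    #
    #     svc.count_projections('someuser',
    #                           after_status=JobStatus.COMPLETE,
    #                           before_status=JobStatus.COMPLETE)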
+ def list_projections( + self, user_id, after_time=None, before_time=None, after_status=None, before_status=None, + alg_code=None, display_name=None, epsg_code=None, occurrence_set_id=None, + mdl_scenario_code=None, prj_scenario_code=None, status=None, gridset_id=None, + limit=100, offset=0, atom=True): + """Return a list of projections matching the specified criteria""" + # Process status parameter + if status: + before_status = status + after_status = status + + projs = self.scribe.list_sdm_projects( + offset, limit, user_id=user_id, display_name=display_name, after_time=after_time, before_time=before_time, + epsg=epsg_code, after_status=after_status, before_status=before_status, + occ_set_id=occurrence_set_id, alg_code=alg_code, mdl_scen_code=mdl_scenario_code, + prj_scen_code=prj_scenario_code, gridset_id=gridset_id, atom=atom) + + return projs diff --git a/LmWebServer/flask_app/shapegrid.py b/LmWebServer/flask_app/shapegrid.py new file mode 100644 index 00000000..96263585 --- /dev/null +++ b/LmWebServer/flask_app/shapegrid.py @@ -0,0 +1,130 @@ +"""This module provides REST services for shapegrids""" +from flask import make_response, Response +from http import HTTPStatus +import werkzeug.exceptions as WEXC + +from LmServer.legion.shapegrid import Shapegrid +from LmWebServer.common.lmconstants import HTTPMethod +from LmWebServer.flask_app.base import LmService +from LmWebServer.services.common.access_control import check_user_permission +from LmWebServer.services.cp_tools.lm_format import lm_formatter + + +# ............................................................................. +class ShapegridService(LmService): + """Class for shapegrid service.""" + + # ................................ + def delete_shapegrid(self, user_id, shapegrid_id): + """Attempts to delete a shapegrid + + Args: + shapegrid_id: The id of the shapegrid to delete + """ + shapegrid = self.scribe.get_shapegrid(lyr_id=shapegrid_id) + if shapegrid is None: + raise WEXC.NotFound('Shapegrid not found') + + # If allowed to, delete + if check_user_permission(user_id, shapegrid, HTTPMethod.DELETE): + success = self.scribe.delete_object(shapegrid) + if success: + return Response(status=HTTPStatus.NO_CONTENT) + + # How can this happen? Catch and respond appropriately + raise WEXC.InternalServerError('Failed to delete shapegrid') + + # If request is not permitted, raise exception + raise WEXC.Forbidden('User does not have permission to delete this shapegrid') + + # ................................ + @lm_formatter + def GET(self, shapegrid_id=None, after_time=None, before_time=None, + cell_sides=None, cell_size=None, epsg_code=None, limit=100, + offset=0, url_user=None, **params): + """Perform a GET request, either list, count, or return individual.""" + if shapegrid_id is None: + return self._list_shapegrids( + self.get_user_id(url_user=url_user), after_time=after_time, + before_time=before_time, cell_sides=cell_sides, + cell_size=cell_size, epsg_code=epsg_code, limit=limit, + offset=offset) + if shapegrid_id.lower() == 'count': + return self._count_shapegrids( + self.get_user_id(url_user=url_user), after_time=after_time, + before_time=before_time, cell_sides=cell_sides, + cell_size=cell_size, epsg_code=epsg_code) + + # Fallback to return individual + return self._get_shapegrid(shapegrid_id) + + # ................................ 
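    # A hedged call sketch for the POST helper that follows (every argument value
    # here is invented; the bbox tuple layout and 'dd' map units are assumptions,
    # not confirmed by this changeset):
    #
    #     svc = ShapegridService()
    #     grid = svc.post_shapegrid(
    #         'someuser', 'global_half_degree', 4326, cell_sides=4, cell_size=0.5,
    #         map_units='dd', bbox=(-180, -90, 180, 90), cutout=None)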
+    @lm_formatter
+    def post_shapegrid(
+            self, user_id, name, epsg_code, cell_sides, cell_size, map_units, bbox, cutout, **params):
+        """Posts a new shapegrid"""
+        shapegrid = Shapegrid(name, user_id, epsg_code, cell_sides, cell_size, map_units, bbox)
+        updated_shapegrid = self.scribe.find_or_insert_shapegrid(shapegrid, cutout=cutout)
+        return updated_shapegrid
+
+    # ................................
+    def count_shapegrids(
+            self, user_id, after_time=None, before_time=None, cell_sides=None, cell_size=None, epsg_code=None):
+        """Count shapegrid objects matching the specified criteria
+
+        Args:
+            user_id (str): The user to count shapegrids for. Note that this may not be the
+                same user logged into the system
+            after_time (float): Return shapegrids modified after this time (Modified Julian Day)
+            before_time (float): Return shapegrids modified before this time (Modified Julian Day)
+            cell_sides (int): Number of sides for shapegrid cells, 4 for square cells, 6 for hexagonal cells.
+            cell_size (float): Size of cells in mapunits
+            epsg_code (str): Return shapegrids with this EPSG code
+        """
+        shapegrid_count = self.scribe.count_shapegrids(
+            user_id=user_id, cell_sides=cell_sides, cell_size=cell_size, after_time=after_time,
+            before_time=before_time, epsg=epsg_code)
+        return {'count': shapegrid_count}
+
+    # ................................
+    def get_shapegrid(self, user_id, shapegrid_id):
+        """Return a shapegrid
+
+        Args:
+            user_id (str): The user to return a shapegrid for. Note that this may not be the
+                same user logged into the system
+            shapegrid_id (int): Database key for the shapegrid object to return
+        """
+        shapegrid = self.scribe.get_shapegrid(lyr_id=shapegrid_id)
+        if shapegrid is None:
+            raise WEXC.NotFound('Shapegrid {} was not found'.format(shapegrid_id))
+
+        if check_user_permission(user_id, shapegrid, HTTPMethod.GET):
+            return shapegrid
+        else:
+            raise WEXC.Forbidden('User {} does not have permission to access shapegrid {}'.format(
+                user_id, shapegrid_id))
+
+    # ................................
+    def list_shapegrids(
+            self, user_id, after_time=None, before_time=None, cell_sides=None, cell_size=None, epsg_code=None,
+            limit=100, offset=0):
+        """List shapegrid objects matching the specified criteria
+
+        Args:
+            user_id (str): The user to list shapegrids for. Note that this may not be the
+                same user logged into the system
+            after_time (float): Return shapegrids modified after this time (Modified Julian Day)
+            before_time (float): Return shapegrids modified before this time (Modified Julian Day)
+            cell_sides (int): Number of sides for shapegrid cells, 4 for square cells, 6 for hexagonal cells.
+ cell_size (float): Size of cells in mapunits + epsg_code (str): Return shapegrids with this EPSG code + limit: Return this number of shapegrids, at most + offset: Offset the returned shapegrids by this number + """ + shapegrid_atoms = self.scribe.list_shapegrids( + offset, limit, user_id=user_id, cell_sides=cell_sides, + cell_size=cell_size, after_time=after_time, + before_time=before_time, epsg=epsg_code) + # Format return + # Set headers + return shapegrid_atoms diff --git a/LmWebServer/flask_app/snippet.py b/LmWebServer/flask_app/snippet.py new file mode 100644 index 00000000..bb51fb27 --- /dev/null +++ b/LmWebServer/flask_app/snippet.py @@ -0,0 +1,41 @@ +"""This module provides services for querying snippets""" +from LmServer.common.solr import query_snippet_index +from LmWebServer.flask_app.base import LmService +from LmWebServer.services.cp_tools.lm_format import lm_formatter + + +# ............................................................................. +class SnippetService(LmService): + """This class is responsible for the Lifemapper snippet services. + """ + + # ................................ + @lm_formatter + def get_snippet( + self, ident1=None, provider=None, collection=None, catalog_number=None, operation=None, + after_time=None, before_time=None, ident2=None, url=None, who=None, agent=None, + why=None, **params): + """Query the Lifemapper snippet index and return matches. + + Args: + ident1 (int): An identifier for the primary object (probably occurrenceset) + ident2: A identifier for the secondary object (occurrenceset or projection) + provider (str): The occurrence point provider + collection (str): The collection the point belongs to + catalog_number (str): The catalog number of the occurrence point + operation (str): A LmServer.common.lmconstants.SnippetOperations + after_time (float): Return hits after this time (MJD format) + before_time (float): Return hits before this time (MJD format) + url: A url for the resulting object + who: Who initiated the action + agent: The agent that initiated the action + why: Why the action was initiated + + Todo: Do I need to send user information? + Todo: Are provider, collection, catalog_number args for primary object/ident1? 
+ """ + return query_snippet_index( + ident1=ident1, provider=provider, collection=collection, + catalog_number=catalog_number, operation=operation, + after_time=after_time, before_time=before_time, ident2=ident2, + url=url, who=who, agent=agent, why=why) diff --git a/LmWebServer/flask_app/species_hint.py b/LmWebServer/flask_app/species_hint.py index 70cec2ae..a169609a 100644 --- a/LmWebServer/flask_app/species_hint.py +++ b/LmWebServer/flask_app/species_hint.py @@ -3,7 +3,7 @@ from LmServer.common.lmconstants import SOLR_FIELDS from LmServer.common.solr import query_archive_index -from LmWebServer.services.api.v2.base import LmService +from LmWebServer.flask_app.base import LmService from LmWebServer.services.cp_tools.lm_format import lm_formatter From 9fa385a1bd85a147ce3c462261e3e25aeb1e04f9 Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Fri, 3 Dec 2021 13:21:53 -0600 Subject: [PATCH 15/18] updates to flask; untested --- LmWebServer/flask_app/base.py | 10 +- LmWebServer/flask_app/biotaphy_names.py | 2 +- LmWebServer/flask_app/biotaphy_points.py | 2 +- LmWebServer/flask_app/env_layer.py | 2 +- LmWebServer/flask_app/gbif_parser.py | 2 +- LmWebServer/flask_app/gridset.py | 2 +- LmWebServer/flask_app/layer.py | 2 +- LmWebServer/flask_app/occurrence.py | 6 +- LmWebServer/flask_app/open_tree.py | 2 +- LmWebServer/flask_app/routes.py | 105 +++++- LmWebServer/flask_app/scenario.py | 25 +- LmWebServer/flask_app/scenario_package.py | 2 +- LmWebServer/flask_app/sdm_project.py | 37 +-- LmWebServer/flask_app/shapegrid.py | 23 +- LmWebServer/flask_app/snippet.py | 5 +- LmWebServer/flask_app/solr_raw.py | 15 + LmWebServer/flask_app/species_hint.py | 2 +- LmWebServer/flask_app/taxonomy.py | 22 ++ LmWebServer/flask_app/tree.py | 134 ++++++++ LmWebServer/flask_app/upload.py | 368 ++++++++++++++++++++++ 20 files changed, 656 insertions(+), 112 deletions(-) create mode 100644 LmWebServer/flask_app/solr_raw.py create mode 100644 LmWebServer/flask_app/taxonomy.py create mode 100644 LmWebServer/flask_app/tree.py create mode 100644 LmWebServer/flask_app/upload.py diff --git a/LmWebServer/flask_app/base.py b/LmWebServer/flask_app/base.py index 8c43b44b..df6353e3 100644 --- a/LmWebServer/flask_app/base.py +++ b/LmWebServer/flask_app/base.py @@ -1,6 +1,6 @@ """The module provides a base Lifemapper service class """ -from flask import Flask, session +from flask import session import os from LmCommon.common.lmconstants import DEFAULT_POST_USER @@ -9,8 +9,7 @@ from LmServer.common.log import WebLogger from LmServer.db.borg_scribe import BorgScribe -app = Flask(__name__) - +# app = Flask(__name__) # ............................................................................. class LmService: @@ -31,10 +30,8 @@ class in case we decide that we need to use a different mechanism (such as a CherryPy Tool) """ log = WebLogger() - # self.scribe = cherrypy.thread_data.scribeRetriever.get_scribe() self.scribe = BorgScribe(log) self.scribe.open_connections() - # self.log = cherrypy.session.log self.log = log # .......................... @@ -49,7 +46,6 @@ def get_user(self, user_id=None): """ if user_id is None: self.get_user_id() - # Check to see if we should use url user usr = self.scribe.find_user(user_id) return usr @@ -87,8 +83,6 @@ def get_user_dir(cls, user_id): same path construction as the getBoomPackage script """ return os.path.join(ARCHIVE_PATH, user_id, 'uploads', 'biogeo') - - # .......................... 
@staticmethod diff --git a/LmWebServer/flask_app/biotaphy_names.py b/LmWebServer/flask_app/biotaphy_names.py index 2132f3f3..9e1b7639 100644 --- a/LmWebServer/flask_app/biotaphy_names.py +++ b/LmWebServer/flask_app/biotaphy_names.py @@ -3,7 +3,7 @@ from LmCommon.common.api_query import GbifAPI from LmWebServer.flask_app.base import LmService -from LmWebServer.services.cp_tools.lm_format import lm_formatter +from LmWebServer.flask_tools.lm_format import lm_formatter # ............................................................................. diff --git a/LmWebServer/flask_app/biotaphy_points.py b/LmWebServer/flask_app/biotaphy_points.py index 444212c9..c8fda945 100644 --- a/LmWebServer/flask_app/biotaphy_points.py +++ b/LmWebServer/flask_app/biotaphy_points.py @@ -9,7 +9,7 @@ from LmServer.common.lmconstants import LMFileType, FileFix from LmServer.common.localconstants import PUBLIC_USER from LmWebServer.flask_app.base import LmService -from LmWebServer.services.cp_tools.lm_format import lm_formatter +from LmWebServer.flask_tools.lm_format import lm_formatter # ............................................................................. diff --git a/LmWebServer/flask_app/env_layer.py b/LmWebServer/flask_app/env_layer.py index f8030f75..16c65d79 100644 --- a/LmWebServer/flask_app/env_layer.py +++ b/LmWebServer/flask_app/env_layer.py @@ -4,7 +4,7 @@ from LmWebServer.common.lmconstants import HTTPMethod from LmWebServer.flask_app.base import LmService from LmWebServer.services.common.access_control import check_user_permission -from LmWebServer.services.cp_tools.lm_format import lm_formatter +from LmWebServer.flask_tools.lm_format import lm_formatter # ............................................................................. diff --git a/LmWebServer/flask_app/gbif_parser.py b/LmWebServer/flask_app/gbif_parser.py index 64a38cb3..e00bddae 100644 --- a/LmWebServer/flask_app/gbif_parser.py +++ b/LmWebServer/flask_app/gbif_parser.py @@ -6,7 +6,7 @@ from LmCommon.common.api_query import GbifAPI from LmWebServer.flask_app.base import LmService -from LmWebServer.services.cp_tools.lm_format import lm_formatter +from LmWebServer.flask_tools.lm_format import lm_formatter # TODO: These need to go into a constants file ACCEPTED_NAME_KEY = 'accepted_name' diff --git a/LmWebServer/flask_app/gridset.py b/LmWebServer/flask_app/gridset.py index 5ceb74f6..63808e33 100644 --- a/LmWebServer/flask_app/gridset.py +++ b/LmWebServer/flask_app/gridset.py @@ -27,7 +27,7 @@ from LmWebServer.services.api.v2.matrix import MatrixService from LmWebServer.services.common.access_control import check_user_permission from LmWebServer.services.common.boom_post import BoomPoster -from LmWebServer.services.cp_tools.lm_format import lm_formatter +from LmWebServer.flask_tools.lm_format import lm_formatter BG_REF_ID_KEY = 'identifier' BG_REF_KEY = 'hypothesis_package_reference' diff --git a/LmWebServer/flask_app/layer.py b/LmWebServer/flask_app/layer.py index 7c6ac0b4..080f6f8e 100644 --- a/LmWebServer/flask_app/layer.py +++ b/LmWebServer/flask_app/layer.py @@ -4,7 +4,7 @@ from LmWebServer.common.lmconstants import HTTPMethod from LmWebServer.flask_app.base import LmService from LmWebServer.services.common.access_control import check_user_permission -from LmWebServer.services.cp_tools.lm_format import lm_formatter +from LmWebServer.flask_tools.lm_format import lm_formatter # ............................................................................. 
diff --git a/LmWebServer/flask_app/occurrence.py b/LmWebServer/flask_app/occurrence.py index d106aa35..66db2114 100644 --- a/LmWebServer/flask_app/occurrence.py +++ b/LmWebServer/flask_app/occurrence.py @@ -3,13 +3,15 @@ from http import HTTPStatus import werkzeug.exceptions as WEXC -from LmCommon.common.lmconstants import (JobStatus) +from LmCommon.common.lmconstants import JobStatus + from LmServer.base.atom import Atom + from LmWebServer.common.lmconstants import HTTPMethod from LmWebServer.flask_app.base import LmService +from LmWebServer.flask_tools.lm_format import lm_formatter from LmWebServer.services.common.access_control import check_user_permission from LmWebServer.services.common.boom_post import BoomPoster -from LmWebServer.services.cp_tools.lm_format import lm_formatter # ............................................................................. diff --git a/LmWebServer/flask_app/open_tree.py b/LmWebServer/flask_app/open_tree.py index f93cd23b..321d935d 100644 --- a/LmWebServer/flask_app/open_tree.py +++ b/LmWebServer/flask_app/open_tree.py @@ -20,7 +20,7 @@ NONTREE_GBIF_IDS_KEY, TREE_DATA_KEY, TREE_FORMAT_KEY, TREE_NAME_KEY, UNMATCHED_GBIF_IDS_KEY) from LmWebServer.flask_app.base import LmService -from LmWebServer.services.cp_tools.lm_format import lm_formatter +from LmWebServer.flask_tools.lm_format import lm_formatter # ............................................................................. diff --git a/LmWebServer/flask_app/routes.py b/LmWebServer/flask_app/routes.py index 4fb4a5f4..436156d9 100644 --- a/LmWebServer/flask_app/routes.py +++ b/LmWebServer/flask_app/routes.py @@ -17,6 +17,10 @@ from LmWebServer.flask_app.scenario import ScenarioService from LmWebServer.flask_app.sdm_project import SdmProjectService from LmWebServer.flask_app.snippet import SnippetService +from LmWebServer.flask_app.solr_raw import RawSolrService +from LmWebServer.flask_app.taxonomy import TaxonomyHintService +from LmWebServer.flask_app.tree import TreeService +from LmWebServer.flask_app.upload import UserUploadService from LmCommon.common.lmconstants import JobStatus @@ -412,7 +416,7 @@ def sdmproject(identifier): dict: For GET and POST operations, zero or more dictionaries of metadata for the requested or posted record(s); for DELETE operations, True or False for success """ - svc = SdmProjectService()() + svc = SdmProjectService() user = svc.get_user() user_id = user.user_id @@ -492,11 +496,96 @@ def snippet(): return response - # sdmproject = SdmProjectService() - # shapegrid = ShapegridService() - # snippet = SnippetService() - # rawsolr = RawSolrService() - # taxonomy = TaxonomyHintService() - # tree = TreeService() - # upload = UserUploadService() +# ..................................................................................... +@app.route('/api/v2/rawsolr', methods=['POST']) +def rawsolr(): + svc = RawSolrService() + req_body = request.get_json() + response = svc.query_collection(req_body) + return response + +# ..................................................................................... +@app.route('/api/v2/taxonomy', methods=['GET']) +def taxonomy(): + svc = TaxonomyHintService() + req_body = request.get_json() + response = svc.query_collection(req_body) + return response + +# ..................................................................................... 
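# A minimal request sketch for the raw Solr route above (hedged: the host,
# collection name, and query string are placeholders; the expected JSON keys come
# from RawSolrService.query_collection later in this patch):
#
#     import requests
#     resp = requests.post(
#         'http://localhost/api/v2/rawsolr',
#         json={'collection': 'snippets', 'query_string': 'q=*:*&rows=10'})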
+@app.route('/api/v2/tree/', methods=['GET', 'POST', 'DELETE'])
+def tree(identifier):
+    """Tree API service for GET, POST, and DELETE operations on Trees
+
+    Args:
+        identifier (str): A tree identifier to search for.
+
+    Returns:
+        dict: For GET and POST operations, zero or more dictionaries of metadata for the requested or
+            posted record(s); for DELETE operations, True or False for success
+    """
+    svc = TreeService()
+    user_id = svc.get_user_id()
+
+    if request.method == 'POST' and request.is_json:
+        tree_data = request.get_json()
+        response = svc.post_tree(user_id, tree_data=tree_data)
+
+    elif request.method == 'DELETE':
+        response = svc.delete_tree(user_id, identifier)
+
+    elif request.method == 'GET':
+        after_time = request.args.get('after_time', default = None, type = float)
+        before_time = request.args.get('before_time', default = None, type = float)
+        is_binary = request.args.get('is_binary', default = None, type = bool)
+        is_ultrametric = request.args.get('is_ultrametric', default = None, type = bool)
+        has_branch_lengths = request.args.get('has_branch_lengths', default = None, type = bool)
+        meta_string = request.args.get('meta_string', default = None, type = str)
+        name = request.args.get('name', default = None, type = str)
+        limit = request.args.get('limit', default = 100, type = int)
+        offset = request.args.get('offset', default = 0, type = int)
+
+        if identifier is None:
+            response = svc.list_trees(
+                user_id, after_time=after_time, before_time=before_time, is_binary=is_binary,
+                is_ultrametric=is_ultrametric, has_branch_lengths=has_branch_lengths, meta_string=meta_string,
+                name=name, limit=limit, offset=offset)
+        elif identifier.lower() == 'count':
+            response = svc.count_trees(
+                user_id, after_time=after_time, before_time=before_time, is_binary=is_binary,
+                is_ultrametric=is_ultrametric, has_branch_lengths=has_branch_lengths, meta_string=meta_string,
+                name=name)
+
+        else:
+            try:
+                tree_id = int(identifier)
+            except ValueError:
+                return BadRequest('{} is not a valid tree ID'.format(identifier))
+            else:
+                response = svc.get_tree(user_id, tree_id)
+
+    return response
+
+# .....................................................................................
+@app.route('/api/v2/upload', methods=['POST'])
+def upload():
+    svc = UserUploadService()
+
+    file_name = request.args.get('file_name', default = None, type = str)
+    upload_type = request.args.get('upload_type', default = None, type = str)
+    metadata = request.args.get('metadata', default = None, type = str)
+    upload_file = request.files.get('upload_file')
+
+    if upload_file is not None:
+        try:
+            data = upload_file.read()
+        except Exception as e:
+            raise BadRequest('Unable to read uploaded file ({})'.format(e))
+    else:
+        try:
+            data = request.get_data()
+        except Exception:
+            raise BadRequest('Unable to read data from request')
+
+    return svc.post_data(file_name, upload_type, metadata, data)
diff --git a/LmWebServer/flask_app/scenario.py b/LmWebServer/flask_app/scenario.py
index a9b17725..6eaad22d 100644
--- a/LmWebServer/flask_app/scenario.py
+++ b/LmWebServer/flask_app/scenario.py
@@ -4,36 +4,13 @@
 from LmWebServer.common.lmconstants import HTTPMethod
 from LmWebServer.flask_app.base import LmService
 from LmWebServer.services.common.access_control import check_user_permission
-from LmWebServer.services.cp_tools.lm_format import lm_formatter
+from LmWebServer.flask_tools.lm_format import lm_formatter
 
 
 # .............................................................................
class ScenarioService(LmService): """Scenarios service class.""" - # ................................ - # @lm_formatter - # def GET(self, scenario_id=None, after_time=None, - # alt_pred_code=None, before_time=None, date_code=None, - # epsg_code=None, gcm_code=None, limit=100, offset=0, url_user=None, - # **params): - # """GET request. Individual, list, count - # """ - # if scenario_id is None: - # return self._list_scenarios( - # self.get_user_id(url_user=url_user), after_time=after_time, - # alt_pred_code=alt_pred_code, before_time=before_time, - # date_code=date_code, epsg_code=epsg_code, gcm_code=gcm_code, - # limit=limit, offset=offset) - # - # if scenario_id.lower() == 'count': - # return self._count_scenarios( - # self.get_user_id(url_user=url_user), after_time=after_time, - # alt_pred_code=alt_pred_code, before_time=before_time, - # date_code=date_code, epsg_code=epsg_code, gcm_code=gcm_code) - # - # return self._get_scenario(scenario_id) - # ................................ @lm_formatter def count_scenarios( diff --git a/LmWebServer/flask_app/scenario_package.py b/LmWebServer/flask_app/scenario_package.py index baab6b31..6937f850 100644 --- a/LmWebServer/flask_app/scenario_package.py +++ b/LmWebServer/flask_app/scenario_package.py @@ -4,7 +4,7 @@ from LmWebServer.common.lmconstants import HTTPMethod from LmWebServer.flask_app.base import LmService from LmWebServer.services.common.access_control import check_user_permission -from LmWebServer.services.cp_tools.lm_format import lm_formatter +from LmWebServer.flask_tools.lm_format import lm_formatter # ............................................................................. diff --git a/LmWebServer/flask_app/sdm_project.py b/LmWebServer/flask_app/sdm_project.py index 9eddd1b2..4bbbd03d 100644 --- a/LmWebServer/flask_app/sdm_project.py +++ b/LmWebServer/flask_app/sdm_project.py @@ -9,7 +9,7 @@ from LmWebServer.flask_app.base import LmService from LmWebServer.services.common.access_control import check_user_permission from LmWebServer.services.common.boom_post import BoomPoster -from LmWebServer.services.cp_tools.lm_format import lm_formatter +from LmWebServer.flask_tools.lm_format import lm_formatter # ................................................................0............. @@ -57,41 +57,6 @@ def delete_projection(self, user_id, projection_id): # If we have permission but cannot delete, error raise WEXC.InternalServerError('Failed to delete projection {}'.format(projection_id)) - # ................................ - @lm_formatter - def GET(self, projection_id=None, after_status=None, after_time=None, - algorithm_code=None, before_status=None, before_time=None, - display_name=None, epsg_code=None, limit=100, - model_scenario_code=None, occurrence_set_id=None, offset=0, - projection_scenario_code=None, url_user=None, scenario_id=None, - status=None, gridset_id=None, atom=True, **params): - """Perform a GET request. List, count, or get individual projection. 
- """ - if projection_id is None: - return self._list_projections( - self.get_user_id(url_user=url_user), after_status=after_status, - after_time=after_time, alg_code=algorithm_code, - before_status=before_status, before_time=before_time, - display_name=display_name, epsg_code=epsg_code, limit=limit, - mdl_scenario_code=model_scenario_code, - occurrence_set_id=occurrence_set_id, offset=offset, - prj_scenario_code=projection_scenario_code, status=status, - gridset_id=gridset_id, atom=atom) - - if projection_id.lower() == 'count': - return self._count_projections( - self.get_user_id(url_user=url_user), after_status=after_status, - after_time=after_time, alg_code=algorithm_code, - before_status=before_status, before_time=before_time, - display_name=display_name, epsg_code=epsg_code, - mdl_scenario_code=model_scenario_code, - occurrence_set_id=occurrence_set_id, - prj_scenario_code=projection_scenario_code, status=status, - gridset_id=gridset_id) - - # Get individual as fall back - return self._get_projection(projection_id) - # ................................ @lm_formatter def post_boom_data(self, user_id, user_email, projection_data, **params): diff --git a/LmWebServer/flask_app/shapegrid.py b/LmWebServer/flask_app/shapegrid.py index 96263585..45a70480 100644 --- a/LmWebServer/flask_app/shapegrid.py +++ b/LmWebServer/flask_app/shapegrid.py @@ -7,7 +7,7 @@ from LmWebServer.common.lmconstants import HTTPMethod from LmWebServer.flask_app.base import LmService from LmWebServer.services.common.access_control import check_user_permission -from LmWebServer.services.cp_tools.lm_format import lm_formatter +from LmWebServer.flask_tools.lm_format import lm_formatter # ............................................................................. @@ -37,27 +37,6 @@ def delete_shapegrid(self, user_id, shapegrid_id): # If request is not permitted, raise exception raise WEXC.Forbidden('User does not have permission to delete this shapegrid') - # ................................ - @lm_formatter - def GET(self, shapegrid_id=None, after_time=None, before_time=None, - cell_sides=None, cell_size=None, epsg_code=None, limit=100, - offset=0, url_user=None, **params): - """Perform a GET request, either list, count, or return individual.""" - if shapegrid_id is None: - return self._list_shapegrids( - self.get_user_id(url_user=url_user), after_time=after_time, - before_time=before_time, cell_sides=cell_sides, - cell_size=cell_size, epsg_code=epsg_code, limit=limit, - offset=offset) - if shapegrid_id.lower() == 'count': - return self._count_shapegrids( - self.get_user_id(url_user=url_user), after_time=after_time, - before_time=before_time, cell_sides=cell_sides, - cell_size=cell_size, epsg_code=epsg_code) - - # Fallback to return individual - return self._get_shapegrid(shapegrid_id) - # ................................ @lm_formatter def post_shapegrid( diff --git a/LmWebServer/flask_app/snippet.py b/LmWebServer/flask_app/snippet.py index bb51fb27..9ea4060e 100644 --- a/LmWebServer/flask_app/snippet.py +++ b/LmWebServer/flask_app/snippet.py @@ -1,13 +1,12 @@ """This module provides services for querying snippets""" from LmServer.common.solr import query_snippet_index from LmWebServer.flask_app.base import LmService -from LmWebServer.services.cp_tools.lm_format import lm_formatter +from LmWebServer.flask_tools.lm_format import lm_formatter # ............................................................................. class SnippetService(LmService): - """This class is responsible for the Lifemapper snippet services. 
- """ + """This class is responsible for the Lifemapper snippet services.""" # ................................ @lm_formatter diff --git a/LmWebServer/flask_app/solr_raw.py b/LmWebServer/flask_app/solr_raw.py new file mode 100644 index 00000000..8dd97347 --- /dev/null +++ b/LmWebServer/flask_app/solr_raw.py @@ -0,0 +1,15 @@ +"""This module provides a raw interface to solr""" +from LmServer.common.solr import raw_query +from LmWebServer.services.api.v2.base import LmService + + +# ............................................................................. +class RawSolrService(LmService): + """This class provides a web interface to Solr""" + + # ............................ + def query_collection(self, req_body, **params): + """Send these raw parameters to solr""" + collection = req_body['collection'] + query_string = req_body['query_string'] + return raw_query(collection, query_string) diff --git a/LmWebServer/flask_app/species_hint.py b/LmWebServer/flask_app/species_hint.py index a169609a..50715ac0 100644 --- a/LmWebServer/flask_app/species_hint.py +++ b/LmWebServer/flask_app/species_hint.py @@ -4,7 +4,7 @@ from LmServer.common.lmconstants import SOLR_FIELDS from LmServer.common.solr import query_archive_index from LmWebServer.flask_app.base import LmService -from LmWebServer.services.cp_tools.lm_format import lm_formatter +from LmWebServer.flask_tools.lm_format import lm_formatter # ............................................................................. diff --git a/LmWebServer/flask_app/taxonomy.py b/LmWebServer/flask_app/taxonomy.py new file mode 100644 index 00000000..26534358 --- /dev/null +++ b/LmWebServer/flask_app/taxonomy.py @@ -0,0 +1,22 @@ +"""This module contains service code for performing searches on taxonomy.""" +from LmServer.common.solr import query_taxonomy_index +from LmWebServer.services.api.v2.base import LmService +from LmWebServer.services.cp_tools.lm_format import lm_formatter + + +# ............................................................................. +class TaxonomyHintService(LmService): + """This class provides a method for querying available taxonomy.""" + + # ................................ 
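    # A hedged usage sketch for the hint method defined below (the user id, genus
    # value, and limit are placeholders):
    #
    #     svc = TaxonomyHintService()
    #     matches = svc.get_taxonomy('someuser', genus='Quercus', limit=10)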
+ @lm_formatter + def get_taxonomy( + self, user_id, kingdom=None, phylum=None, class_=None, order_=None, family=None, genus=None, + taxon_key=None, scientific_name=None, canonical_name=None, squid=None, + limit=100, **params): + """Perform a solr search for taxonomy matches.""" + docs = query_taxonomy_index( + taxon_kingdom=kingdom, taxon_phylum=phylum, taxon_class=class_, taxon_order=order_, + taxon_family=family, taxon_genus=genus, taxon_key=taxon_key, scientific_name=scientific_name, + canonical_name=canonical_name, squid=squid, user_id=user_id) + return docs[:limit] diff --git a/LmWebServer/flask_app/tree.py b/LmWebServer/flask_app/tree.py new file mode 100644 index 00000000..90a3041e --- /dev/null +++ b/LmWebServer/flask_app/tree.py @@ -0,0 +1,134 @@ +"""This module provides REST services for trees""" +from flask import Response +from http import HTTPStatus +import werkzeug.exceptions as WEXC + +from LmCommon.common.lmconstants import DEFAULT_TREE_SCHEMA +from LmCommon.common.time import gmt +from LmServer.legion.tree import Tree +from LmWebServer.common.lmconstants import HTTPMethod +from LmWebServer.services.api.v2.base import LmService +from LmWebServer.services.common.access_control import check_user_permission +from LmWebServer.services.cp_tools.lm_format import lm_formatter +from lmpy.tree import TreeWrapper + + +# ............................................................................. +class TreeService(LmService): + """This class is responsible for tree services.""" + + # ................................ + def delete_tree(self, user_id, tree_id): + """Attempts to delete a tree + + Args: + tree_id (int) : The id of the tree to delete + """ + tree = self.scribe.get_tree(tree_id=tree_id) + + if tree is None: + raise WEXC.NotFound('Tree {} not found'.format(tree_id)) + + # If allowed to, delete + if check_user_permission(user_id, tree, HTTPMethod.DELETE): + success = self.scribe.delete_object(tree) + if success: + return Response(status=HTTPStatus.NO_CONTENT) + + raise WEXC.InternalServerError('Failed to delete tree') + + raise WEXC.Forbidden('User {} does not have permission to delete tree {}'.format(user_id, tree_id)) + + # ................................ + @lm_formatter + def post_tree(self, user_id, name=None, tree_data=None, tree_schema=DEFAULT_TREE_SCHEMA, **params): + """Posts a tree + + Args: + name (str): human-readable name for the tree + tree_data (str): tree data in JSON format + tree_schema (str): format for the tree data, default is nexus + """ + if name is None: + raise WEXC.BadRequest('Must provide name for tree') + tree = TreeWrapper.get(file=tree_data, schema=tree_schema) + + new_tree = Tree(name, user_id=user_id) + updated_tree = self.scribe.find_or_insert_tree(new_tree) + updated_tree.set_tree(tree) + updated_tree.write_tree() + updated_tree.mod_time = gmt().mjd + self.scribe.update_object(updated_tree) + + return updated_tree + + # ................................ + @lm_formatter + def count_trees(self, user_id, after_time=None, before_time=None, + is_binary=None, is_ultrametric=None, + has_branch_lengths=None, meta_string=None, name=None): + """Counts the tree objects matching the specified criteria + + Args: + user_id (str) THe user to count trees for. Note that this may not + be the same user logged into the system. + after_time (MJD float) : Return trees modified after this time. + before_time (MJD float) : Return trees modified before this time. + is_binary (bool) : Only return trees that are binary. 
+ is_ultrametric (bool) : Only return trees that are ultrametric. + has_branch_lengths (bool) : Only return trees that have branch + lengths. + meta_string () : ? + name (str) : Return trees with this name. + """ + tree_count = self.scribe.count_trees( + user_id=user_id, name=name, is_binary=is_binary, is_ultrametric=is_ultrametric, + has_branch_lengths=has_branch_lengths, meta_string=meta_string, after_time=after_time, + before_time=before_time) + + return {'count': tree_count} + + # ................................ + @lm_formatter + def get_tree(self, user_id, tree_id): + """Attempt to get a tree + + Args: + tree_id (int) : The database ID of the tree to retrieve. + """ + tree = self.scribe.get_tree(tree_id=tree_id) + if tree is None: + raise WEXC.NotFound('Tree {} was not found'.format(tree_id)) + + if check_user_permission(user_id, tree, HTTPMethod.GET): + return tree + + raise WEXC.Forbidden('User {} does not have permission to access tree {}'.format( + user_id, tree_id)) + + # ................................ + @lm_formatter + def list_trees( + self, user_id, after_time=None, before_time=None, is_binary=None, is_ultrametric=None, + has_branch_lengths=None, meta_string=None, name=None, offset=0, limit=100): + """Lists tree objects matching the specified criteria. + + Args: + user_id (str): The user to list trees for. Note that this may not + be the same user logged into the system. + after_time (MJD float) : Return trees modified after this time. + before_time (MJD float) : Return trees modified before this time. + is_binary (bool) : Only return trees that are binary. + is_ultrametric (bool) : Only return trees that are ultrametric. + has_branch_lengths (bool) : Only return trees that have branch lengths. + limit (int) : The maximum number of trees to return. + meta_string () : ? + name (str) : Return trees with this name. + offset (int) : Start returning trees this many from the first. + """ + tree_atoms = self.scribe.list_trees( + offset, limit, user_id=user_id, name=name, is_binary=is_binary, + is_ultrametric=is_ultrametric, + has_branch_lengths=has_branch_lengths, meta_string=meta_string, + after_time=after_time, before_time=before_time) + return tree_atoms diff --git a/LmWebServer/flask_app/upload.py b/LmWebServer/flask_app/upload.py new file mode 100644 index 00000000..310468a3 --- /dev/null +++ b/LmWebServer/flask_app/upload.py @@ -0,0 +1,368 @@ +"""This module provides a user upload service for specific data types + +Todo: + * Make much more robust. 
This is a minimum to get something working and + discover limitations + * Use sub-services for different upload types rather than query parameter +""" +from flask import make_response, Response +from http import HTTPStatus +from io import BytesIO +import json +import os +import werkzeug.exceptions as WEXC +import zipfile + +from lmpy import TreeWrapper + +from LmCommon.common.lmconstants import ( + DEFAULT_POST_USER, DEFAULT_TREE_SCHEMA, LMFormat,PhyloTreeKeys) +from LmCommon.common.ready_file import ready_filename + +from LmServer.common.data_locator import EarlJr +from LmServer.common.lmconstants import ENV_DATA_PATH, LMFileType +from LmServer.common.localconstants import PUBLIC_USER + +from LmWebServer.common.lmconstants import HTTPMethod +from LmWebServer.common.localconstants import MAX_ANON_UPLOAD_SIZE +from LmWebServer.services.api.v2.base import LmService +from LmWebServer.services.common.access_control import check_user_permission +from LmWebServer.services.cp_tools.lm_format import lm_formatter + +# TODO: Move to constants +BIOGEO_UPLOAD = 'biogeo' +CLIMATE_UPLOAD = 'climate' +OCCURRENCE_UPLOAD = 'occurrence' +TREE_UPLOAD = 'tree' + + +# ............................................................................. +class UserUploadService(LmService): + """This class is responsible for data uploads to a user space.""" + + # ................................ + @lm_formatter + def post_data(self, file_name=None, upload_type=None, metadata=None, indata=None, **params): + """Posts the new file to the user's space""" + if not check_user_permission(self.get_user_id(), self, HTTPMethod.POST): + raise WEXC.Forbidden('Only logged in users can upload here') + + if upload_type is None: + raise WEXC.BadRequest('Must provide upload type') + + if upload_type.lower() == TREE_UPLOAD: + return self._upload_tree(file_name, indata) + + elif upload_type.lower() == BIOGEO_UPLOAD: + return self._upload_biogeo(file_name, indata) + + elif upload_type.lower() == OCCURRENCE_UPLOAD: + return self._upload_occurrence_data(file_name, metadata, indata) + + elif upload_type.lower() == CLIMATE_UPLOAD: + return self._upload_climate_data(file_name, indata) + + else: + raise WEXC.BadRequest('Unknown upload type: {}'.format(upload_type)) + + + # ................................ + def _get_user_temp_dir(self, user_id): + """Get the user's workspace directory + + Todo: + * Change this to use something at a lower level. This is using + the same path construction as the getBoomPackage script + """ + earl = EarlJr() + pth = earl.create_data_path(user_id, LMFileType.TMP_JSON) + if not os.path.exists(pth): + os.makedirs(pth) + return pth + + # ................................ 
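    # A minimal client-side sketch of the upload dispatch above (hedged: the host
    # and file name are placeholders; the query parameters mirror the
    # /api/v2/upload route in routes.py, which passes the raw request body through
    # as `indata`):
    #
    #     import requests
    #     with open('my_tree.nex', 'rb') as in_f:
    #         requests.post(
    #             'http://localhost/api/v2/upload',
    #             params={'file_name': 'my_tree.nex', 'upload_type': 'tree'},
    #             data=in_f.read())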
+ def _upload_biogeo(self, package_filename, indata): + """Write the biogeographic hypotheses to the user's workspace + + Args: + package_filename (str): The name of the biogeographic hypotheses + package + upload_file: The uploaded data file + + Todo: + * Sanity checking + * More docs + """ + # Determine where to write the files + out_dir = os.path.join(self.get_user_dir(), package_filename) + if not os.path.exists(out_dir): + os.makedirs(out_dir) + + instr = BytesIO() + instr.write(indata) + instr.seek(0) + + valid_extensions = [LMFormat.JSON.ext] + valid_extensions.extend(LMFormat.SHAPE.get_extensions()) + + # Unzip files and name provided name + with zipfile.ZipFile(instr, allowZip64=True) as zip_f: + for zfname in zip_f.namelist(): + # fn = os.path.basename(zfname) + _, ext = os.path.splitext(zfname) + if ext in valid_extensions: + out_fn = os.path.join(out_dir, os.path.basename(zfname)) + if os.path.exists(out_fn): + raise WEXC.Conflict('{} exists, {}'.format(out_fn, zfname)) + + # zipF.extract(zfname, outFn) + with zip_f.open(zfname) as zip_f2: + with open(out_fn, 'wb') as out_f: + for line in zip_f2: + out_f.write(line) + + outdata = { + 'package_name': package_filename, + 'upload_type': BIOGEO_UPLOAD, + 'status': HTTPStatus.ACCEPTED + } + return make_response(outdata, HTTPStatus.ACCEPTED) + + # ................................ + @staticmethod + def _upload_climate_data(climate_data_filename, indata): + """Write the climate data to the layers space + + Args: + climate_data_filename: The name of the directory to unzip files in + + Todo: + Sanity checking + """ + out_dir = os.path.join(ENV_DATA_PATH, climate_data_filename) + with zipfile.ZipFile(indata, allowZip64=True) as zip_f: + for zf_name in zip_f.namelist(): + _, ext = os.path.splitext(zf_name) + out_fn = os.path.join( + out_dir, '{}{}'.format(climate_data_filename, ext)) + ready_filename(out_fn) + if os.path.exists(out_fn): + raise WEXC.Conflict('{}{} exists'.format(climate_data_filename, ext)) + zip_f.extract(zf_name, out_fn) + + outdata = { + 'package_name': climate_data_filename, + 'upload_type': CLIMATE_UPLOAD, + 'status': HTTPStatus.ACCEPTED + } + return make_response(outdata, HTTPStatus.ACCEPTED) + + # ................................ 
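    # A sketch of the metadata document the occurrence upload below appears to
    # expect, inferred from the parsing code that follows (column indices, names,
    # and role assignments are invented for illustration):
    #
    #     {
    #         "delimiter": ",",
    #         "field": [
    #             {"key": "0", "short_name": "taxon", "field_type": "string"},
    #             {"key": "1", "short_name": "lat", "field_type": "real"},
    #             {"key": "2", "short_name": "lon", "field_type": "real"}
    #         ],
    #         "role": {"taxa_name": "0", "latitude": "1", "longitude": "2"}
    #     }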
+ def _upload_occurrence_data(self, package_name, metadata, indata): + """Write the occurrence data to the user's workspace + + Args: + package_name: The name of the occurrence data + metadata: A JSON document with metadata about the CSV data + + Todo: + Sanity checking + Use constants + Case insensitive + """ + self.log.debug('In occ upload') + # If the package name ends in .csv, strip it + if package_name.lower().find(LMFormat.CSV.ext) > 0: + package_name = package_name[ + :package_name.lower().find(LMFormat.CSV.ext)] + csv_filename = os.path.join( + self._get_user_temp_dir(), '{}{}'.format( + package_name, LMFormat.CSV.ext)) + meta_filename = os.path.join( + self._get_user_temp_dir(), + '{}{}'.format(package_name, LMFormat.JSON.ext)) + + # Check to see if files exist + if os.path.exists(csv_filename): + raise WEXC.Conflict('{} exists'.format(os.path.basename(csv_filename))) + if os.path.exists(meta_filename): + raise WEXC.Conflict('{} exists'.format(os.path.basename(meta_filename))) + + # Process metadata + if metadata is None: + raise WEXC.BadRequest('Must provide metadata with occurrence data upload') + + m_stringio = BytesIO() + m_stringio.write(metadata.encode()) + m_stringio.seek(0) + metadata = json.load(m_stringio) + self.log.debug('Metadata: {}'.format(metadata)) + if 'field' not in list( + metadata.keys()) or 'role' not in list(metadata.keys()): + raise WEXC.BadRequest('Metadata not in expected format') + + header_row = indata.split('\n'.encode())[0] + meta_obj = {} + # Check for delimiter + if 'delimiter' in list(metadata.keys()): + delim = metadata['delimiter'] + else: + delim = ',' + meta_obj['delimiter'] = delim + headers = header_row.split(delim.encode()) + short_names = [] + + roles = metadata['role'] + for fld in metadata['field']: + if fld['field_type'].lower() == 'string': + field_type = 'string' # 4 + elif fld['field_type'].lower() == 'integer': + field_type = 'integer' # 0 + elif fld['field_type'].lower() == 'real': + field_type = 'real' # 2 + else: + raise WEXC.BadRequest('Field type: {} is unknown'.format(fld['field_type'])) + field_idx = fld['key'] + + # If short name is None or has zero-length, get from csv + short_name = fld['short_name'] + if short_name is None or len(short_name) == 0: + short_name = headers[int(fld['key'])].strip() + # If short name is too long + i = 0 + if len(short_name) > 9: + test_name = short_name[:9] + str(i) + while test_name in short_names: + i += 1 + test_name = short_name[:9] + str(i) + self.log.debug( + 'Trying test name: {}'.format(test_name)) + short_names.append(test_name) + short_name = test_name + field_obj = { + 'type': field_type, + 'name': short_name + } + if 'geopoint' in list(roles.keys()) and fld[ + 'key'] == roles['geopoint']: + field_obj['role'] = 'geopoint' + elif 'taxa_name' in list(roles.keys()) and fld[ + 'key'] == roles['taxa_name']: + field_obj['role'] = 'taxaname' + elif 'latitude' in list(roles.keys()) and fld[ + 'key'] == roles['latitude']: + field_obj['role'] = 'latitude' + elif 'longitude' in list(roles.keys()) and fld[ + 'key'] == roles['longitude']: + field_obj['role'] = 'longitude' + elif 'unique_id' in list(roles.keys()) and fld[ + 'key'] == roles['unique_id']: + field_obj['role'] = 'uniqueid' + elif 'group_by' in list(roles.keys()) and fld[ + 'key'] == roles['group_by']: + field_obj['role'] = 'groupby' + meta_obj[field_idx] = field_obj + + with open(meta_filename, 'wt') as out_f: + json.dump(meta_obj, out_f) + + # Process file + instr = BytesIO() + instr.write(indata) + instr.seek(0) + csv_done = False + 
+ if zipfile.is_zipfile(instr): + with zipfile.ZipFile(instr, allowZip64=True) as zip_f: + for z_fname in zip_f.namelist(): + _, ext = os.path.splitext(z_fname) + if ext == LMFormat.CSV.ext: + # TODO: We could extend here and process more than one + if csv_done: + raise WEXC.BadRequest('Must only provide one .csv file') + # Determine if we are dealing with anonymous user + # once instead of checking at every line + anon_user = self.get_user_id() == DEFAULT_POST_USER + with zip_f.open(z_fname) as z_f: + with open(csv_filename, 'w') as out_f: + num_lines = 0 + for line in z_f: + num_lines += 1 + if (anon_user and + num_lines >= MAX_ANON_UPLOAD_SIZE): + fail_to_upload = True + break + out_f.write(line) + if fail_to_upload: + os.remove(csv_filename) + raise WEXC.RequestEntityTooLarge( + 'Anonymous users may upload occurrence data less than {} lines'.format( + MAX_ANON_UPLOAD_SIZE)) + csv_done = True + else: + if self.get_user_id() == DEFAULT_POST_USER and \ + len(indata.split('\n'.encode())) > MAX_ANON_UPLOAD_SIZE: + raise WEXC.RequestEntityTooLarge( + 'Anonymous users may only upload occurrence data less than {} lines'.format( + MAX_ANON_UPLOAD_SIZE)) + with open(csv_filename, 'w') as out_f: + out_f.write(indata.decode()) + + # Return + outdata = { + 'package_name': package_name, + 'upload_type': OCCURRENCE_UPLOAD, + 'status': HTTPStatus.ACCEPTED + } + return make_response(outdata, HTTPStatus.ACCEPTED) + + # ................................ + def _upload_tree(self, tree_name, indata): + """Write the tree to the user's work space + + Todo: + * Sanity checking + * Insert tree into database? Let boom do it? + """ + tree_base_name, _ = os.path.splitext(tree_name) + tree_name = '{}{}'.format(tree_base_name, LMFormat.NEXUS.ext) + # Check to see if file already exists, fail if it does + out_tree_filename = os.path.join(self._get_user_temp_dir(), tree_name) + if not os.path.exists(out_tree_filename): + # Make sure the user directory exists + ready_filename(out_tree_filename) + + for schema in ['newick', 'nexus', 'phyloxml']: + try: + self.log.debug(indata.decode()) + tree = TreeWrapper.get(data=indata.decode(), schema=schema) + # Add squids + squid_dict = {} + user_id = self.get_user_id() + + if user_id == PUBLIC_USER: + user_id = DEFAULT_POST_USER + for label in tree.get_labels(): + sno = self.scribe.get_taxon( + user_id=user_id, taxon_name=label) + if sno is not None: + squid_dict[label] = sno.squid + tree.annotate_tree_tips(PhyloTreeKeys.SQUID, squid_dict) + # Add internal node labels + tree.add_node_labels() + tree.write( + path=out_tree_filename, schema=DEFAULT_TREE_SCHEMA) + break + except Exception as err: + # Uncomment for debugging purposes + # self.log.debug(err) + pass + else: + raise WEXC.Conflict('Tree with this name already exists in the user space') + # Set HTTP status code + outdata = { + 'file_name': tree_name, + 'upload_type': TREE_UPLOAD, + 'status': HTTPStatus.ACCEPTED + } + return make_response(outdata, HTTPStatus.ACCEPTED) From 71072e6eeb2c1eb065119483a1768a1651fc6bca Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Fri, 3 Dec 2021 15:43:56 -0600 Subject: [PATCH 16/18] boolean param options --- LmWebServer/common/lmconstants.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/LmWebServer/common/lmconstants.py b/LmWebServer/common/lmconstants.py index f1ed7935..cac2e826 100644 --- a/LmWebServer/common/lmconstants.py +++ b/LmWebServer/common/lmconstants.py @@ -51,9 +51,9 @@ def boolify_parameter(param, default=True): try: # Try 
processing a string str_val = param.lower().strip() - if str_val == 'false' or str_val == 'no': + if str_val in('false', 'f', 'no','n'): return False - if str_val == 'true' or str_val == 'yes': + if str_val in ('true', 't', 'yes', 'y'): return True except Exception: pass @@ -93,7 +93,7 @@ def boolify_parameter(param, default=True): }, 'atom': { QP_NAME_KEY: 'atom', - QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=True) # Boolify, default is true + QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=True) }, 'beforestatus': { QP_NAME_KEY: 'before_status', @@ -143,18 +143,18 @@ def boolify_parameter(param, default=True): }, 'detail': { QP_NAME_KEY: 'detail', - QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false + QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) }, 'displayname': { QP_NAME_KEY: 'display_name' }, 'docalc': { QP_NAME_KEY: 'do_calc', - QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false + QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) }, 'domcpa': { QP_NAME_KEY: 'do_mcpa', - QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false + QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) }, 'envcode': { QP_NAME_KEY: 'env_code' @@ -175,7 +175,7 @@ def boolify_parameter(param, default=True): }, 'fillpoints': { QP_NAME_KEY: 'fill_points', - QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false + QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) }, 'format': { # TODO: Forward to respFormat since format is reserved @@ -190,7 +190,7 @@ def boolify_parameter(param, default=True): }, 'hasbranchlengths': { QP_NAME_KEY: 'has_branch_lengths', - QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=True) # Boolify, default is true + QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=True) }, 'height': { QP_NAME_KEY: 'height', @@ -204,19 +204,19 @@ def boolify_parameter(param, default=True): }, 'includecsvs': { QP_NAME_KEY: 'include_csvs', - QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false + QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) }, 'includesdms': { QP_NAME_KEY: 'include_sdms', - QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false + QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) }, 'isbinary': { QP_NAME_KEY: 'is_binary', - QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=True) # Boolify, default is true + QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=True) }, 'isultrametric': { QP_NAME_KEY: 'is_ultrametric', - QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=True) # Boolify, default is true + QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=True) }, 'keyword': { QP_NAME_KEY: 'keyword', @@ -235,7 +235,7 @@ def boolify_parameter(param, default=True): }, 'limit': { QP_NAME_KEY: 'limit', - QP_PROCESS_KEY: lambda x: max(1, int(x)) # Integer, minimum is one + QP_PROCESS_KEY: lambda x: max(1, int(x)) # min = 1 }, 'map': { QP_NAME_KEY: 'map_name' @@ -258,7 +258,7 @@ def boolify_parameter(param, default=True): }, 'minimumnumberofpoints': { QP_NAME_KEY: 'minimum_number_of_points', - QP_PROCESS_KEY: lambda x: max(1, int(x)) # Integer, minimum is one + QP_PROCESS_KEY: lambda x: max(1, int(x)) # min = 1 }, 'numpermutations': { QP_NAME_KEY: 'num_permutations', @@ -273,7 +273,7 @@ def boolify_parameter(param, default=True): }, 'offset': { QP_NAME_KEY: 'offset', 
- QP_PROCESS_KEY: lambda x: max(0, int(x)) # Integer, minimum is zero + QP_PROCESS_KEY: lambda x: max(0, int(x)) # min = 0 }, 'pathbiogeoid': { QP_NAME_KEY: 'path_biogeo_id' From f95901eb21a169729ccd870f7081354e140c8fad Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Fri, 3 Dec 2021 17:26:58 -0600 Subject: [PATCH 17/18] updates for flask authentication, very unfinished --- LmWebServer/common/lmconstants.py | 3 + LmWebServer/flask_app/base.py | 74 +++- LmWebServer/flask_app/routes.py | 165 +++++++-- LmWebServer/flask_app/user_services.py | 482 +++++++++++++++++++++++++ LmWebServer/public_html/login.html | 44 ++- 5 files changed, 716 insertions(+), 52 deletions(-) create mode 100644 LmWebServer/flask_app/user_services.py diff --git a/LmWebServer/common/lmconstants.py b/LmWebServer/common/lmconstants.py index cac2e826..5d647c87 100644 --- a/LmWebServer/common/lmconstants.py +++ b/LmWebServer/common/lmconstants.py @@ -1,12 +1,15 @@ """This module contains constants used by the Lifemapper web services """ import os +import secrets from LmServer.base.utilities import get_mjd_time_from_iso_8601 from LmServer.common.lmconstants import SESSION_DIR from LmServer.common.localconstants import SCRATCH_PATH, APP_PATH from LmWebServer.common.localconstants import PACKAGING_DIR +FALLBACK_SECRET_KEY = secrets.token_hex() + # CherryPy constants SESSION_PATH = os.path.join(SCRATCH_PATH, SESSION_DIR) SESSION_KEY = '_cp_username' diff --git a/LmWebServer/flask_app/base.py b/LmWebServer/flask_app/base.py index df6353e3..52256400 100644 --- a/LmWebServer/flask_app/base.py +++ b/LmWebServer/flask_app/base.py @@ -1,16 +1,74 @@ """The module provides a base Lifemapper service class """ from flask import session +from flask_login._compat import text_type import os from LmCommon.common.lmconstants import DEFAULT_POST_USER from LmServer.common.lmconstants import ARCHIVE_PATH from LmServer.common.localconstants import PUBLIC_USER from LmServer.common.log import WebLogger +from LmServer.common.lmuser import LMUser from LmServer.db.borg_scribe import BorgScribe # app = Flask(__name__) +class WebUser(LMUser): + """Extends lmuser objects for flask-login""" + + # ................................ + def __init__( + self, user_id, email, password, is_encrypted=False, first_name=None, last_name=None, + institution=None, addr_1=None, addr_2=None, addr_3=None, phone=None, mod_time=None): + """Constructor + + Args: + user_id: user chosen unique id + email: EMail address of user + password: user chosen password + first_name: The first name of this user + last_name: The last name of this user + institution: institution of user (optional) + addr_1: Address, line 1, of user (optional) + addr_2: Address, line 2, of user (optional) + addr_3: Address, line 3, of user (optional) + phone: Phone number of user (optional) + mod_time: Last modification time of this object (optional) + """ + LMUser.__init__( + self, user_id, email, password, is_encrypted=is_encrypted, first_name=first_name, + last_name=last_name, institution=institution, addr_1=addr_1, addr_2=addr_2, addr_3=addr_3, + phone=phone, mod_time=mod_time) + self._authenticated = False + self._active = False + + # .......................... + def is_authenticated(self): + return self._authenticated + + # .......................... + def is_active(self): + if self.user_id in (PUBLIC_USER, DEFAULT_POST_USER): + return False + return True + + # .......................... 
+ def is_anonymous(self): + if self.user_id in (PUBLIC_USER, DEFAULT_POST_USER): + return True + return False + + # .......................... + def get_id(self): + if self.user_id not in (PUBLIC_USER, DEFAULT_POST_USER): + try: + return text_type(self.user_id) + except AttributeError: + raise NotImplementedError('No `user_id` attribute - override `get_id`') + return + + + # ............................................................................. class LmService: """This is the base Lifemapper service object @@ -25,9 +83,7 @@ def __init__(self): """Constructor The constructor is only responsible for getting a logger, user and a - scribe instance for the service. We do that here in a simple base - class in case we decide that we need to use a different mechanism (such - as a CherryPy Tool) + scribe instance for the service. """ log = WebLogger() self.scribe = BorgScribe(log) @@ -38,7 +94,7 @@ class in case we decide that we need to use a different mechanism (such def get_user(self, user_id=None): """Gets the user id for the service call. - Gets the user id for the service call. If urlUser is provided, try + Gets the user id for the service call. If user_id is provided, try that first. Then try the session and finally fall back to the PUBLIC_USER @@ -51,8 +107,8 @@ def get_user(self, user_id=None): # .......................... @classmethod - def get_user_id(cls, url_user=None): - """Gets the user id for the service call. + def get_user_id(cls, user_id=None): + """Gets the lmuser for the service call. Gets the user id for the service call. If urlUser is provided, try that first. Then try the session and finally fall back to the @@ -61,10 +117,10 @@ def get_user_id(cls, url_user=None): TODO: Save the username in the session """ # Check to see if we should use url user - if url_user is not None: - if url_user.lower() == 'public': + if user_id is not None: + if user_id.lower() == 'public': return PUBLIC_USER - if url_user.lower() == DEFAULT_POST_USER: + if user_id.lower() == DEFAULT_POST_USER: return DEFAULT_POST_USER # Try to get the user from the session try: diff --git a/LmWebServer/flask_app/routes.py b/LmWebServer/flask_app/routes.py index 436156d9..63b99a09 100644 --- a/LmWebServer/flask_app/routes.py +++ b/LmWebServer/flask_app/routes.py @@ -1,8 +1,13 @@ -from flask import (Flask, redirect, render_template, request, session, url_for) +from flask import (Flask, flash, redirect, render_template, request, session, url_for) from flask_cors import CORS -import secrets +from flask_login import login_user, LoginForm, LoginManager +import os from werkzeug.exceptions import BadRequest +from werkzeug.utils import secure_filename +from LmCommon.common.lmconstants import JobStatus + +from LmWebServer.common.lmconstants import FALLBACK_SECRET_KEY from LmWebServer.flask_app.base import LmService from LmWebServer.flask_app.biotaphy_names import GBIFTaxonService from LmWebServer.flask_app.biotaphy_points import IDigBioOccurrenceService @@ -22,12 +27,26 @@ from LmWebServer.flask_app.tree import TreeService from LmWebServer.flask_app.upload import UserUploadService -from LmCommon.common.lmconstants import JobStatus +try: + skey = os.environ['SECRET_KEY'] +except: + skey = FALLBACK_SECRET_KEY -app = Flask(__name__.split('.')[0]) -app.secret_key = str.encode(secrets.token_hex()) + +# TODO: Put this into the database or an environment variable +app = Flask(__name__.split('.')[0]) +app.secret_key = str.encode(skey) CORS(app) +login_manager = LoginManager() +login_manager.init_app(app) + + +# 
.......................... +@login_manager.user_loader +def load_user(user_id): + return LmService.get_user(user_id) + # .......................... @app.route('/') def index(): @@ -36,24 +55,45 @@ def index(): return 'You are not logged in' # .......................... -@app.route('/api/v2/login', methods=['GET', 'POST']) +@app.route('/api/login', methods=['GET', 'POST']) def login(): - if request.method == 'POST': - username = request.form.get('username') - password = request.form.get('password') - - user = LmService.get_user(username) - if user.check_password(password): - session['username'] = user.user_id - return user - else: - print('Incorrect password') - return redirect(request.url) - - return render_template('public_html/login.html') + # Here we use a class of some kind to represent and validate our + # client-side form data. For example, WTForms is a library that will + # handle this for us, and we use a custom LoginForm to validate. + form = LoginForm() + if form.validate_on_submit(): + # Login and validate the user. + # user should be an instance of your `User` class + login_user(user) + + flash('Logged in successfully.') + + next = request.args.get('next') + # is_safe_url should check if the url is safe for redirects. + # See http://flask.pocoo.org/snippets/62/ for an example. + # if not is_safe_url(next): + # return flask.abort(400) + + return redirect(next or url_for('index')) + return render_template('public_html/login.html', form=form) + + + # if request.method == 'POST': + # username = request.form.get('username') + # password = request.form.get('password') + # + # user = LmService.get_user(username) + # if user.check_password(password): + # session['username'] = user.user_id + # return user + # else: + # print('Incorrect password') + # return redirect(request.url) + # + # return render_template('public_html/login.html') # ..................................................................................... -@app.route('/logout') +@app.route('/api/logout') def logout(): # remove the username from the session if it's there session.pop('username', None) @@ -568,24 +608,81 @@ def tree(identifier): return response # ..................................................................................... -@app.route('/api/v2/upload', methods=['POST']) +def allowed_file(filename): + allowed_exts = {'zip', 'json', 'tif', 'tiff', 'asc'} + return '.' in filename and filename.rsplit('.', 1)[1].lower() in allowed_exts + + +@app.route('/api/v2/upload', methods=['GET', 'POST']) def upload(): svc = UserUploadService() file_name = request.args.get('file_name', default = None, type = str) upload_type = request.args.get('upload_type', default = None, type = str) metadata = request.args.get('metadata', default = None, type = str) - upload_file = request.args.get('upload_file', default = None, type = str) - - if upload_file is not None: - try: - data = upload_file.file.read() - except Exception as e: - raise BadRequest('Unable to read uploaded file ({})'.str(e)) - else: - try: - data = request.get_data() - except: - raise BadRequest('Unable to read data from request') + + if request.method == 'POST': + # check if the post request has the file part + if 'file' not in request.files: + flash('No file part') + return redirect(request.url) + + upload_file = request.files['file'] + # If the user does not select a file, the browser submits an empty file without a filename. 
+ if upload_file.filename == '': + flash('No selected file') + return redirect(request.url) - return svc.post_data(file_name, upload_type, metadata, data) + if upload_file: + if allowed_file(upload_file.filename): + if file_name is not None: + safe_filename = secure_filename(file_name) + else: + safe_filename = secure_filename(upload_file.filename) + + try: + data = upload_file.file.read() + except Exception as e: + raise BadRequest('Unable to read uploaded file ({})'.str(e)) + + else: + try: + data = request.get_data() + except: + raise BadRequest('Unable to read data from request') + + return svc.post_data(safe_filename, upload_type, metadata, data) + + +# ..................................................................................... +@app.route('/api/v2/upload_file', methods=['GET', 'POST']) +def upload_file(): + """Test implementation from https://flask.palletsprojects.com/en/2.0.x/patterns/fileuploads/""" + if request.method == 'POST': + # check if the post request has the file part + if 'file' not in request.files: + flash('No file part') + return redirect(request.url) + + file = request.files['file'] + # If the user does not select a file, the browser submits an empty file without a filename. + if file.filename == '': + flash('No selected file') + return redirect(request.url) + + if file and allowed_file(file.filename): + filename = secure_filename(file.filename) + file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename)) + return redirect(url_for('download_file', name=filename)) + + return + + ''' + + Upload new File +

+    <h1>Upload new File</h1>
+    <form method=post enctype=multipart/form-data>
+      <input type=file name=file>
+      <input type=submit value=Upload>
+    </form>
+ ''' diff --git a/LmWebServer/flask_app/user_services.py b/LmWebServer/flask_app/user_services.py new file mode 100644 index 00000000..977613a2 --- /dev/null +++ b/LmWebServer/flask_app/user_services.py @@ -0,0 +1,482 @@ +"""Module containing user services for basic authentication""" +from flask import (Flask, redirect, render_template, request, session, url_for) +import os +import shutil +import werkzeug.exceptions as WEXC + +from LmCommon.common.lmconstants import HTTPStatus + +from LmServer.common.lmuser import LMUser +from LmServer.common.localconstants import PUBLIC_USER + +from LmWebServer.common.lmconstants import (REFERER_KEY, SESSION_KEY, SESSION_PATH) +from LmWebServer.flask_app.base import LmService + + +# ............................................................................. +class UserLogin(LmService): + """User login service.""" + + # ................................ + def index(self): + """Present the user with a login page if not logged in.""" + # Check if the user is logged in + user_id = self.get_user_id() + if user_id is not None and user_id != PUBLIC_USER: + # Already logged in + return "Welcome {}".format(user_id) + + # Return login page + return render_template('public_html/login.html') + + # ................................ + def login(self, user_id=None, pword=None): + """Log in using the provided credentials""" + + if user_id is None or pword is None: + raise WEXC.BadRequest('Must provide user name and password') + + referer_page = None + try: + cookie = request.cookie + if REFERER_KEY in cookie: + referer_page = cookie[REFERER_KEY].value + else: + referer_page = request.headers['referer'] + cookie = response.cookie + cookie[REFERER_KEY] = referer_page + cookie[REFERER_KEY]['path'] = '/api/login' + cookie[REFERER_KEY]['max-age'] = 30 + cookie[REFERER_KEY]['version'] = 1 + except Exception: + pass + + user = self.scribe.find_user(user_id=user_id) + if user is not None and user.check_password(pword): + # Provided correct credentials + cherrypy.session.regenerate() + cherrypy.session[SESSION_KEY] = user.get_user_id() + cherrypy.request.login = user.get_user_id() + cookie = cherrypy.response.cookie + cookie[REFERER_KEY] = referer_page + cookie[REFERER_KEY]['expires'] = 0 + raise cherrypy.HTTPRedirect(referer_page or '/') + + raise cherrypy.HTTPError( + HTTPStatus.FORBIDDEN, 'Invalid username / password combination') + + +# ............................................................................. +@cherrypy.expose +class UserLogout(LmService): + """ + @summary: Log the user out of the system + """ + + # ................................ + def GET(self): + """Log out + """ + cherrypy.lib.sessions.expire() + cherrypy.session[SESSION_KEY] = cherrypy.request.login = None + session_file_name = os.path.join( + SESSION_PATH, 'session-{}'.format(cherrypy.session.id)) + try: + shutil.rmtree(session_file_name) + except Exception: + pass + + raise cherrypy.HTTPRedirect('/api/login') + + +# ............................................................................. +@cherrypy.expose +class UserSignUp(LmService): + """ + @summary: Service to create a new user + """ + + # ................................ + def GET(self): + """ + @summary: Present a new user form + """ + return _get_signup_page() + + # ................................ 
+ def POST(self, user_id, email, first_name, pword1, last_name=None, + institution=None, address1=None, address2=None, address3=None, + phone=None): + + if not _verify_length(user_id, max_length=20, min_length=5): + raise cherrypy.HTTPError( + HTTPStatus.BAD_REQUEST, + 'User ID must have between 5 and 20 characters') + if not _verify_length(first_name, min_length=2, max_length=50): + raise cherrypy.HTTPError( + HTTPStatus.BAD_REQUEST, + 'First name must have between 2 and 50 characters') + if not _verify_length(last_name, min_length=2, max_length=50): + raise cherrypy.HTTPError( + HTTPStatus.BAD_REQUEST, + 'Last name must have between 2 and 50 characters') + if phone is not None and len(phone) > 0 and not _verify_length( + phone, min_length=10, max_length=20): + raise cherrypy.HTTPError( + HTTPStatus.BAD_REQUEST, + 'Phone number must have between 10 and 20 characters') + if not _verify_length(email, min_length=9, max_length=64): + raise cherrypy.HTTPError( + HTTPStatus.BAD_REQUEST, + 'Email must have between 9 and 64 characters') + if not _verify_length(pword1, min_length=8, max_length=32): + raise cherrypy.HTTPError( + HTTPStatus.BAD_REQUEST, + 'Password must be between 8 and 32 characters') + + check_user = self.scribe.find_user(user_id, email) + + if check_user is None: + usr = LMUser( + user_id, email, pword1, first_name=first_name, + last_name=last_name, institution=institution, addr_1=address1, + addr_2=address2, addr_3=address3, phone=phone) + ins_usr = self.scribe.find_or_insert_user(usr) + + cherrypy.session[SESSION_KEY] = cherrypy.request.login = user_id + + welcome_msg = _get_welcome_msg(first_name, user_id, pword1) + return welcome_msg + + raise cherrypy.HTTPError( + HTTPStatus.CONFLICT, 'Duplicate user credentials') + + +# ............................................................................. +def _get_login_page(): + login_page = """\ + + + Log in to Lifemapper + + +
+
+
+ + + + + + + + + +
+ User Name: + + +
+ Password: + + +
+
+

+ New user? Sign up here!

+ Forgot your password? Contact us at lifemapper at ku dot edu. +
+
+
+ +""" + return login_page + + +# ............................................................................. +def _get_signup_page(): + signup_page = """\ + + + + Sign up for Lifemapper + + + + + + + +""" + return signup_page + + +# ............................................................................. +def _get_welcome_msg(first_name, user_id, pword): + """Get a welcome message for the new user + """ + welcome_msg = """\ + + + + Welcome to Lifemapper + + + +

+ Your user name is: {user_name}, your password is: {pword} +

+ +""".format(user_name=user_id, pword=pword) + return welcome_msg + + +# ............................................................................. +def _verify_length(item, min_length=0, max_length=50): + """ + """ + if item is None or (len(item) <= max_length and len(item) >= min_length): + return True + + return False diff --git a/LmWebServer/public_html/login.html b/LmWebServer/public_html/login.html index 2a4d0a28..92b17797 100644 --- a/LmWebServer/public_html/login.html +++ b/LmWebServer/public_html/login.html @@ -1,9 +1,35 @@ -
-

Login to your account

-
- - - - Forgot Username? -
-
\ No newline at end of file + + + Log in to Lifemapper + + +
+
+
+ + + + + + + + + +
+ User Name: + + +
+ Password: + + +
+
+

+ New user? Sign up here!

+ Forgot your password? Contact us at lifemapper at ku dot edu. +
+
+
+ + From b6877960fde39a2c02231bc819df55aab2cec817 Mon Sep 17 00:00:00 2001 From: zzeppozz Date: Mon, 6 Dec 2021 11:49:58 -0600 Subject: [PATCH 18/18] cleanup --- LmWebServer/flask_app/routes.py | 400 +++++++++++++------------- LmWebServer/flask_tools/basic_auth.py | 15 +- 2 files changed, 216 insertions(+), 199 deletions(-) diff --git a/LmWebServer/flask_app/routes.py b/LmWebServer/flask_app/routes.py index 63b99a09..19fcd5df 100644 --- a/LmWebServer/flask_app/routes.py +++ b/LmWebServer/flask_app/routes.py @@ -1,6 +1,7 @@ -from flask import (Flask, flash, redirect, render_template, request, session, url_for) +import flask +# from flask import (abort, Flask, flash, redirect, render_template, request, session, url_for) from flask_cors import CORS -from flask_login import login_user, LoginForm, LoginManager +from flask_login import login_user, LoginManager import os from werkzeug.exceptions import BadRequest from werkzeug.utils import secure_filename @@ -26,6 +27,7 @@ from LmWebServer.flask_app.taxonomy import TaxonomyHintService from LmWebServer.flask_app.tree import TreeService from LmWebServer.flask_app.upload import UserUploadService +from LmWebServer.flask_tools.basic_auth import is_safe_url try: skey = os.environ['SECRET_KEY'] @@ -34,7 +36,7 @@ # TODO: Put this into the database or an environment variable -app = Flask(__name__.split('.')[0]) +app = flask.Flask(__name__.split('.')[0]) app.secret_key = str.encode(skey) CORS(app) @@ -50,54 +52,56 @@ def load_user(user_id): # .......................... @app.route('/') def index(): - if 'username' in session: - return f'Logged in as {session["username"]}' + if 'username' in flask.session: + return f'Logged in as {flask.session["username"]}' return 'You are not logged in' -# .......................... -@app.route('/api/login', methods=['GET', 'POST']) -def login(): - # Here we use a class of some kind to represent and validate our - # client-side form data. For example, WTForms is a library that will - # handle this for us, and we use a custom LoginForm to validate. - form = LoginForm() - if form.validate_on_submit(): - # Login and validate the user. - # user should be an instance of your `User` class - login_user(user) - - flash('Logged in successfully.') - - next = request.args.get('next') - # is_safe_url should check if the url is safe for redirects. - # See http://flask.pocoo.org/snippets/62/ for an example. - # if not is_safe_url(next): - # return flask.abort(400) - - return redirect(next or url_for('index')) - return render_template('public_html/login.html', form=form) - - - # if request.method == 'POST': - # username = request.form.get('username') - # password = request.form.get('password') - # - # user = LmService.get_user(username) - # if user.check_password(password): - # session['username'] = user.user_id - # return user - # else: - # print('Incorrect password') - # return redirect(request.url) - # - # return render_template('public_html/login.html') - -# ..................................................................................... -@app.route('/api/logout') -def logout(): - # remove the username from the session if it's there - session.pop('username', None) - return redirect(url_for('index')) +# # .......................... +# @app.route('/api/login', methods=['GET', 'POST']) +# def login(): +# # Here we use a class of some kind to represent and validate our +# # client-side form data. For example, WTForms is a library that will +# # handle this for us, and we use a custom LoginForm to validate. 
+# form = LoginForm() +# if form.validate_on_submit(): +# user = load_user() +# # Login and validate the user. +# # user should be an instance of your `User` class +# user = LmService.get_user() +# login_user(user) +# +# flask.flash('Logged in successfully.') +# +# next = flask.request.args.get('next') +# # is_safe_url should check if the url is safe for redirects. +# # See http://flask.pocoo.org/snippets/62/ for an example. +# if not is_safe_url(next): +# return flask.abort(400) +# +# return flask.redirect(next or flask.url_for('index')) +# return flask.render_template('public_html/login.html', form=form) +# +# +# # if request.method == 'POST': +# # username = request.form.get('username') +# # password = request.form.get('password') +# # +# # user = LmService.get_user(username) +# # if user.check_password(password): +# # session['username'] = user.user_id +# # return user +# # else: +# # print('Incorrect password') +# # return redirect(request.url) +# # +# # return render_template('public_html/login.html') +# +# # ..................................................................................... +# @app.route('/api/logout') +# def logout(): +# # remove the username from the session if it's there +# flask.session.pop('username', None) +# return flask.redirect(flask.url_for('index')) # ..................................................................................... @@ -115,24 +119,24 @@ def layer(identifier): user = svc.get_user() user_id = user.user_id - if request.method == 'DELETE': + if flask.request.method == 'DELETE': svc.delete_occurrence_set(user_id, identifier) - elif request.method == 'GET': - after_time = request.args.get('after_time', default = None, type = float) - before_time = request.args.get('before_time', default = None, type = float) - alt_pred_code = request.args.get('alt_pred_code', default = None, type = str) - date_code = request.args.get('date_code', default = None, type = str) - epsg_code = request.args.get('epsg_code', default= None, type = str) - env_code = request.args.get('env_code', default = None, type = str) - env_type_id = request.args.get('env_type_id', default = None, type = int) - gcm_code = request.args.get('gcm_code', default = None, type = str) + elif flask.request.method == 'GET': + after_time = flask.request.args.get('after_time', default = None, type = float) + before_time = flask.request.args.get('before_time', default = None, type = float) + alt_pred_code = flask.request.args.get('alt_pred_code', default = None, type = str) + date_code = flask.request.args.get('date_code', default = None, type = str) + epsg_code = flask.request.args.get('epsg_code', default= None, type = str) + env_code = flask.request.args.get('env_code', default = None, type = str) + env_type_id = flask.request.args.get('env_type_id', default = None, type = int) + gcm_code = flask.request.args.get('gcm_code', default = None, type = str) # layer_type: - layer_type = request.args.get('layer_type', default = None, type = str) - scenario_code = request.args.get('scenario_code', default = None, type = int) - squid = request.args.get('squid', default = None, type = str) - limit = request.args.get('limit', default = 100, type = int) - offset = request.args.get('offset', default = 0, type = int) + layer_type = flask.request.args.get('layer_type', default = None, type = str) + scenario_code = flask.request.args.get('scenario_code', default = None, type = int) + squid = flask.request.args.get('squid', default = None, type = str) + limit = flask.request.args.get('limit', default = 
100, type = int) + offset = flask.request.args.get('offset', default = 0, type = int) if identifier is None: if layer_type == 1: @@ -183,24 +187,24 @@ def occurrence(identifier): user = svc.get_user() user_id = user.user_id - if request.method == 'POST' and request.is_json: - boom_data = request.get_json() + if flask.request.method == 'POST' and flask.request.is_json: + boom_data = flask.request.get_json() svc.post_boom_data(user_id, user.email, boom_data) - elif request.method == 'DELETE': + elif flask.request.method == 'DELETE': svc.delete_occurrence_set(user_id, identifier) - elif request.method == 'GET': - after_time = request.args.get('after_time', default = None, type = float) - before_time = request.args.get('before_time', default = None, type = float) - display_name = request.args.get('display_name', default = None, type = str) - epsg_code = request.args.get('epsg_code', default= None, type = str) - minimum_number_of_points = request.args.get('minimum_number_of_points', default = 1, type = int) - limit = request.args.get('limit', default = 100, type = int) - offset = request.args.get('offset', default = 0, type = int) - status = request.args.get('status', default = None, type = int) - gridset_id = request.args.get('gridset_id', default = None, type = int) - fill_points = request.args.get('fill_points', default = False, type = bool) + elif flask.request.method == 'GET': + after_time = flask.request.args.get('after_time', default = None, type = float) + before_time = flask.request.args.get('before_time', default = None, type = float) + display_name = flask.request.args.get('display_name', default = None, type = str) + epsg_code = flask.request.args.get('epsg_code', default= None, type = str) + minimum_number_of_points = flask.request.args.get('minimum_number_of_points', default = 1, type = int) + limit = flask.request.args.get('limit', default = 100, type = int) + offset = flask.request.args.get('offset', default = 0, type = int) + status = flask.request.args.get('status', default = None, type = int) + gridset_id = flask.request.args.get('gridset_id', default = None, type = int) + fill_points = flask.request.args.get('fill_points', default = False, type = bool) if identifier is None: response = svc.list_occurrence_sets( @@ -234,7 +238,7 @@ def occurrence(identifier): @app.route('/api/v2/biotaphynames', methods=['POST']) def biotaphynames(): try: - names_obj = request.get_json() + names_obj = flask.request.get_json() except: return BadRequest('Names must be a JSON list') else: @@ -246,7 +250,7 @@ def biotaphynames(): @app.route('/api/v2/biotaphypoints', methods=['POST']) def biotaphypoints(): try: - taxonids_obj = request.get_json() + taxonids_obj = flask.request.get_json() except: return BadRequest('Taxon IDs must be a JSON list') else: @@ -258,7 +262,7 @@ def biotaphypoints(): @app.route('/api/v2/biotaphytree', methods=['POST']) def biotaphytree(): try: - taxon_names_obj = request.get_json() + taxon_names_obj = flask.request.get_json() except: return BadRequest('Taxon names must be a JSON list') else: @@ -269,7 +273,7 @@ def biotaphytree(): @app.route('/api/v2/gbifparser', methods=['POST']) def gbifparser(): try: - names_obj = request.get_json() + names_obj = flask.request.get_json() except: return BadRequest('Name list must be in JSON format') else: @@ -284,26 +288,26 @@ def globalpam(): user = svc.get_user() user_id = user.user_id - archive_name = request.args.get('display_name', default = None, type = str) - cell_size = request.args.get('cell_size', default = None, type = float) 
- algorithm_code = request.args.get('algorithm_code', default = None, type = str) - bbox = request.args.get('bbox', default = None, type = str) - display_name = request.args.get('display_name', default = None, type = str) - gridset_id = request.args.get('gridset_id', default = None, type = int) - model_scenario_code = request.args.get('model_scenario_code', default = None, type = str) - prj_scen_code = request.args.get('prj_scenario_code', default = None, type = str) - point_max = request.args.get('point_max', default = None, type = int) - point_min = request.args.get('point_min', default = None, type = int) - squid = request.args.get('squid', default = None, type = str) - taxon_kingdom = request.args.get('taxon_kingdom', default = None, type = str) - taxon_phylum = request.args.get('taxon_phylum', default = None, type = str) - taxon_class = request.args.get('taxon_class', default = None, type = str) - taxon_order = request.args.get('taxon_order', default = None, type = str) - taxon_family = request.args.get('taxon_family', default = None, type = str) - taxon_genus = request.args.get('taxon_genus', default = None, type = str) - taxon_species = request.args.get('taxon_species', default = None, type = str) + archive_name = flask.request.args.get('display_name', default = None, type = str) + cell_size = flask.request.args.get('cell_size', default = None, type = float) + algorithm_code = flask.request.args.get('algorithm_code', default = None, type = str) + bbox = flask.request.args.get('bbox', default = None, type = str) + display_name = flask.request.args.get('display_name', default = None, type = str) + gridset_id = flask.request.args.get('gridset_id', default = None, type = int) + model_scenario_code = flask.request.args.get('model_scenario_code', default = None, type = str) + prj_scen_code = flask.request.args.get('prj_scenario_code', default = None, type = str) + point_max = flask.request.args.get('point_max', default = None, type = int) + point_min = flask.request.args.get('point_min', default = None, type = int) + squid = flask.request.args.get('squid', default = None, type = str) + taxon_kingdom = flask.request.args.get('taxon_kingdom', default = None, type = str) + taxon_phylum = flask.request.args.get('taxon_phylum', default = None, type = str) + taxon_class = flask.request.args.get('taxon_class', default = None, type = str) + taxon_order = flask.request.args.get('taxon_order', default = None, type = str) + taxon_family = flask.request.args.get('taxon_family', default = None, type = str) + taxon_genus = flask.request.args.get('taxon_genus', default = None, type = str) + taxon_species = flask.request.args.get('taxon_species', default = None, type = str) - if request.method == 'POST': + if flask.request.method == 'POST': response = svc.post_pam_subset( user_id, archive_name, gridset_id, algorithm_code=algorithm_code, bbox=bbox, display_name=display_name, gridset_id=gridset_id, model_scenario_code=model_scenario_code, @@ -312,7 +316,7 @@ def globalpam(): taxon_order=taxon_order, taxon_family=taxon_family, taxon_genus=taxon_genus, taxon_species=taxon_species) - elif request.method == 'GET': + elif flask.request.method == 'GET': response = svc.post_pam_subset( user_id, archive_name, cell_size=cell_size, algorithm_code=algorithm_code, bbox=bbox, display_name=display_name, gridset_id=gridset_id, model_scenario_code=model_scenario_code, @@ -330,21 +334,21 @@ def gridset(identifier): user = svc.get_user() user_id = user.user_id - if request.method == 'POST' and request.is_json: - 
gridset_data = request.get_json() + if flask.request.method == 'POST' and flask.request.is_json: + gridset_data = flask.request.get_json() svc.post_boom_data(user_id, user.email, gridset_data) - elif request.method == 'DELETE': + elif flask.request.method == 'DELETE': svc.delete_gridset(user_id, identifier) - elif request.method == 'GET': - after_time = request.args.get('after_time', default = None, type = float) - before_time = request.args.get('before_time', default = None, type = float) - epsg_code = request.args.get('epsg_code', default= None, type = str) - meta_string = request.args.get('meta_string', default= None, type = str) - shapegrid_id = request.args.get('shapegrid_id', default= None, type = int) - limit = request.args.get('limit', default = 100, type = int) - offset = request.args.get('offset', default = 0, type = int) + elif flask.request.method == 'GET': + after_time = flask.request.args.get('after_time', default = None, type = float) + before_time = flask.request.args.get('before_time', default = None, type = float) + epsg_code = flask.request.args.get('epsg_code', default= None, type = str) + meta_string = flask.request.args.get('meta_string', default= None, type = str) + shapegrid_id = flask.request.args.get('shapegrid_id', default= None, type = int) + limit = flask.request.args.get('limit', default = 100, type = int) + offset = flask.request.args.get('offset', default = 0, type = int) if identifier is None: response = svc.list_gridsets( @@ -372,7 +376,7 @@ def hint(): svc = SpeciesHintService() user_id = svc.get_user() - search_string = request.args.get('search_string', default= None, type = str) + search_string = flask.request.args.get('search_string', default= None, type = str) return svc.get_hint(user_id, search_string) # ..................................................................................... 
@@ -381,13 +385,13 @@ def scenpackage(identifier): svc = ScenarioPackageService() user_id = svc.get_user() - scenario_package_id = request.args.get('scenario_package_id', default = None, type = int) - scenario_id = request.args.get('scenario_id', default = None, type = int) - after_time = request.args.get('after_time', default = None, type = float) - before_time = request.args.get('before_time', default = None, type = float) - epsg_code = request.args.get('epsg_code', default= None, type = str) - limit = request.args.get('limit', default = 100, type = int) - offset = request.args.get('offset', default = 0, type = int) + scenario_package_id = flask.request.args.get('scenario_package_id', default = None, type = int) + scenario_id = flask.request.args.get('scenario_id', default = None, type = int) + after_time = flask.request.args.get('after_time', default = None, type = float) + before_time = flask.request.args.get('before_time', default = None, type = float) + epsg_code = flask.request.args.get('epsg_code', default= None, type = str) + limit = flask.request.args.get('limit', default = 100, type = int) + offset = flask.request.args.get('offset', default = 0, type = int) if identifier is None: response = svc.list_scenario_packages( @@ -414,15 +418,15 @@ def scenario(identifier): svc = ScenarioService() user_id = svc.get_user_id() - scenario_id = request.args.get('scenario_id', default = None, type = int) - after_time = request.args.get('after_time', default = None, type = float) - before_time = request.args.get('before_time', default = None, type = float) - alt_pred_code = request.args.get('alt_pred_code', default= None, type = str) - date_code = request.args.get('date_code', default= None, type = str) - gcm_code = request.args.get('gcm_code', default= None, type = str) - epsg_code = request.args.get('epsg_code', default= None, type = str) - limit = request.args.get('limit', default = 100, type = int) - offset = request.args.get('offset', default = 0, type = int) + scenario_id = flask.request.args.get('scenario_id', default = None, type = int) + after_time = flask.request.args.get('after_time', default = None, type = float) + before_time = flask.request.args.get('before_time', default = None, type = float) + alt_pred_code = flask.request.args.get('alt_pred_code', default= None, type = str) + date_code = flask.request.args.get('date_code', default= None, type = str) + gcm_code = flask.request.args.get('gcm_code', default= None, type = str) + epsg_code = flask.request.args.get('epsg_code', default= None, type = str) + limit = flask.request.args.get('limit', default = 100, type = int) + offset = flask.request.args.get('offset', default = 0, type = int) if identifier is None: response = svc.list_scenarios( @@ -460,29 +464,29 @@ def sdmproject(identifier): user = svc.get_user() user_id = user.user_id - if request.method == 'POST' and request.is_json: - projection_data = request.get_json() + if flask.request.method == 'POST' and flask.request.is_json: + projection_data = flask.request.get_json() svc.post_boom_data(user_id, user.email, projection_data) - elif request.method == 'DELETE': + elif flask.request.method == 'DELETE': svc.delete_occurrence_set(user_id, identifier) - elif request.method == 'GET': - after_time = request.args.get('after_time', default = None, type = float) - before_time = request.args.get('before_time', default = None, type = float) - after_status = request.args.get('after_status', default = JobStatus.COMPLETE, type = int) - before_status = request.args.get('before_status', 
default = JobStatus.COMPLETE, type = int) - alg_code = request.args.get('alg_code', default = None, type = str) - display_name = request.args.get('display_name', default = None, type = str) - epsg_code = request.args.get('epsg_code', default= None, type = str) - occurrence_set_id = request.args.get('occurrence_set_id', default = None, type = int) - mdl_scenario_code = request.args.get('mdl_scenario_code', default = None, type = str) - prj_scenario_code = request.args.get('prj_scenario_code', default = None, type = str) - status = request.args.get('status', default = JobStatus.COMPLETE, type = int) - gridset_id = request.args.get('gridset_id', default = None, type = int) - limit = request.args.get('limit', default = 100, type = int) - offset = request.args.get('offset', default = 0, type = int) - atom = request.args.get('atom', default = True, type = bool) + elif flask.request.method == 'GET': + after_time = flask.request.args.get('after_time', default = None, type = float) + before_time = flask.request.args.get('before_time', default = None, type = float) + after_status = flask.request.args.get('after_status', default = JobStatus.COMPLETE, type = int) + before_status = flask.request.args.get('before_status', default = JobStatus.COMPLETE, type = int) + alg_code = flask.request.args.get('alg_code', default = None, type = str) + display_name = flask.request.args.get('display_name', default = None, type = str) + epsg_code = flask.request.args.get('epsg_code', default= None, type = str) + occurrence_set_id = flask.request.args.get('occurrence_set_id', default = None, type = int) + mdl_scenario_code = flask.request.args.get('mdl_scenario_code', default = None, type = str) + prj_scenario_code = flask.request.args.get('prj_scenario_code', default = None, type = str) + status = flask.request.args.get('status', default = JobStatus.COMPLETE, type = int) + gridset_id = flask.request.args.get('gridset_id', default = None, type = int) + limit = flask.request.args.get('limit', default = 100, type = int) + offset = flask.request.args.get('offset', default = 0, type = int) + atom = flask.request.args.get('atom', default = True, type = bool) if identifier is None: response = svc.list_projections( @@ -515,18 +519,18 @@ def snippet(): svc = SnippetService() user_id = svc.get_user() - ident1 = request.args.get('ident1', default = None, type = str) - ident2 = request.args.get('ident2', default = None, type = str) - provider = request.args.get('provider', default = None, type = str) - collection = request.args.get('collection', default = None, type = str) - catalog_number = request.args.get('catalog_number', default = None, type = str) - operation = request.args.get('operation', default = None, type = str) - after_time = request.args.get('after_time', default = None, type = float) - before_time = request.args.get('before_time', default = None, type = float) - url = request.args.get('url', default = None, type = str) - who = request.args.get('who', default = None, type = str) - agent = request.args.get('agent', default = None, type = str) - why = request.args.get('why', default = None, type = str) + ident1 = flask.request.args.get('ident1', default = None, type = str) + ident2 = flask.request.args.get('ident2', default = None, type = str) + provider = flask.request.args.get('provider', default = None, type = str) + collection = flask.request.args.get('collection', default = None, type = str) + catalog_number = flask.request.args.get('catalog_number', default = None, type = str) + operation = 
flask.request.args.get('operation', default = None, type = str) + after_time = flask.request.args.get('after_time', default = None, type = float) + before_time = flask.request.args.get('before_time', default = None, type = float) + url = flask.request.args.get('url', default = None, type = str) + who = flask.request.args.get('who', default = None, type = str) + agent = flask.request.args.get('agent', default = None, type = str) + why = flask.request.args.get('why', default = None, type = str) response = svc.get_snippet( user_id, ident1=ident1, ident2=ident2, provider=provider, collection=collection, @@ -540,7 +544,7 @@ def snippet(): @app.route('/api/v2/rawsolr', methods=['POST']) def rawsolr(): svc = RawSolrService() - req_body = request.get_json() + req_body = flask.request.get_json() response = svc.query_collection(req_body) return response @@ -548,7 +552,7 @@ def rawsolr(): @app.route('/api/v2/taxonomy', methods=['GET']) def taxonomy(): svc = TaxonomyHintService() - req_body = request.get_json() + req_body = flask.request.get_json() response = svc.query_collection(req_body) return response @@ -567,23 +571,23 @@ def tree(identifier): svc = TreeService() user_id = svc.get_user() - if request.method == 'POST' and request.is_json: - tree_data = request.get_json() + if flask.request.method == 'POST' and flask.request.is_json: + tree_data = flask.request.get_json() svc.post_tree(user_id, tree_data) - elif request.method == 'DELETE': + elif flask.request.method == 'DELETE': svc.delete_tree(user_id, identifier) - elif request.method == 'GET': - after_time = request.args.get('after_time', default = None, type = float) - before_time = request.args.get('before_time', default = None, type = float) - is_binary = request.args.get('is_binary', default = None, type = bool) - is_ultrametric = request.args.get('is_ultrametric', default = None, type = bool) - has_branch_lengths = request.args.get('has_branch_lengths', default = None, type = bool) - meta_string = request.args.get('meta_string', default = None, type = str) - name = request.args.get('name', default = None, type = str) - limit = request.args.get('limit', default = 100, type = int) - offset = request.args.get('offset', default = 0, type = int) + elif flask.request.method == 'GET': + after_time = flask.request.args.get('after_time', default = None, type = float) + before_time = flask.request.args.get('before_time', default = None, type = float) + is_binary = flask.request.args.get('is_binary', default = None, type = bool) + is_ultrametric = flask.request.args.get('is_ultrametric', default = None, type = bool) + has_branch_lengths = flask.request.args.get('has_branch_lengths', default = None, type = bool) + meta_string = flask.request.args.get('meta_string', default = None, type = str) + name = flask.request.args.get('name', default = None, type = str) + limit = flask.request.args.get('limit', default = 100, type = int) + offset = flask.request.args.get('offset', default = 0, type = int) if identifier is None: response = svc.list_trees( @@ -617,21 +621,21 @@ def allowed_file(filename): def upload(): svc = UserUploadService() - file_name = request.args.get('file_name', default = None, type = str) - upload_type = request.args.get('upload_type', default = None, type = str) - metadata = request.args.get('metadata', default = None, type = str) + file_name = flask.request.args.get('file_name', default = None, type = str) + upload_type = flask.request.args.get('upload_type', default = None, type = str) + metadata = flask.request.args.get('metadata', 
default = None, type = str) - if request.method == 'POST': - # check if the post request has the file part - if 'file' not in request.files: - flash('No file part') - return redirect(request.url) + if flask.request.method == 'POST': + # check if the post flask.request has the file part + if 'file' not in flask.request.files: + flask.flash('No file part') + return flask.redirect(flask.request.url) - upload_file = request.files['file'] + upload_file = flask.request.files['file'] # If the user does not select a file, the browser submits an empty file without a filename. if upload_file.filename == '': - flash('No selected file') - return redirect(request.url) + flask.flash('No selected file') + return flask.redirect(flask.request.url) if upload_file: if allowed_file(upload_file.filename): @@ -647,9 +651,9 @@ def upload(): else: try: - data = request.get_data() + data = flask.request.get_data() except: - raise BadRequest('Unable to read data from request') + raise BadRequest('Unable to read data from flask.request') return svc.post_data(safe_filename, upload_type, metadata, data) @@ -658,22 +662,22 @@ def upload(): @app.route('/api/v2/upload_file', methods=['GET', 'POST']) def upload_file(): """Test implementation from https://flask.palletsprojects.com/en/2.0.x/patterns/fileuploads/""" - if request.method == 'POST': - # check if the post request has the file part - if 'file' not in request.files: - flash('No file part') - return redirect(request.url) + if flask.request.method == 'POST': + # check if the post flask.request has the file part + if 'file' not in flask.request.files: + flask.flash('No file part') + return flask.redirect(flask.request.url) - file = request.files['file'] + file = flask.request.files['file'] # If the user does not select a file, the browser submits an empty file without a filename. if file.filename == '': - flash('No selected file') - return redirect(request.url) + flask.flash('No selected file') + return flask.redirect(flask.request.url) if file and allowed_file(file.filename): filename = secure_filename(file.filename) file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename)) - return redirect(url_for('download_file', name=filename)) + return flask.redirect(flask.url_for('download_file', name=filename)) return diff --git a/LmWebServer/flask_tools/basic_auth.py b/LmWebServer/flask_tools/basic_auth.py index 5a486832..3c466a63 100644 --- a/LmWebServer/flask_tools/basic_auth.py +++ b/LmWebServer/flask_tools/basic_auth.py @@ -5,8 +5,10 @@ for Lifemapper proper. We may want to keep basic authentication for instances though, thus the name of this module is 'basicAuth' """ -from flask import session +from flask import session, request, url_for import os +# from urlparse import urlparse, urljoin +from urllib.parse import urlparse, urljoin from LmServer.common.localconstants import PUBLIC_USER from LmServer.common.log import WebLogger, UserLogger @@ -31,3 +33,14 @@ def get_user_name(): session['username'] = user session['log'] = log + + +# ............................................................................. +def is_safe_url(target): + """Use to test before redirecting + + Note: From archived Flask snippets site: https://web.archive.org/web/20190128010142/http://flask.pocoo.org/snippets/62/ + """ + ref_url = urlparse(request.host_url) + test_url = urlparse(urljoin(request.host_url, target)) + return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc \ No newline at end of file
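
Note on the unfinished login flow: patch 18 leaves the flask_login-based login view commented out in routes.py. The sketch below is not part of the patch series; it is one possible way the pieces these patches do add (WebUser and load_user in flask_app/base.py, is_safe_url in flask_tools/basic_auth.py, and the check_password logic from the commented-out handler) might be wired together once a form class exists. LoginForm, its username and password fields, and the flash message are assumptions, not code from these commits.

# .............................................................................
# Sketch only: `LoginForm` is assumed to be a Flask-WTF form with `username`
# and `password` fields; no such form is defined anywhere in this series.
import flask
from flask_login import login_user

from LmWebServer.flask_app.base import LmService
from LmWebServer.flask_tools.basic_auth import is_safe_url


@app.route('/api/login', methods=['GET', 'POST'])
def login():
    form = LoginForm()
    if form.validate_on_submit():
        svc = LmService()
        user = svc.get_user(form.username.data)
        if user is not None and user.check_password(form.password.data):
            # flask_login bookkeeping plus the session key other routes read
            login_user(user)
            flask.session['username'] = user.user_id
            next_url = flask.request.args.get('next')
            # Reject open redirects before honoring ?next=
            if next_url and not is_safe_url(next_url):
                return flask.abort(400)
            return flask.redirect(next_url or flask.url_for('index'))
        flask.flash('Invalid username / password combination')
    return flask.render_template('public_html/login.html', form=form)

Checking is_safe_url before following the ?next= parameter is the pattern recommended by the archived Flask snippet linked from basic_auth.py, and it is the reason that helper was added in patch 18.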