From 2b7a2b95526f2ae93705833c87b2f75ce39f4dcf Mon Sep 17 00:00:00 2001 From: Bhupinder Saini Date: Sun, 28 Jan 2024 16:20:45 +0200 Subject: [PATCH] Create new api for NIMBUS Fixes #11 --- app.py | 20 +- models/method_models.py | 12 + models/problem_models.py | 20 +- poetry.lock | 83 +++++- pyproject.toml | 5 +- resources/nimbus.py | 500 ++++++++++++++++++++++++++++----- resources/problem_resources.py | 56 +++- 7 files changed, 606 insertions(+), 90 deletions(-) diff --git a/app.py b/app.py index 4ef529b..45f28ac 100644 --- a/app.py +++ b/app.py @@ -11,9 +11,20 @@ api = Api(app) -ACCESS_EXPIRES = timedelta(hours=1) + +db_user = "bhupindersaini" +db_password = "" +db_host = "localhost" +db_port = "5432" +db_name = "DESDEO" + +ACCESS_EXPIRES = timedelta(hours=2) app.config["PROPAGATE_EXCEPTIONS"] = True -app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///app.db" +app.config[ + "SQLALCHEMY_DATABASE_URI" +] = f"postgresql://{db_user}:{db_password}@{db_host}:{db_port}/{db_name}" + +#app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///database.db" app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False app.config["SECRET_KEY"] = "secret-key" app.config["JWT_SECRET_KEY"] = "jwt-secret-key" @@ -21,6 +32,7 @@ app.config["JWT_ACCESS_TOKEN_EXPIRES"] = ACCESS_EXPIRES + jwt = JWTManager(app) # db = SQLAlchemy(app) @@ -74,8 +86,10 @@ def check_if_token_revoked(jwt_header, jwt_payload): api.add_resource(method_resources.MethodControl, "/method/control") # Add nimbus endpoints - +api.add_resource(nimbus.Initialize, "/nimbus/initialize") api.add_resource(nimbus.Iterate, "/nimbus/iterate") +api.add_resource(nimbus.Save, "/nimbus/save") +api.add_resource(nimbus.Choose, "/nimbus/choose") # Add questionnaire endpoints api.add_resource( diff --git a/models/method_models.py b/models/method_models.py index 73edf96..a1d1ef2 100644 --- a/models/method_models.py +++ b/models/method_models.py @@ -1,6 +1,8 @@ import dill from database import db +from sqlalchemy.dialects import postgresql + # to be able to serialize lambdified expressions returned by SymPy # This might break some serializations! dill.settings["recurse"] = True @@ -22,3 +24,13 @@ def __repr__(self): f"Method = id:{self.id}, name:{self.name}, user_id:{self.user_id}, minimize:{self.minimize}, " f"status:{self.status}, last_request:{self.last_request}" ) + + +class Preference(db.Model): + """Database model for storing preferences temporarily (for UTOPIA).""" + + __tablename__ = "preference" + id = db.Column(db.Integer, primary_key=True, unique=True) + method = db.Column(db.String, nullable=False) + preference = db.Column(postgresql.JSON, nullable=False) + date = db.Column(db.DateTime, nullable=False) diff --git a/models/problem_models.py b/models/problem_models.py index eb436dc..152a21a 100644 --- a/models/problem_models.py +++ b/models/problem_models.py @@ -1,6 +1,9 @@ import dill +from sqlalchemy.orm import validates, Mapped + +from sqlalchemy.dialects import postgresql + from database import db -from sqlalchemy.orm import validates # to be able to serialize lambdified expressions returned by SymPy # This might break some serializations! 
@@ -47,3 +50,18 @@ def validate_dict(self, _, dict_): "The dictrionary supplied to SolutionArchive must contain the keys 'variables' and 'objectives'" ) return dict_ + + +class UTOPIASolutionArchive(db.Model): + __tablename__ = "UTOPIAsolutionarchive" + id = db.Column(db.Integer, primary_key=True, unique=True) + user_id = db.Column(db.Integer, db.ForeignKey("user.id"), nullable=False) + problem_id = db.Column(db.Integer, db.ForeignKey("problem.id"), nullable=False) + preference = db.Column(db.Integer, db.ForeignKey("preference.id"), nullable=False) + method_name=db.Column(db.String(100), nullable=False) + objectives = db.Column(postgresql.ARRAY(db.Float), nullable=False) + variables = db.Column(postgresql.ARRAY(db.Float), nullable=True) + saved = db.Column(db.Boolean, nullable=False) + current = db.Column(db.Boolean, nullable=False) + chosen = db.Column(db.Boolean, nullable=False) + date = db.Column(db.DateTime, nullable=False) diff --git a/poetry.lock b/poetry.lock index e1b7fd0..40ce941 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1488,6 +1488,87 @@ files = [ dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "psycopg2-binary" +version = "2.9.9" +description = "psycopg2 - Python-PostgreSQL Database Adapter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "psycopg2-binary-2.9.9.tar.gz", hash = "sha256:7f01846810177d829c7692f1f5ada8096762d9172af1b1a28d4ab5b77c923c1c"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c2470da5418b76232f02a2fcd2229537bb2d5a7096674ce61859c3229f2eb202"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6af2a6d4b7ee9615cbb162b0738f6e1fd1f5c3eda7e5da17861eacf4c717ea7"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75723c3c0fbbf34350b46a3199eb50638ab22a0228f93fb472ef4d9becc2382b"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83791a65b51ad6ee6cf0845634859d69a038ea9b03d7b26e703f94c7e93dbcf9"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0ef4854e82c09e84cc63084a9e4ccd6d9b154f1dbdd283efb92ecd0b5e2b8c84"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed1184ab8f113e8d660ce49a56390ca181f2981066acc27cf637d5c1e10ce46e"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d2997c458c690ec2bc6b0b7ecbafd02b029b7b4283078d3b32a852a7ce3ddd98"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b58b4710c7f4161b5e9dcbe73bb7c62d65670a87df7bcce9e1faaad43e715245"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:0c009475ee389757e6e34611d75f6e4f05f0cf5ebb76c6037508318e1a1e0d7e"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8dbf6d1bc73f1d04ec1734bae3b4fb0ee3cb2a493d35ede9badbeb901fb40f6f"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-win32.whl", hash = "sha256:3f78fd71c4f43a13d342be74ebbc0666fe1f555b8837eb113cb7416856c79682"}, + {file = "psycopg2_binary-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:876801744b0dee379e4e3c38b76fc89f88834bb15bf92ee07d94acd06ec890a0"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ee825e70b1a209475622f7f7b776785bd68f34af6e7a46e2e42f27b659b5bc26"}, + {file = 
"psycopg2_binary-2.9.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1ea665f8ce695bcc37a90ee52de7a7980be5161375d42a0b6c6abedbf0d81f0f"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:143072318f793f53819048fdfe30c321890af0c3ec7cb1dfc9cc87aa88241de2"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c332c8d69fb64979ebf76613c66b985414927a40f8defa16cf1bc028b7b0a7b0"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7fc5a5acafb7d6ccca13bfa8c90f8c51f13d8fb87d95656d3950f0158d3ce53"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:977646e05232579d2e7b9c59e21dbe5261f403a88417f6a6512e70d3f8a046be"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b6356793b84728d9d50ead16ab43c187673831e9d4019013f1402c41b1db9b27"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bc7bb56d04601d443f24094e9e31ae6deec9ccb23581f75343feebaf30423359"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:77853062a2c45be16fd6b8d6de2a99278ee1d985a7bd8b103e97e41c034006d2"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:78151aa3ec21dccd5cdef6c74c3e73386dcdfaf19bced944169697d7ac7482fc"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-win32.whl", hash = "sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d"}, + {file = "psycopg2_binary-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e6f98446430fdf41bd36d4faa6cb409f5140c1c2cf58ce0bbdaf16af7d3f119"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c77e3d1862452565875eb31bdb45ac62502feabbd53429fdc39a1cc341d681ba"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe"}, + {file = "psycopg2_binary-2.9.9-cp312-cp312-win32.whl", hash = "sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93"}, + {file = 
"psycopg2_binary-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8359bf4791968c5a78c56103702000105501adb557f3cf772b2c207284273984"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:275ff571376626195ab95a746e6a04c7df8ea34638b99fc11160de91f2fef503"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f9b5571d33660d5009a8b3c25dc1db560206e2d2f89d3df1cb32d72c0d117d52"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:420f9bbf47a02616e8554e825208cb947969451978dceb77f95ad09c37791dae"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:4154ad09dac630a0f13f37b583eae260c6aa885d67dfbccb5b02c33f31a6d420"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a148c5d507bb9b4f2030a2025c545fccb0e1ef317393eaba42e7eabd28eb6041"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:68fc1f1ba168724771e38bee37d940d2865cb0f562380a1fb1ffb428b75cb692"}, + {file = "psycopg2_binary-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:281309265596e388ef483250db3640e5f414168c5a67e9c665cafce9492eda2f"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:60989127da422b74a04345096c10d416c2b41bd7bf2a380eb541059e4e999980"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:246b123cc54bb5361588acc54218c8c9fb73068bf227a4a531d8ed56fa3ca7d6"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34eccd14566f8fe14b2b95bb13b11572f7c7d5c36da61caf414d23b91fcc5d94"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18d0ef97766055fec15b5de2c06dd8e7654705ce3e5e5eed3b6651a1d2a9a152"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d3f82c171b4ccd83bbaf35aa05e44e690113bd4f3b7b6cc54d2219b132f3ae55"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ead20f7913a9c1e894aebe47cccf9dc834e1618b7aa96155d2091a626e59c972"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ca49a8119c6cbd77375ae303b0cfd8c11f011abbbd64601167ecca18a87e7cdd"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:323ba25b92454adb36fa425dc5cf6f8f19f78948cbad2e7bc6cdf7b0d7982e59"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:1236ed0952fbd919c100bc839eaa4a39ebc397ed1c08a97fc45fee2a595aa1b3"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:729177eaf0aefca0994ce4cffe96ad3c75e377c7b6f4efa59ebf003b6d398716"}, + {file = 
"psycopg2_binary-2.9.9-cp38-cp38-win32.whl", hash = "sha256:804d99b24ad523a1fe18cc707bf741670332f7c7412e9d49cb5eab67e886b9b5"}, + {file = "psycopg2_binary-2.9.9-cp38-cp38-win_amd64.whl", hash = "sha256:a6cdcc3ede532f4a4b96000b6362099591ab4a3e913d70bcbac2b56c872446f7"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:72dffbd8b4194858d0941062a9766f8297e8868e1dd07a7b36212aaa90f49472"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:30dcc86377618a4c8f3b72418df92e77be4254d8f89f14b8e8f57d6d43603c0f"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31a34c508c003a4347d389a9e6fcc2307cc2150eb516462a7a17512130de109e"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15208be1c50b99203fe88d15695f22a5bed95ab3f84354c494bcb1d08557df67"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1873aade94b74715be2246321c8650cabf5a0d098a95bab81145ffffa4c13876"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a58c98a7e9c021f357348867f537017057c2ed7f77337fd914d0bedb35dace7"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4686818798f9194d03c9129a4d9a702d9e113a89cb03bffe08c6cf799e053291"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ebdc36bea43063116f0486869652cb2ed7032dbc59fbcb4445c4862b5c1ecf7f"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ca08decd2697fdea0aea364b370b1249d47336aec935f87b8bbfd7da5b2ee9c1"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ac05fb791acf5e1a3e39402641827780fe44d27e72567a000412c648a85ba860"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-win32.whl", hash = "sha256:9dba73be7305b399924709b91682299794887cbbd88e38226ed9f6712eabee90"}, + {file = "psycopg2_binary-2.9.9-cp39-cp39-win_amd64.whl", hash = "sha256:f7ae5d65ccfbebdfa761585228eb4d0df3a8b15cfb53bd953e713e09fbb12957"}, +] + [[package]] name = "py" version = "1.11.0" @@ -2649,4 +2730,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.9, < 3.11" -content-hash = "c7959db74e8a55f4114e7ee3e4f163b4aeefa19695e51946c3b52aec083478d9" +content-hash = "b0b3d487a21f49790a8519d733a4cbc2dade7190b778db055b196de53186f91a" diff --git a/pyproject.toml b/pyproject.toml index 5ce476e..372d7b4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,6 +23,7 @@ Werkzeug = ">=2.2" desdeo-mcdm = "^1.3.2" desdeo-emo = "^1.5.0" ruff = "^0.1.6" +psycopg2-binary = "^2.9.9" [tool.poetry.dev-dependencies] flake8 = "^3.8.4" @@ -111,8 +112,8 @@ line-length = 120 # Allow unused variables when underscore-prefixed. dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" -# Assume Python 3.11. -target-version = "py312" +# Assume Python 3.9. 
+target-version = "py39" [tool.ruff.per-file-ignores] # Ignore certain rules in test files diff --git a/resources/nimbus.py b/resources/nimbus.py index 12cf2f4..fabe9a3 100644 --- a/resources/nimbus.py +++ b/resources/nimbus.py @@ -1,5 +1,7 @@ +import datetime from copy import deepcopy from dataclasses import asdict, dataclass +from typing import Union import numpy as np import pandas as pd @@ -15,7 +17,8 @@ from flask_restx import Resource, reqparse from database import db -from models.problem_models import GuestProblem, Problem +from models.method_models import Preference +from models.problem_models import GuestProblem, Problem, UTOPIASolutionArchive from models.user_models import ( GUEST_ROLE, USER_ROLE, @@ -23,11 +26,10 @@ UserModel, role_required, ) -from utilities.expression_parser import NumpyEncoder, numpify_dict_items initialize_parser = reqparse.RequestParser() initialize_parser.add_argument( - "problemId", + "problemID", type=int, help="The id of the problem to be solved", required=True, @@ -41,14 +43,15 @@ iterate_parser = reqparse.RequestParser() iterate_parser.add_argument( - "problemId", + "problemID", type=int, help="The id of the problem to be solved", required=True, ) iterate_parser.add_argument( "preference", - type=list, + type=float, + action="append", help=( "The preference as a reference point. Note, NIMBUS uses classification preference," " we can construct it using this reference point and the reference solution." @@ -57,27 +60,37 @@ ) iterate_parser.add_argument( "referenceSolution", - type=list, + type=float, + action="append", help="The reference solution to be used in the classification preference.", required=True, ) +iterate_parser.add_argument( + "numSolutions", + type=int, + help="The number of solutions to be generated.", + required=True, +) + intermediate_parser = reqparse.RequestParser() intermediate_parser.add_argument( - "problemId", + "problemID", type=int, help="The id of the problem to be solved", required=True, ) intermediate_parser.add_argument( "solution1", - type=list, + type=float, + action="append", help="The first solution for intermediate generation.", required=True, ) intermediate_parser.add_argument( "solution2", - type=list, + type=float, + action="append", help="The second solution for intermediate generation.", required=True, ) @@ -90,27 +103,72 @@ save_parser = reqparse.RequestParser() save_parser.add_argument( - "problemId", + "problemID", type=int, help="The id of the problem these solutions are for.", required=True, ) + + save_parser.add_argument( - "solutions", + "previousPreference", + type=float, + action="append", + help="The previous preference.", + required=True, +) + +save_parser.add_argument( + "objectiveValues", type=list, help="The solutions to be saved. Maybe these are the database indices???", required=True, + location="json", +) + +choose_parser = reqparse.RequestParser() + +choose_parser.add_argument( + "problemID", + type=int, + help="The id of the problem these solutions are for.", + required=True, ) +choose_parser.add_argument( + "solution", + type=float, + help="The solution to be saved. 
Maybe these are the database indices???", + required=True, + action="append", +) + +get_decision_variables_parser = reqparse.RequestParser() + +get_decision_variables_parser.add_argument( + "problemID", + type=int, + help="The id of the problem these solutions are for.", + required=True, +) + +get_decision_variables_parser.add_argument( + "UserName", + type=str, + help="The username of the user.", + required=True, +) -class NIMBUSResponse(dataclass): + +@dataclass +class NIMBUSResponse: """The response from most NIMBUS endpoints.""" objective_names: list[str] is_maximized: list[bool] lower_bounds: list[float] upper_bounds: list[float] - previousp_preference: list[float] + previous_preference: list[float] current_solutions: list[list[float]] saved_solutions: list[list[float]] all_solutions: list[list[float]] @@ -120,12 +178,14 @@ class Initialize(Resource): @jwt_required() @role_required(USER_ROLE, GUEST_ROLE) def post(self): + """Initialize the NIMBUS method. + """ # Parsing the request data = initialize_parser.parse_args() - problem_id = data["problemId"] initial_solution = data["initialSolution"] + problem_id = data["problemID"] # Make sure that the initial solution is a list or None - if initial_solution is not None or not isinstance(initial_solution, list): + if initial_solution is not None and not isinstance(initial_solution, list): return {"message": "Initial solution must be a list or None"}, 400 # Getting the problem from the database, annoying to extract to a function because # of database session issues @@ -158,34 +218,76 @@ def post(self): "message": "No problem with given ID found for the current user." }, 404 - problem: DiscreteDataProblem | MOProblem = problem_query.problem_pickle - method = NIMBUS(problem, starting_point=np.array(initial_solution)) - request = method.start() + problem: Union[DiscreteDataProblem, MOProblem] = problem_query.problem_pickle ideal = problem.ideal nadir = problem.nadir + max_multiplier = np.array(json.loads(problem_query.minimize), dtype=int) + print(ideal) + print(nadir) + ideal_nadir = np.vstack((ideal, nadir)) - ideal_nadir = ideal_nadir * problem._max_multiplier + ideal_nadir = ideal_nadir * max_multiplier lower_bounds = np.min(ideal_nadir, axis=0) upper_bounds = np.max(ideal_nadir, axis=0) + if initial_solution is not None: + initial_solution = np.array(initial_solution, dtype=float) * max_multiplier + + method = NIMBUS(problem, starting_point=initial_solution) + request = method.start() + current_solution = request[0].content["objective_values"] * max_multiplier - # TODO: Get the actual current solutions, saved solutions, and all solutions - # TODO: Also, save the current solutions to the database + preference = (ideal + nadir) / 2 + preference = preference * max_multiplier + + current_preference_db = Preference( + method="NIMBUS", + preference={"initial preference": preference.tolist()}, + date=datetime.datetime.now(), + ) + + db.session.add(current_preference_db) + db.session.commit() + db.session.refresh(current_preference_db) + + # Remove previous solutions + previous_saved_solutions = UTOPIASolutionArchive.query.filter_by( + user_id=current_user_id, + problem_id=problem_id, + ).all() + + # Is this good practice? 
Alternatively we could just set the current solutions to saved=False + for solution in previous_saved_solutions: + db.session.delete(solution) + db.session.commit() + + db.session.add( + UTOPIASolutionArchive( + user_id=current_user_id, + problem_id=problem_id, + preference=current_preference_db.id, + method_name="NIMBUS", + objectives=current_solution.tolist(), + variables=[], # Don't have access to these yet + date=datetime.datetime.now(), + saved=True, + current=True, + chosen=False, + ) + ) + db.session.commit() response = NIMBUSResponse( objective_names=problem.objective_names, - is_maximized=[ - bool(multiplier == -1) for multiplier in problem._max_multiplier - ], + is_maximized=[bool(multiplier == -1) for multiplier in max_multiplier], lower_bounds=lower_bounds.tolist(), upper_bounds=upper_bounds.tolist(), - previousPreference=initial_solution + previous_preference=initial_solution or ((lower_bounds + upper_bounds) / 2).tolist(), - current_solutions=request.current_solutions, - saved_solutions=request.saved_solutions, - all_solutions=request.all_solutions, + current_solutions=[current_solution.tolist()], + saved_solutions=[current_solution.tolist()], + all_solutions=[current_solution.tolist()], ) - print(response) return asdict(response), 200 @@ -194,8 +296,9 @@ class Iterate(Resource): @jwt_required() @role_required(USER_ROLE, GUEST_ROLE) def post(self): + """Iterate the NIMBUS method.""" data = iterate_parser.parse_args() - problem_id = data["problemId"] + problem_id = data["problemID"] preference = data["preference"] reference_solution = data["referenceSolution"] @@ -230,62 +333,323 @@ def post(self): "message": "No problem with given ID found for the current user." }, 404 - problem: DiscreteDataProblem | MOProblem = problem_query.problem_pickle + problem: Union[DiscreteDataProblem, MOProblem] = problem_query.problem_pickle + max_multiplier = np.array(json.loads(problem_query.minimize), dtype=int) + ideal = problem.ideal + nadir = problem.nadir + ideal_nadir = np.vstack((ideal, nadir)) + ideal_nadir = ideal_nadir * max_multiplier + lower_bounds = np.min(ideal_nadir, axis=0) + upper_bounds = np.max(ideal_nadir, axis=0) - last_request = method_query.last_request + preference = np.array(preference, dtype=float) * max_multiplier + reference_solution = np.array(reference_solution, dtype=float) * max_multiplier - # cast lists, which have numerical content, to numpy arrays - user_response = numpify_dict_items(user_response_raw) + # Check if classification preference is valid. + # At least one element of preference must be less than or equal to reference solution + # and at least one element of preference must be greater than or equal to reference solution. 
+ pref_less = np.less_equal(preference, reference_solution) + pref_greater = np.greater_equal(preference, reference_solution) - try: - last_request.response = user_response - new_request = method.iterate(last_request) - if isinstance( - new_request, tuple - ): # For methods that return mutliple object from an iterate call (e.g., NIMBUS (for now) and EA methods) - new_request = new_request[0] - - method_query.method_pickle = method - method_query.last_request = new_request - db.session.commit() - except Exception as e: - print(f"DEBUG: {e}") - # error, could not iterate, internal server error - if isinstance(last_request, tuple): - last_request_dump = [ - json.dumps(r.content, cls=NumpyEncoder, ignore_nan=True) - for r in last_request - ] - else: - last_request_dump = json.dumps( - last_request.content, cls=NumpyEncoder, ignore_nan=True - ) + if not np.any(pref_less) or not np.any(pref_greater): return { - "message": "Could not iterate the method with the given response", - "last_request": last_request_dump, + "message": ( + "The preference must be valid classification preference." + " At least one element of preference must be less than or equal to reference solution" + " and at least one element of preference must be greater than or equal to reference solution." + ) }, 400 - # we dump the response first so that we can have it encoded into valid JSON using a custom encoder - # ignore_nan=True will ensure np.nan is coverted to valid JSON value 'null'. + method = NIMBUS(problem, starting_point=reference_solution) + request: NimbusClassificationRequest = method.start()[0] + + classes = [None for _ in range(len(preference))] + levels = [None for _ in range(len(preference))] + + for i, (pref, ref) in enumerate(zip(preference, reference_solution)): + if pref == ideal[i]: + classes[i] = "<" + levels[i] = ideal[i] + elif pref == nadir[i]: + classes[i] = "0" + levels[i] = nadir[i] + elif pref == ref: + classes[i] = "=" + levels[i] = ref + elif pref < ref: + classes[i] = "<=" + levels[i] = pref + elif pref > ref: + classes[i] = ">=" + levels[i] = pref + else: + return {"message": "Something went wrong with the classification."}, 400 + response = { + "classifications": classes, + "number_of_solutions": data["numSolutions"], + "levels": np.array(levels), + } + request.response = response + + request = method.iterate(request)[0] + current_solutions = request.content["objectives"] * max_multiplier + + response["levels"] = response["levels"].tolist() + + current_preference_db = Preference( + method="NIMBUS", + preference={"classification preference": response}, + date=datetime.datetime.now(), + ) + db.session.add(current_preference_db) + db.session.commit() + db.session.refresh(current_preference_db) + + # Set previous current solutions to saved=False + previous_current_solutions = UTOPIASolutionArchive.query.filter_by( + user_id=current_user_id, + problem_id=problem_id, + current=True, + ).all() + for solution in previous_current_solutions: + solution.current = False + db.session.commit() + + for solution in current_solutions: + db.session.add( + UTOPIASolutionArchive( + user_id=current_user_id, + problem_id=problem_id, + preference=current_preference_db.id, + method_name="NIMBUS", + objectives=solution.tolist(), + variables=[], # Don't have access to these yet + date=datetime.datetime.now(), + saved=False, + current=True, + chosen=False, + ) + ) + db.session.commit() + + saved_solutions = UTOPIASolutionArchive.query.filter_by( + user_id=current_user_id, + problem_id=problem_id, + saved=True, + 
).all() + + saved_solutions = [solution.objectives for solution in saved_solutions] + + all_solutions = UTOPIASolutionArchive.query.filter_by( + user_id=current_user_id, + problem_id=problem_id, + ).all() - response = json.dumps(new_request.content, cls=NumpyEncoder, ignore_nan=True) + all_solutions = [solution.objectives for solution in all_solutions] - # ok - # We will deserialize the response into a Python dict here because flask-restx will automatically - # serialize the response into valid JSON. - return {"response": json.loads(response)}, 200 + response = NIMBUSResponse( + objective_names=problem.objective_names, + is_maximized=[bool(multiplier == -1) for multiplier in max_multiplier], + lower_bounds=lower_bounds.tolist(), + upper_bounds=upper_bounds.tolist(), + previous_preference=(response["levels"] * max_multiplier).tolist(), + current_solutions=current_solutions.tolist(), + saved_solutions=saved_solutions, + all_solutions=all_solutions, + ) + print(response) + # Temporary return to satisfy linter + return asdict(response), 200 class Intermediate(Resource): - @jwt_required + @jwt_required() @role_required(USER_ROLE, GUEST_ROLE) def post(self): + """Generate intermediate solutions. Doesn't work yet.""" pass class Save(Resource): - @jwt_required + @jwt_required() + @role_required(USER_ROLE, GUEST_ROLE) + def post(self): + """Save or highlight solutions.""" + # Parsing the request + data = save_parser.parse_args() + problem_id = data["problemID"] + objective_values = data["objectiveValues"] + + # Getting the problem from the database, annoying to extract to a function because + # of database session issues + try: + claims = get_jwt() + current_user = get_jwt_identity() + + if claims["role"] == USER_ROLE: + current_user_id = ( + UserModel.query.filter_by(username=current_user).first().id + ) + problem_query = Problem.query.filter_by( + id=problem_id, user_id=current_user_id + ).first() + elif claims["role"] == GUEST_ROLE: + current_user_id = ( + GuestUserModel.query.filter_by(username=current_user).first().id + ) + problem_query = GuestProblem.query.filter_by( + id=problem_id, guest_id=current_user_id + ).first() + except Exception as e: + print(f"DEBUG: {e}") + # not found + return {"message": f"Could not find problem with id={problem_id}."}, 404 + + if problem_query is None: + # not found + return { + "message": "No problem with given ID found for the current user." 
+ }, 404 + + problem: Union[DiscreteDataProblem, MOProblem] = problem_query.problem_pickle + ideal = problem.ideal + nadir = problem.nadir + max_multiplier = np.array(json.loads(problem_query.minimize), dtype=int) + + ideal_nadir = np.vstack((ideal, nadir)) + ideal_nadir = ideal_nadir * max_multiplier + lower_bounds = np.min(ideal_nadir, axis=0) + upper_bounds = np.max(ideal_nadir, axis=0) + # Get solutions from database + solutions = UTOPIASolutionArchive.query.filter_by( + user_id=current_user_id, + problem_id=problem_id, + saved=False, + ).all() + + for obj_vector in objective_values: + for solution in solutions: + if np.allclose(solution.objectives, obj_vector): + solution.saved = True + print(f"Saved solution {solution.id}") + + db.session.commit() + + saved_solutions = UTOPIASolutionArchive.query.filter_by( + user_id=current_user_id, + problem_id=problem_id, + saved=True, + ).all() + + all_solutions = UTOPIASolutionArchive.query.filter_by( + user_id=current_user_id, + problem_id=problem_id, + ).all() + + current_solutions = UTOPIASolutionArchive.query.filter_by( + user_id=current_user_id, + problem_id=problem_id, + current=True, + ).all() + + response = NIMBUSResponse( + objective_names=problem.objective_names, + is_maximized=[bool(multiplier == -1) for multiplier in max_multiplier], + lower_bounds=lower_bounds.tolist(), + upper_bounds=upper_bounds.tolist(), + previous_preference=data["previousPreference"], + current_solutions=[solution.objectives for solution in current_solutions], + saved_solutions=[solution.objectives for solution in saved_solutions], + all_solutions=[solution.objectives for solution in all_solutions], + ) + + return asdict(response), 200 + + +class Choose(Resource): + @jwt_required() + @role_required(USER_ROLE, GUEST_ROLE) + def post(self): + """Choose a solution as the final solution.""" + # Parsing the request + data = choose_parser.parse_args() + problem_id = data["problemID"] + chosen_solution = data["solution"] + + # Getting the problem from the database, annoying to extract to a function because + # of database session issues + try: + claims = get_jwt() + current_user = get_jwt_identity() + + if claims["role"] == USER_ROLE: + current_user_id = ( + UserModel.query.filter_by(username=current_user).first().id + ) + problem_query = Problem.query.filter_by( + id=problem_id, user_id=current_user_id + ).first() + elif claims["role"] == GUEST_ROLE: + current_user_id = ( + GuestUserModel.query.filter_by(username=current_user).first().id + ) + problem_query = GuestProblem.query.filter_by( + id=problem_id, guest_id=current_user_id + ).first() + except Exception as e: + print(f"DEBUG: {e}") + # not found + return {"message": f"Could not find problem with id={problem_id}."}, 404 + + if problem_query is None: + # not found + return { + "message": "No problem with given ID found for the current user." 
+ }, 404 + + # Ensure that no other solution is chosen + chosen_solutions = UTOPIASolutionArchive.query.filter_by( + user_id=current_user_id, + problem_id=problem_id, + chosen=True, + ).all() + + if len(chosen_solutions) > 0: + return {"message": "Another solution has already been chosen."}, 400 + + # Get solutions from database + solutions = UTOPIASolutionArchive.query.filter_by( + user_id=current_user_id, + problem_id=problem_id, + ).all() + + for solution in solutions: + if np.allclose(solution.objectives, chosen_solution): + solution.chosen = True + print(f"Chosen solution {solution.id}") + + db.session.commit() + + chosen_solutions = UTOPIASolutionArchive.query.filter_by( + user_id=current_user_id, + problem_id=problem_id, + chosen=True, + ).all() + + return {"message": "Solution chosen."}, 200 + + +class GetDecisionVariables(Resource): + @jwt_required() + @role_required(USER_ROLE, GUEST_ROLE) def post(self): + """Get the decision variables for the chosen solution.""" + data = get_decision_variables_parser.parse_args() + problem_id = data["problemID"] + user_name = data["UserName"] + + # Get data from the database? Get data from file? Juho can decide. + # Check the previous post methods for how to get data from the database. + pass diff --git a/resources/problem_resources.py b/resources/problem_resources.py index 4f25bd9..1e26394 100644 --- a/resources/problem_resources.py +++ b/resources/problem_resources.py @@ -14,11 +14,22 @@ from flask_jwt_extended import get_jwt_identity, jwt_required, get_jwt from flask_restx import Resource, reqparse from models.problem_models import Problem, GuestProblem -from models.user_models import UserModel, GuestUserModel, role_required, USER_ROLE, GUEST_ROLE +from models.user_models import ( + UserModel, + GuestUserModel, + role_required, + USER_ROLE, + GUEST_ROLE, +) from utilities.expression_parser import numpify_expressions # The vailable problem types -available_problem_types = ["Analytical", "Discrete", "Classification PIS", "Test problem"] +available_problem_types = [ + "Analytical", + "Discrete", + "Classification PIS", + "Test problem", +] supported_analytical_problem_operators = ["+", "-", "*", "/"] # Problem creation base parser @@ -152,6 +163,7 @@ required=True, ) + def get_problem_info(problem_query): # From model problem_id = problem_query.id @@ -238,17 +250,21 @@ def get(self): role = get_jwt()["role"] if role == USER_ROLE: - current_user_id = UserModel.query.filter_by(username=current_user).first().id - else: # guest role - current_user_id = GuestUserModel.query.filter_by(username=current_user).first().id + current_user_id = ( + UserModel.query.filter_by(username=current_user).first().id + ) + else: # guest role + current_user_id = ( + GuestUserModel.query.filter_by(username=current_user).first().id + ) # TODO: remove try catch block and check the problems query try: if role == USER_ROLE: problems = Problem.query.filter_by(user_id=current_user_id).all() - else: # guest role + else: # guest role problems = GuestProblem.query.filter_by(user_id=current_user_id).all() - + response = { "problems": [ { @@ -309,12 +325,18 @@ def get(self): claims = get_jwt() current_user = get_jwt_identity() - if claims["role"] == USER_ROLE: - current_user_id = UserModel.query.filter_by(username=current_user).first().id - problem_queries = Problem.query.filter_by( user_id=current_user_id).all() + if claims["role"] == USER_ROLE: + current_user_id = ( + UserModel.query.filter_by(username=current_user).first().id + ) + problem_queries = 
Problem.query.filter_by(user_id=current_user_id).all() elif claims["role"] == GUEST_ROLE: - current_user_id = GuestUserModel.query.filter_by(username=current_user).first().id - problem_queries = GuestProblem.query.filter_by(user_id=current_user_id).all() + current_user_id = ( + GuestUserModel.query.filter_by(username=current_user).first().id + ) + problem_queries = GuestProblem.query.filter_by( + user_id=current_user_id + ).all() else: return {"message": "User role not found."}, 404 @@ -331,13 +353,16 @@ def get(self): # to deal with in the frontend. # print(problem_queries) - problems = [get_problem_info(problem_query) - for problem_query in problem_queries] + problems = [ + get_problem_info(problem_query) for problem_query in problem_queries + ] return problems, 200 except Exception as e: print(f"DEBUG (while fetching all problem info): {e}") - return {"message": "Encountered internal errror while fetching info for all problems"}, 500 + return { + "message": "Encountered internal error while fetching info for all problems" + }, 500 class ProblemCreation(Resource): @@ -667,3 +692,4 @@ def post(self): "owner": current_user, } return response, 201 + return {"message": "Error"}, 500 \ No newline at end of file
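
A note on the app.py hunk above: the PostgreSQL credentials are hardcoded in the module. Below is a minimal sketch of building the same connection URI from environment variables instead; the variable names and fallback values are illustrative and not part of the patch.

import os

from flask import Flask

app = Flask(__name__)

# Read connection parameters from the environment so credentials are not committed.
# All names and defaults here are assumptions for illustration only.
db_user = os.environ.get("DB_USER", "desdeo")
db_password = os.environ.get("DB_PASSWORD", "")
db_host = os.environ.get("DB_HOST", "localhost")
db_port = os.environ.get("DB_PORT", "5432")
db_name = os.environ.get("DB_NAME", "DESDEO")

app.config["SQLALCHEMY_DATABASE_URI"] = (
    f"postgresql://{db_user}:{db_password}@{db_host}:{db_port}/{db_name}"
)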
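The patch also introduces two new tables (preference and UTOPIAsolutionarchive), which must exist in the configured PostgreSQL database before the /nimbus/* endpoints are used. A minimal sketch follows, assuming the project creates tables directly through Flask-SQLAlchemy rather than a migration tool (the patch itself does not show which approach is used).

# Illustrative: create all tables registered on the shared SQLAlchemy instance.
from app import app                # Flask app configured in app.py
from database import db            # shared SQLAlchemy() instance used by the models
import models.method_models        # noqa: F401  (registers Preference)
import models.problem_models       # noqa: F401  (registers UTOPIASolutionArchive)

with app.app_context():
    db.create_all()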
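Finally, a sketch of how a client could exercise the new endpoints end to end. The paths and field names are taken from the resources and parsers in this patch; the base URL, the JWT, and all numeric values are made up for illustration, and the exact payload shapes accepted by reqparse may differ.

import requests

BASE = "http://localhost:5000"                        # assumed development server
HEADERS = {"Authorization": "Bearer <access token>"}  # JWT from the API's auth endpoints

# 1. Initialize NIMBUS for a problem; initialSolution may be None.
state = requests.post(f"{BASE}/nimbus/initialize",
                      json={"problemID": 1, "initialSolution": None},
                      headers=HEADERS).json()

# 2. Iterate with a reference point, the solution it is classified against,
#    and the number of new solutions to generate.
state = requests.post(f"{BASE}/nimbus/iterate",
                      json={"problemID": 1,
                            "preference": [0.5, 2.0, 3.0],
                            "referenceSolution": state["current_solutions"][0],
                            "numSolutions": 2},
                      headers=HEADERS).json()

# 3. Mark interesting solutions as saved, identified by their objective vectors.
requests.post(f"{BASE}/nimbus/save",
              json={"problemID": 1,
                    "previousPreference": state["previous_preference"],
                    "objectiveValues": state["current_solutions"]},
              headers=HEADERS)

# 4. Choose the final solution.
requests.post(f"{BASE}/nimbus/choose",
              json={"problemID": 1, "solution": state["current_solutions"][0]},
              headers=HEADERS)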