diff --git a/.github/workflows/js-sdk.yml b/.github/workflows/js-sdk.yml
index fa9ffc6e62..b087469e1f 100644
--- a/.github/workflows/js-sdk.yml
+++ b/.github/workflows/js-sdk.yml
@@ -17,7 +17,7 @@ jobs:
- name: Gathering deps
run: |
sudo apt-get update
- sudo apt-get install -y git python3-pip python3-venv python3-setuptools tmux redis
+ sudo apt-get install -y git python3-pip python3-venv python3-setuptools tmux redis nginx
sudo pip3 install poetry
sudo poetry run pip3 install pytest
- name: Install
diff --git a/docs/architecture/decisions/0008-add-kwargs-to-3bot-start.md b/docs/architecture/decisions/0008-add-kwargs-to-3bot-start.md
new file mode 100644
index 0000000000..38a7f6ae3c
--- /dev/null
+++ b/docs/architecture/decisions/0008-add-kwargs-to-3bot-start.md
@@ -0,0 +1,19 @@
+# 8. add_kwargs_to_3bot_start
+
+Date: 2020-09-27
+
+## Status
+
+Accepted
+
+## Context
+
+Adding packages with kwargs has some limitations, so kwargs are needed every time start is called, not only once when the package is added.
+
+## Decision
+
+Add kwargs passed to the package instance that will be saved locally, and can be retrieved every time the threebot server restarts and starts the package.
+
+## Consequences
+
+Any package that is added with kwargs will save them; on every restart of the package they are reloaded and used in the package's install.
diff --git a/docs/wiki/tutorials/add_marketplace_chatflow.md b/docs/wiki/tutorials/add_marketplace_chatflow.md
index 5910d9d45d..3207bea428 100644
--- a/docs/wiki/tutorials/add_marketplace_chatflow.md
+++ b/docs/wiki/tutorials/add_marketplace_chatflow.md
@@ -70,12 +70,34 @@
- And for returning your solution count should append your `{SOLUTION_TYPE}` in the `count_dict` in `count_solutions` method in the same module
## Add app in frontend
-- In the frontend, you just need to add your app object in `apps` dict in `packages/marketplace/frontend/App.vue`
- ```js
- {
- name: "App Name in frontend",
- type: "{SOLUTION_TYPE}",
- path: "/{your_app_name}",
- meta: { icon: "app_icon" },
- }
- ```
+- In the frontend, you just need to add your app object as shown below to the `apps` dict of the section you want your app listed under in `packages/marketplace/frontend/data.js`
+ - If you need to add another section, just create new one in the `SECTIONS` object with the same structure:
+ ```js
+ "SECTION NAME": {
+        titleToolTip: "Tooltip shown when hovering over the section title in the frontend",
+ apps: {
+          // list your application objects using the structure below
+ "App Name": {
+ name: "App Name in frontend",
+ type: "{SOLUTION_TYPE}", // defined in the previous steps
+ image: "./assets/appImage.png", // add your app image in the assets dir
+ disable: false, // make it true if you want to hide your app in the marketplace frontend
+ helpLink: "https://now10.threefold.io/docs", // link to application manual
+ description: "Description of your application"
+ },
+ },
+ },
+ ```
+  - If you just need to add your application to an existing section, add a new app object with the structure below inside the section object you want it listed in:
+ ```js
+ {
+ "App Name": {
+ name: "App Name in frontend",
+ type: "{SOLUTION_TYPE}", // defined in the previous steps
+ image: "./assets/appImage.png", // add your app image in the assets dir
+ disable: false, // make it true if you want to hide your app in the marketplace frontend
+ helpLink: "https://now10.threefold.io/docs", // link to application manual
+ description: "Description of your application"
+ },
+ }
+ ```
diff --git a/examplescripts/minio.py b/examplescripts/minio.py
index 409f22df67..37d6ffc840 100644
--- a/examplescripts/minio.py
+++ b/examplescripts/minio.py
@@ -18,7 +18,7 @@
PASSWORD = "supersecurepassowrd"
network_name = str(uuid.uuid4())
print(f"network name: {network_name}")
-BAD_NODES = set([])
+BAD_NODES = set(["A7FmQZ72h7FzjkJMGXmzLDFyfyxzitDZYuernGG97nv7"])
UP_FOR = 60 * 20 # number of seconds
@@ -55,6 +55,21 @@ def wait_workload(wid):
workload = zos.workloads.get(wid)
+def wait_zdb_workloads(zdb_wids):
+ # Looks like the workload_id can be set before the namespace
+ for wid in zdb_wids:
+ workload = zos.workloads.get(wid)
+ data = j.data.serializers.json.loads(workload.info.result.data_json)
+ if workload.info.result.message:
+ x = workload.info.result.message
+ raise Exception(f"Failed to initialize ZDB: {x}")
+ elif data.get("IP") or data.get("IPs"):
+ return
+ else:
+ sleep(1)
+ continue
+
+
def wait_pools(pools):
for pool in pools:
while pool.cus == 0:
@@ -286,6 +301,7 @@ def pick_minio_nodes(nodes):
backup_vol_id = deploy_volume(minio_backup_node.node_id, backup_pool)
zdb_wids = [x.id for x in zdb_workloads]
wait_workloads(zdb_wids)
+wait_zdb_workloads(zdb_wids)
wait_workload(tlog_workload.id)
wait_workload(master_vol_id)
wait_workload(backup_vol_id)
@@ -325,6 +341,7 @@ def pick_minio_nodes(nodes):
zdb_new_workloads = deploy_zdbs(zdb_later_nodes, zdb_new_pools)
zdb_new_wids = [x.id for x in zdb_new_workloads]
wait_workloads(zdb_new_wids)
+wait_zdb_workloads(zdb_new_wids)
new_namespace_config = get_namespace_config(zdb_new_workloads)
print("Removing three backup storages")
diff --git a/jumpscale/clients/explorer/explorer.py b/jumpscale/clients/explorer/explorer.py
index 31085d45a9..2978cd2fa1 100644
--- a/jumpscale/clients/explorer/explorer.py
+++ b/jumpscale/clients/explorer/explorer.py
@@ -18,6 +18,15 @@
from .users import Users
from .workloads import Workloads
+from jumpscale.loader import j
+
+
+def log_request(r, *args, **kwargs):
+ if j.config.get("EXPLORER_LOGS"):
+ j.logger.debug(
+ f"Request {r.request.url} method: {r.request.method} body: {r.request.body} headers: {r.request.headers}"
+ )
+
class Explorer(Client):
url = fields.String()
@@ -30,7 +39,7 @@ def __init__(self, url=None, identity_name=None, **kwargs):
else:
self._loaded_identity = identity.get_identity()
self._session = requests.Session()
- self._session.hooks = dict(response=raise_for_status)
+ self._session.hooks = dict(response=[log_request, raise_for_status])
secret = self._loaded_identity.nacl.signing_key.encode(Base64Encoder)
auth = HTTPSignatureAuth(
diff --git a/jumpscale/clients/explorer/models.py b/jumpscale/clients/explorer/models.py
index 255dce0b5a..abd1adb7b0 100644
--- a/jumpscale/clients/explorer/models.py
+++ b/jumpscale/clients/explorer/models.py
@@ -42,6 +42,7 @@ class Location(Base):
def __str__(self):
return ",".join([x for x in [self.continent, self.country, self.city] if x])
+
class Farm(Base):
id = fields.Integer()
threebot_id = fields.Integer()
@@ -55,7 +56,6 @@ class Farm(Base):
def __str__(self):
return " - ".join([x for x in [self.name, str(self.location)] if x])
-
class WorkloadsAmount(Base):
@@ -250,9 +250,14 @@ class ContainerNetworkConnection(Base):
class ContainerLogsRedis(Base):
+ # deprecated, please use secret_stdout instead
stdout = fields.String(default="")
+ # deprecated, please use secret_stderr instead
stderr = fields.String(default="")
+ secret_stdout = fields.String(default="")
+ secret_stderr = fields.String(default="")
+
class ContainerLogs(Base):
type = fields.String(default="")
diff --git a/jumpscale/clients/sendgrid/sendgrid.py b/jumpscale/clients/sendgrid/sendgrid.py
index a9fe7d764c..22b6e3df43 100644
--- a/jumpscale/clients/sendgrid/sendgrid.py
+++ b/jumpscale/clients/sendgrid/sendgrid.py
@@ -12,8 +12,8 @@
class SendGridClient(Client):
apikey = fields.String()
- def __init__(self):
- super().__init__()
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
def build_attachment(self, filepath, typ="application/pdf"):
"""
diff --git a/jumpscale/clients/stellar/stellar.py b/jumpscale/clients/stellar/stellar.py
index 142c578185..c32dd980ef 100644
--- a/jumpscale/clients/stellar/stellar.py
+++ b/jumpscale/clients/stellar/stellar.py
@@ -387,7 +387,7 @@ def transfer(
if asset != "XLM":
assetStr = asset.split(":")
if len(assetStr) != 2:
- raise Exception("Wrong asset format should be in format 'assetcode:issuer'")
+ raise Exception(f"Wrong asset format should be in format 'assetcode:issuer', but received {assetStr}")
asset = assetStr[0]
issuer = assetStr[1]
diff --git a/jumpscale/core/identity/__init__.py b/jumpscale/core/identity/__init__.py
index f105ffa8b8..8b86693a16 100644
--- a/jumpscale/core/identity/__init__.py
+++ b/jumpscale/core/identity/__init__.py
@@ -12,9 +12,9 @@
from jumpscale.sals.nettools import get_default_ip_config
DEFAULT_EXPLORER_URLS = {
- "mainnet": "https://explorer.grid.tf/api/v1/",
- "testnet": "https://explorer.testnet.grid.tf/api/v1/",
- "devnet": "https://explorer.devnet.grid.tf/api/v1/",
+ "mainnet": "https://explorer.grid.tf/api/v1",
+ "testnet": "https://explorer.testnet.grid.tf/api/v1",
+ "devnet": "https://explorer.devnet.grid.tf/api/v1",
}
EXPLORER_URLS = js_config.set_default("explorer_api_urls", DEFAULT_EXPLORER_URLS)
@@ -54,6 +54,7 @@ def __init__(
Raises: Input: when params are missing
"""
self._explorer = None
+ explorer_url = explorer_url.rstrip("/")
super().__init__(
tname=tname, email=email, words=words, explorer_url=explorer_url, _tid=_tid, admins=admins, *args, **kwargs,
)
@@ -108,6 +109,7 @@ def explorer(self):
js_config.set("has_migrated_explorer_url", True)
if self.explorer_url:
+ self.explorer_url = self.explorer_url.rstrip("/")
self._explorer = ex_factory.get_by_url_and_identity(self.explorer_url, identity_name=self.instance_name)
else:
self._explorer = ex_factory.get_default()
@@ -169,6 +171,11 @@ def register(self, host=None):
self.save()
return tid
+ def set_default(self):
+ from jumpscale.loader import j
+
+ return j.core.identity.set_default(self.instance_name)
+
def get_identity():
return IdentityFactory(Identity).me
diff --git a/jumpscale/packages/admin/actors/admin.py b/jumpscale/packages/admin/actors/admin.py
index d8a7c2e648..0719166105 100644
--- a/jumpscale/packages/admin/actors/admin.py
+++ b/jumpscale/packages/admin/actors/admin.py
@@ -116,17 +116,24 @@ def delete_identity(self, identity_instance_name: str) -> str:
def get_developer_options(self) -> str:
test_cert = j.core.config.set_default("TEST_CERT", False)
over_provision = j.core.config.set_default("OVER_PROVISIONING", False)
- return j.data.serializers.json.dumps({"data": {"test_cert": test_cert, "over_provision": over_provision}})
+ explorer_logs = j.core.config.set_default("EXPLORER_LOGS", False)
+ return j.data.serializers.json.dumps(
+ {"data": {"test_cert": test_cert, "over_provision": over_provision, "explorer_logs": explorer_logs}}
+ )
@actor_method
- def set_developer_options(self, test_cert: bool, over_provision: bool) -> str:
+ def set_developer_options(self, test_cert: bool, over_provision: bool, explorer_logs: bool) -> str:
j.core.config.set("TEST_CERT", test_cert)
j.core.config.set("OVER_PROVISIONING", over_provision)
- return j.data.serializers.json.dumps({"data": {"test_cert": test_cert, "over_provision": over_provision}})
+ j.core.config.set("EXPLORER_LOGS", explorer_logs)
+ return j.data.serializers.json.dumps(
+ {"data": {"test_cert": test_cert, "over_provision": over_provision, "explorer_logs": explorer_logs}}
+ )
@actor_method
def clear_blocked_nodes(self) -> str:
- j.sals.reservation_chatflow.reservation_chatflow.clear_blocked_nodes()
+ j.sals.reservation_chatflow.reservation_chatflow.clear_blocked_nodes()
return j.data.serializers.json.dumps({"data": "blocked nodes got cleared successfully."})
+
Actor = Admin
diff --git a/jumpscale/packages/admin/frontend/api.js b/jumpscale/packages/admin/frontend/api.js
index cadf8dd532..9235de1bec 100644
--- a/jumpscale/packages/admin/frontend/api.js
+++ b/jumpscale/packages/admin/frontend/api.js
@@ -168,12 +168,12 @@ const apiClient = {
url: `${baseURL}/admin/get_developer_options`
})
},
- setDeveloperOptions: (testCert, overProvision) => {
+ setDeveloperOptions: (testCert, overProvision, explorerLogs) => {
return axios({
url: `${baseURL}/admin/set_developer_options`,
method: "post",
headers: { 'Content-Type': 'application/json' },
- data: { test_cert: testCert, over_provision: overProvision }
+ data: { test_cert: testCert, over_provision: overProvision, explorer_logs: explorerLogs }
})
},
clearBlockedNodes: () => {
diff --git a/jumpscale/packages/admin/frontend/components/settings/Settings.vue b/jumpscale/packages/admin/frontend/components/settings/Settings.vue
index c9c3c8ed55..26b48a28fb 100644
--- a/jumpscale/packages/admin/frontend/components/settings/Settings.vue
+++ b/jumpscale/packages/admin/frontend/components/settings/Settings.vue
@@ -6,7 +6,11 @@
-
+
mdi-plusAdd
@@ -24,7 +28,8 @@
label
close
close-icon="mdi-close-circle-outline"
- >{{ admin }}
+ >{{ admin }}
@@ -46,7 +51,8 @@
:key="i"
:color="getColor(identity.instance_name)"
@click="openIdentity(identity.instance_name)"
- >{{identity.instance_name}}
+ >{{ identity.instance_name }}
@@ -70,13 +76,21 @@
:label="`Allow over provisioning`"
@click.stop="setDeveloperOptions()"
>
+
Clear blocked nodes
+ >Clear blocked nodes
@@ -84,9 +98,20 @@
-
-
-
+
+
+
@@ -118,6 +143,7 @@ module.exports = {
identities: [],
testCert: false,
overProvision: false,
+ explorerLogs: false,
};
},
methods: {
@@ -188,6 +214,7 @@ module.exports = {
let developerOptions = JSON.parse(response.data).data;
this.testCert = developerOptions["test_cert"];
this.overProvision = developerOptions["over_provision"];
+ this.explorerLogs = developerOptions["explorer_logs"];
})
.finally(() => {
this.loading.developerOptions = false;
@@ -195,7 +222,11 @@ module.exports = {
},
setDeveloperOptions() {
this.$api.admins
- .setDeveloperOptions(this.testCert, this.overProvision)
+ .setDeveloperOptions(
+ this.testCert,
+ this.overProvision,
+ this.explorerLogs
+ )
.then((response) => {
this.alert("Developer options updated", "success");
})
diff --git a/jumpscale/packages/farmmanagement/frontend/views/farmmanagement/farmmanagement.html b/jumpscale/packages/farmmanagement/frontend/views/farmmanagement/farmmanagement.html
index a4243a499b..aae0eae27d 100644
--- a/jumpscale/packages/farmmanagement/frontend/views/farmmanagement/farmmanagement.html
+++ b/jumpscale/packages/farmmanagement/frontend/views/farmmanagement/farmmanagement.html
@@ -159,10 +159,10 @@
Pricing
-
+
-
+
diff --git a/jumpscale/packages/marketplace/chats/blog.py b/jumpscale/packages/marketplace/chats/blog.py
index 4c2e31f0eb..98ed0ee148 100644
--- a/jumpscale/packages/marketplace/chats/blog.py
+++ b/jumpscale/packages/marketplace/chats/blog.py
@@ -6,6 +6,7 @@ class BlogDeploy(Publisher):
title = "Deploy a Blog"
SOLUTION_TYPE = "blog" # chatflow used to deploy the solution
+ EXAMPLE_URL = "https://github.com/threefoldfoundation/blog_threefold"
@chatflow_step(title="blog Setup")
def configuration(self):
@@ -14,7 +15,8 @@ def configuration(self):
title = form.string_ask("Title", required=True)
url = form.string_ask("Repository URL", required=True, is_git_url=True)
branch = form.string_ask("Branch", required=True)
- form.ask(Publisher.MD_CONFIG_MSG, md=True)
+ msg = self.get_mdconfig_msg()
+ form.ask(msg, md=True)
self.envars = {
"TYPE": "blog",
"NAME": "entrypoint",
diff --git a/jumpscale/packages/marketplace/chats/cryptpad.py b/jumpscale/packages/marketplace/chats/cryptpad.py
index 9181065c6b..b7c04d3888 100644
--- a/jumpscale/packages/marketplace/chats/cryptpad.py
+++ b/jumpscale/packages/marketplace/chats/cryptpad.py
@@ -35,11 +35,10 @@ def reservation(self):
"form_info": {"chatflow": self.SOLUTION_TYPE, "Solution name": self.solution_name},
}
self.solution_metadata.update(metadata)
-
# reserve subdomain
self.workload_ids.append(
deployer.create_subdomain(
- pool_id=self.pool_id,
+ pool_id=self.gateway_pool.pool_id,
gateway_id=self.gateway.node_id,
subdomain=self.domain,
addresses=self.addresses,
@@ -111,6 +110,7 @@ def reservation(self):
solution_ip=self.ip_address,
solution_port=3000,
enforce_https=False,
+ proxy_pool_id=self.gateway_pool.pool_id,
node_id=self.selected_node.node_id,
solution_uuid=self.solution_id,
**self.solution_metadata,
diff --git a/jumpscale/packages/marketplace/chats/discourse.py b/jumpscale/packages/marketplace/chats/discourse.py
index 8398d4e7c7..490f81a9db 100644
--- a/jumpscale/packages/marketplace/chats/discourse.py
+++ b/jumpscale/packages/marketplace/chats/discourse.py
@@ -69,7 +69,7 @@ def reservation(self):
# reserve subdomain
_id = deployer.create_subdomain(
- pool_id=self.pool_id,
+ pool_id=self.gateway_pool.pool_id,
gateway_id=self.gateway.node_id,
subdomain=self.domain,
addresses=self.addresses,
diff --git a/jumpscale/packages/marketplace/chats/gitea.py b/jumpscale/packages/marketplace/chats/gitea.py
index 2e9f784cc0..9c7f479f5d 100644
--- a/jumpscale/packages/marketplace/chats/gitea.py
+++ b/jumpscale/packages/marketplace/chats/gitea.py
@@ -44,7 +44,7 @@ def reservation(self):
self.solution_metadata.update(metadata)
# reserve subdomain
subdomain_wid = deployer.create_subdomain(
- pool_id=self.pool_id,
+ pool_id=self.gateway_pool.pool_id,
gateway_id=self.gateway.node_id,
subdomain=self.domain,
addresses=self.addresses,
diff --git a/jumpscale/packages/marketplace/chats/mattermost.py b/jumpscale/packages/marketplace/chats/mattermost.py
index 7ab788ce8b..61e9475a8e 100644
--- a/jumpscale/packages/marketplace/chats/mattermost.py
+++ b/jumpscale/packages/marketplace/chats/mattermost.py
@@ -119,6 +119,7 @@ def reservation(self):
solution_port=8065,
enforce_https=False,
node_id=self.selected_node.node_id,
+ proxy_pool_id=self.gateway_pool.pool_id,
solution_uuid=self.solution_id,
**self.solution_metadata,
)
diff --git a/jumpscale/packages/marketplace/chats/peertube.py b/jumpscale/packages/marketplace/chats/peertube.py
index d13cf82145..020d4312d1 100644
--- a/jumpscale/packages/marketplace/chats/peertube.py
+++ b/jumpscale/packages/marketplace/chats/peertube.py
@@ -53,7 +53,7 @@ def reservation(self):
# reserve subdomain
_id = deployer.create_subdomain(
- pool_id=self.pool_id,
+ pool_id=self.gateway_pool.pool_id,
gateway_id=self.gateway.node_id,
subdomain=self.domain,
addresses=self.addresses,
diff --git a/jumpscale/packages/marketplace/chats/publisher.py b/jumpscale/packages/marketplace/chats/publisher.py
index 82b5ced801..6de1052d5b 100644
--- a/jumpscale/packages/marketplace/chats/publisher.py
+++ b/jumpscale/packages/marketplace/chats/publisher.py
@@ -9,16 +9,8 @@
class Publisher(MarketPlaceAppsChatflow):
FLIST_URL = "https://hub.grid.tf/ahmed_hanafy_1/ahmedhanafy725-pubtools-trc.flist"
SOLUTION_TYPE = "publisher" # chatflow used to deploy the solution
- MD_CONFIG_MSG = dedent(
- """\
- Few parameters are needed to be able to publish your content online
- - Title is the title shown up on your published content
- - Repository URL is a valid git repository URL where your content lives e.g (https://github.com/threefoldfoundation/info_gridmanual)
- - Branch is the deployment branch that exists on your git repository to be used as the version of your content to publish.
+ EXAMPLE_URL = "https://github.com/threefoldfoundation/info_gridmanual"
- for more information on the publishing tools please check the [manual](https://manual2.threefold.io/)
- """
- )
title = "Publisher"
steps = [
"get_solution_name",
@@ -32,6 +24,19 @@ class Publisher(MarketPlaceAppsChatflow):
storage_url = "zdb://hub.grid.tf:9900"
query = {"cru": 1, "mru": 1, "sru": 2}
+ def get_mdconfig_msg(self):
+ msg = dedent(
+ f"""\
+ Few parameters are needed to be able to publish your content online
+ - Title is the title shown up on your published content
+ - Repository URL is a valid git repository URL where your content lives e.g ({self.EXAMPLE_URL})
+ - Branch is the deployment branch that exists on your git repository to be used as the version of your content to publish.
+
+ for more information on the publishing tools please check the [manual](https://manual.threefold.io/)
+ """
+ )
+ return msg
+
@chatflow_step(title="Solution Settings")
def configuration(self):
user_info = self.user_info()
@@ -44,7 +49,8 @@ def configuration(self):
title = form.string_ask("Title", required=True)
url = form.string_ask("Repository URL", required=True, is_git_url=True)
branch = form.string_ask("Branch", required=True)
- form.ask(self.MD_CONFIG_MSG, md=True)
+ msg = self.get_mdconfig_msg()
+ form.ask(msg, md=True)
self.envars = {
"TYPE": ttype.value,
"NAME": "entrypoint",
diff --git a/jumpscale/packages/marketplace/chats/taiga.py b/jumpscale/packages/marketplace/chats/taiga.py
index fa3129d1b2..80f7ee5cd3 100644
--- a/jumpscale/packages/marketplace/chats/taiga.py
+++ b/jumpscale/packages/marketplace/chats/taiga.py
@@ -57,7 +57,7 @@ def reservation(self):
# reserve subdomain
subdomain_wid = deployer.create_subdomain(
- pool_id=self.pool_id,
+ pool_id=self.gateway_pool.pool_id,
gateway_id=self.gateway.node_id,
subdomain=self.domain,
addresses=self.addresses,
diff --git a/jumpscale/packages/marketplace/chats/website.py b/jumpscale/packages/marketplace/chats/website.py
index 2b95c6ed2d..75417af008 100644
--- a/jumpscale/packages/marketplace/chats/website.py
+++ b/jumpscale/packages/marketplace/chats/website.py
@@ -6,6 +6,7 @@ class WebsiteDeploy(Publisher):
title = "Deploy a Website"
SOLUTION_TYPE = "website" # chatflow used to deploy the solution
+ EXAMPLE_URL = "https://github.com/xmonader/www_incubaid"
@chatflow_step(title="Website Setup")
def configuration(self):
@@ -14,7 +15,8 @@ def configuration(self):
title = form.string_ask("Title", required=True)
url = form.string_ask("Repository URL", required=True, is_git_url=True)
branch = form.string_ask("Branch", required=True)
- form.ask(Publisher.MD_CONFIG_MSG, md=True)
+ msg = self.get_mdconfig_msg()
+ form.ask(msg, md=True)
self.envars = {
"TYPE": "www",
"NAME": "entrypoint",
diff --git a/jumpscale/packages/marketplace/chats/wiki.py b/jumpscale/packages/marketplace/chats/wiki.py
index 63554f1a51..537ea2d94b 100644
--- a/jumpscale/packages/marketplace/chats/wiki.py
+++ b/jumpscale/packages/marketplace/chats/wiki.py
@@ -6,6 +6,7 @@ class WikiDeploy(Publisher):
title = "Deploy a Wiki"
SOLUTION_TYPE = "wiki" # chatflow used to deploy the solution
+ EXAMPLE_URL = "https://github.com/threefoldfoundation/info_gridmanual"
@chatflow_step(title="Wiki Setup")
def configuration(self):
@@ -14,7 +15,8 @@ def configuration(self):
title = form.string_ask("Title", required=True)
url = form.string_ask("Repository URL", required=True, is_git_url=True)
branch = form.string_ask("Branch", required=True)
- form.ask(Publisher.MD_CONFIG_MSG, md=True)
+ msg = self.get_mdconfig_msg()
+ form.ask(msg, md=True)
self.envars = {
"TYPE": "wiki",
"NAME": "entrypoint",
diff --git a/jumpscale/packages/marketplace/frontend/components/Home.vue b/jumpscale/packages/marketplace/frontend/components/Home.vue
index 3f9ab1f56f..a5d2e7235b 100644
--- a/jumpscale/packages/marketplace/frontend/components/Home.vue
+++ b/jumpscale/packages/marketplace/frontend/components/Home.vue
@@ -1,76 +1,50 @@
-
+
+ @change="viewWorkloads()"
+ >
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
- Apps
-
- Threefold demo applications
-
-
-
-
- {{app.icon}}
-
- {{app.name}}
- {{solutionCount[app.type]}}
-
-
-
- mdi-information-outline
-
-
- Go to How-to Manual
-
-
-
- {{app.description.length > SOLUTION_DESCRIPTION_MAXLENGTH ?
- app.description.slice(0, SOLUTION_DESCRIPTION_MAXLENGTH) + "..." :
- app.description}}
-
-
-
- New
- My Workloads
-
-
-
+
+
@@ -79,41 +53,54 @@
+
+
diff --git a/jumpscale/packages/marketplace/frontend/components/solutions/Info.vue b/jumpscale/packages/marketplace/frontend/components/solutions/Info.vue
index 73c56081f7..fb8f7952a0 100644
--- a/jumpscale/packages/marketplace/frontend/components/solutions/Info.vue
+++ b/jumpscale/packages/marketplace/frontend/components/solutions/Info.vue
@@ -1,12 +1,12 @@
-
+
- {{ title }}
- More details
+ App details
+ More details
-
+
@@ -15,12 +15,8 @@
{{ node }}
|
-
- {{ ip }} / ({{ node }})
+ |
+ {{ new Date(item * 1000).toLocaleString() }}
|
{{ item }} |
@@ -28,7 +24,7 @@
-
+
@@ -68,11 +64,6 @@ module.exports = {
json() {
return this.data;
},
- title() {
- return this.data.Name === undefined
- ? "Workload details"
- : "Solution details";
- },
KeysWithTypeList() {
return ["Node ids", "wids", "Active workload ids"];
},
diff --git a/jumpscale/packages/marketplace/frontend/components/solutions/Solution.vue b/jumpscale/packages/marketplace/frontend/components/solutions/Solution.vue
index 2d680e17b5..e35e7dbc5b 100644
--- a/jumpscale/packages/marketplace/frontend/components/solutions/Solution.vue
+++ b/jumpscale/packages/marketplace/frontend/components/solutions/Solution.vue
@@ -6,14 +6,13 @@
- {{solution.icon}} mdi-48px
{{solution.name}}
module.exports = {
- props: { type: String },
+ props: {
+ type: String,
+ },
+
components: {
"solution-info": httpVueLoader("./Info.vue"),
- "cancel-solution": httpVueLoader("./Delete.vue")
+ "cancel-solution": httpVueLoader("./Delete.vue"),
},
data() {
return {
@@ -104,30 +106,32 @@ module.exports = {
selected: null,
dialogs: {
info: false,
- cancelSolution: false
+ cancelSolution: false,
},
headers: [
{ text: "Name", value: "Name" },
{ text: "URL", value: "domain" },
{ text: "Expiration", value: "expiration" },
- { text: "Actions", value: "actions", sortable: false }
+ { text: "Actions", value: "actions", sortable: false },
],
deployedSolutions: [],
- solutions: [...Object.values(APPS)]
+ sections: SECTIONS,
};
},
computed: {
solution() {
- return this.solutions.find(obj => {
- return obj.type === this.type;
- });
- }
+ for (section in this.sections) {
+ if (Object.keys(this.sections[section].apps).includes(this.type)) {
+ return this.sections[section].apps[this.type];
+ }
+ }
+ },
},
methods: {
open(solutionId) {
this.$router.push({
name: "SolutionChatflow",
- params: { topic: solutionId }
+ params: { topic: solutionId },
});
},
restart(solutionId) {
@@ -148,17 +152,17 @@ module.exports = {
getDeployedSolutions(solution_type) {
this.$api.solutions
.getDeployed(solution_type)
- .then(response => {
+ .then((response) => {
this.deployedSolutions = response.data.data;
})
.finally(() => {
this.loading = false;
});
- }
+ },
},
mounted() {
this.getDeployedSolutions(this.type);
- }
+ },
};
diff --git a/jumpscale/packages/marketplace/frontend/data.js b/jumpscale/packages/marketplace/frontend/data.js
index aaf510660f..b5b576efdb 100644
--- a/jumpscale/packages/marketplace/frontend/data.js
+++ b/jumpscale/packages/marketplace/frontend/data.js
@@ -1,56 +1,102 @@
-const APPS = {
- wiki: {
- name: "Wiki",
- type: "wiki",
- image: "./assets/doc-flat.svg",
- description: "Wiki is a versatile online encyclopedia builder, accessible via a public web address."
+const SECTIONS = {
+ "DECENTRALIZED WE": {
+ titleToolTip: null,
+ apps: {
+ peertube: {
+ name: "Peertube",
+ type: "peertube",
+ image: "./assets/peertube.png",
+ disable: false,
+ helpLink: "https://now10.threefold.io/docs/video-sharing/",
+ description: "Peertube is an open-source video platform that uses peer-to-peer technologies to reduce load on individual servers when viewing videos."
+ },
+ website: {
+ name: "Website",
+ type: "website",
+ image: "./assets/web.png",
+ disable: false,
+ helpLink:"https://now10.threefold.io/docs/website-publisher/",
+ description: "Website is a P2P alternative to centralized cloud-hosted websites. Host your own website with access via a public web address."
+ },
+ wiki: {
+ name: "Wiki",
+ type: "wiki",
+ image: "./assets/doc-flat.svg",
+ disable: false,
+ helpLink: "https://now10.threefold.io/docs/wiki-publisher/",
+ description: "Wiki is a versatile online encyclopedia builder, accessible via a public web address."
+ },
+ blog: {
+ name: "Blog",
+ type: "blog",
+ image: "./assets/blog.png",
+ disable: false,
+ helpLink: "https://now10.threefold.io/docs/blog-publisher/",
+ description: "Blog is a P2P alternative to centralized blogging platforms like Tumblr or Blogspot."
+ },
+ },
},
- website: {
- name: "Website",
- type: "website",
- image: "./assets/web.png",
- description: "Website is a P2P alternative to centralized cloud-hosted websites. Host your own website with access via a public web address."
+ "DECENTRALIZED ME": {
+ titleToolTip: null,
+ apps: {},
},
- blog: {
- name: "Blog",
- type: "blog",
- image: "./assets/blog.png",
- description: "Blog is a P2P alternative to centralized blogging platforms like Tumblr or Blogspot."
+ "DECENTRALIZED OFFICE": {
+ titleToolTip: null,
+ apps: {
+ cryptpad: {
+ name: "Cryptpad",
+ type: "cryptpad",
+ image: "./assets/cryptpad.png",
+ disable: false,
+ helpLink: "https://now10.threefold.io/docs/dmcollab/",
+ description: "Cryptpad is a fully-secured, encrypted alternative to popular office tools and cloud services."
+ },
+ taiga: {
+ name: "Taiga",
+ type: "taiga",
+ image: "./assets/taiga.png",
+ disable: false,
+ helpLink: "https://now10.threefold.io/docs/dmcircles/",
+ description: "Taiga is a P2P alternative to centralized project management tool for multi-functional agile teams."
+ },
+ },
},
- mattermost: {
- name: "Mattermost",
- type: "mattermost",
- image: "./assets/mattermost.png",
- description: "Mattermost is a flexible, open source messaging platform that enables secure team collaboration."
+ "DECENTRALIZED CLOUD": {
+ titleToolTip: null,
+ apps: {},
},
- peertube: {
- name: "Peertube",
- type: "peertube",
- image: "./assets/peertube.png",
- description: "Peertube is an open-source video platform that uses peer-to-peer technologies to reduce load on individual servers when viewing videos."
+ "DECENTRALIZED CODE": {
+ titleToolTip: null,
+ apps: {
+ gitea: {
+ name: "Gitea",
+ type: "gitea",
+ image: "./assets/gitea.png",
+ disable: false,
+ helpLink: "https://now10.threefold.io/docs/gitea/#what-is-gitea-",
+ description: "Gitea is a painless self-hosted Git service. It is similar to GitHub, Bitbucket, and GitLab."
+ },
+ },
},
- gitea: {
- name: "Gitea",
- type: "gitea",
- image: "./assets/gitea.png",
- description: "Gitea is a painless self-hosted Git service. It is similar to GitHub, Bitbucket, and GitLab."
- },
- discourse: {
- name: "Discourse",
- type: "discourse",
- image: "./assets/discourse.png",
- description: "Discourse is an open source Internet forum and mailing list management software application built to educate members about civil community engagement."
- },
- cryptpad: {
- name: "Cryptpad",
- type: "cryptpad",
- image: "./assets/cryptpad.png",
- description: "Cryptpad is a fully-secured, encrypted alternative to popular office tools and cloud services."
- },
- taiga: {
- name: "Taiga",
- type: "taiga",
- image: "./assets/taiga.png",
- description: "Taiga is a P2P alternative to centralized project management tool for multi-functional agile teams."
+ "OTHERS": {
+ titleToolTip: null,
+ apps: {
+ mattermost: {
+ name: "Mattermost",
+ type: "mattermost",
+ image: "./assets/mattermost.png",
+ disable: false,
+ helpLink: "https://now10.threefold.io/docs",
+ description: "Mattermost is a flexible, open source messaging platform that enables secure team collaboration."
+ },
+ discourse: {
+ name: "Discourse",
+ type: "discourse",
+ image: "./assets/discourse.png",
+ disable: false,
+ helpLink: "https://now10.threefold.io/docs",
+ description: "Discourse is an open source Internet forum and mailing list management software application built to educate members about civil community engagement."
+ },
+ },
},
}
diff --git a/jumpscale/packages/marketplace/package.py b/jumpscale/packages/marketplace/package.py
index e471e50650..a607a36bf0 100644
--- a/jumpscale/packages/marketplace/package.py
+++ b/jumpscale/packages/marketplace/package.py
@@ -14,7 +14,7 @@ def install(self, **kwargs):
wallet.add_known_trustline("TFT")
wallet.save()
- def start(self):
+ def start(self, **kwargs):
location_actors_443 = j.sals.nginx.main.websites.default_443.locations.get(name="marketplace_actors")
location_actors_443.is_auth = False
location_actors_443.is_admin = False
diff --git a/jumpscale/packages/tfgrid_solutions/actors/solutions.py b/jumpscale/packages/tfgrid_solutions/actors/solutions.py
index 6aea5f216d..e821ba9d53 100644
--- a/jumpscale/packages/tfgrid_solutions/actors/solutions.py
+++ b/jumpscale/packages/tfgrid_solutions/actors/solutions.py
@@ -56,8 +56,18 @@ def migrate(self) -> str:
@actor_method
def list_pools(self, include_hidden) -> str:
+ def pool_farm_from_cache(cache_dict, pool):
+ for node_id in pool.node_ids:
+ if node_id in cache_dict:
+ return cache_dict[node_id]
+
+ def update_pool_farm_cache(cache_dict, pool, farm):
+ for node_id in pool.node_ids:
+ cache_dict[node_id] = farm
+
res = []
farm_names = {}
+ node_to_farm = {}
pool_factory = StoredFactory(PoolConfig)
workloads_dict = {w.id: w for w in j.sals.zos.workloads.list(j.core.identity.me.tid, NextAction.DEPLOY)}
for pool in j.sals.zos.pools.list():
@@ -77,13 +87,18 @@ def list_pools(self, include_hidden) -> str:
pool_dict["name"] = name
pool_dict["hidden"] = hidden
pool_dict["explorer_url"] = j.core.identity.me.explorer_url
- farm_id = deployer.get_pool_farm_id(pool.pool_id)
- if farm_id >= 0:
- farm = farm_names.get(farm_id)
- if not farm:
- farm = deployer._explorer.farms.get(farm_id)
- farm_names[farm_id] = farm
+ farm = pool_farm_from_cache(node_to_farm, pool)
+ if not farm:
+ farm_id = deployer.get_pool_farm_id(pool=pool)
+ if farm_id >= 0:
+ farm = farm_names.get(farm_id)
+ if not farm:
+ farm = deployer._explorer.farms.get(farm_id)
+ farm_names[farm_id] = farm
+ update_pool_farm_cache(node_to_farm, pool, farm)
+ if farm:
pool_dict["farm"] = farm.name
+
for i, wid in enumerate(pool_dict["active_workload_ids"]):
if wid in workloads_dict:
pool_dict["active_workload_ids"][i] = f"{workloads_dict[wid].info.workload_type.name} - {wid}"
diff --git a/jumpscale/packages/tfgrid_solutions/chats/flist.py b/jumpscale/packages/tfgrid_solutions/chats/flist.py
index 0c8dc3effb..02c840631f 100644
--- a/jumpscale/packages/tfgrid_solutions/chats/flist.py
+++ b/jumpscale/packages/tfgrid_solutions/chats/flist.py
@@ -20,11 +20,11 @@ class FlistDeploy(GedisChatBot):
"flist_network",
"flist_url",
"container_interactive",
+ "ipv6_config",
"container_node_id",
"container_logs",
"container_ip",
"container_env",
- "ipv6_config",
"reservation",
"success",
]
@@ -115,7 +115,7 @@ def flist_url(self):
continue
valid = True
- @chatflow_step(title="Container ineractive & EntryPoint")
+ @chatflow_step(title="Container Interactive & EntryPoint")
def container_interactive(self):
self.interactive = self.single_choice(
"Would you like access to your container through the web browser (coreX)?",
@@ -133,6 +133,14 @@ def container_interactive(self):
def container_env(self):
self.env.update(self.multi_values_ask("Set Environment Variables"))
+ @chatflow_step(title="Global IPv6 Address")
+ def ipv6_config(self):
+ self.public_ipv6 = deployer.ask_ipv6(self)
+ if self.public_ipv6:
+ self.ip_version = "IPv6"
+ else:
+ self.ip_version = None
+
@chatflow_step(title="Container node id")
def container_node_id(self):
query = {
@@ -140,9 +148,9 @@ def container_node_id(self):
"mru": math.ceil(self.resources["memory"] / 1024),
"sru": math.ceil(self.resources["disk_size"] / 1024),
}
- self.selected_node = deployer.ask_container_placement(self, self.pool_id, **query)
+ self.selected_node = deployer.ask_container_placement(self, self.pool_id, ip_version=self.ip_version, **query)
if not self.selected_node:
- self.selected_node = deployer.schedule_container(self.pool_id, **query)
+ self.selected_node = deployer.schedule_container(self.pool_id, ip_version=self.ip_version, **query)
@chatflow_step(title="Container logs")
def container_logs(self):
@@ -180,10 +188,6 @@ def container_ip(self):
"Please choose IP Address for your solution", free_ips, default=free_ips[0], required=True
)
- @chatflow_step(title="Global IPv6 Address")
- def ipv6_config(self):
- self.public_ipv6 = deployer.ask_ipv6(self)
-
@chatflow_step(title="Reservation")
@deployment_context()
def reservation(self):
diff --git a/jumpscale/packages/tfgrid_solutions/chats/minio.py b/jumpscale/packages/tfgrid_solutions/chats/minio.py
index 3e2d9ab73a..d8190447f3 100644
--- a/jumpscale/packages/tfgrid_solutions/chats/minio.py
+++ b/jumpscale/packages/tfgrid_solutions/chats/minio.py
@@ -16,13 +16,13 @@ class MinioDeploy(GedisChatBot):
"container_resources",
"minio_resources",
"zdb_nodes_selection",
+ "ipv6_config",
"minio_nodes_selection",
"minio_network",
"access_credentials",
"container_logs",
"public_key",
"ip_selection",
- "ipv6_config",
"zdb_reservation",
"minio_reservation",
"success",
@@ -97,9 +97,17 @@ def zdb_nodes_selection(self):
query = {"sru": 10}
workload_name = "ZDB workloads"
self.zdb_nodes, self.zdb_pool_ids = deployer.ask_multi_pool_distribution(
- self, self.zdb_number, query, workload_name=workload_name
+ self, self.zdb_number, query, workload_name=workload_name, ip_version="IPv6",
)
+ @chatflow_step(title="Global IPv6 Address")
+ def ipv6_config(self):
+ self.public_ipv6 = deployer.ask_ipv6(self)
+ if self.public_ipv6:
+ self.ip_version = "IPv6"
+ else:
+ self.ip_version = None
+
@chatflow_step(title="Minio Nodes")
def minio_nodes_selection(self):
queries = [
@@ -113,7 +121,7 @@ def minio_nodes_selection(self):
if self.mode == "Master/Slave":
workload_names.append("Secondary")
self.minio_nodes, self.minio_pool_ids = deployer.ask_multi_pool_placement(
- self, len(queries), queries, workload_names=workload_names,
+ self, len(queries), queries, workload_names=workload_names, ip_version=self.ip_version,
)
@chatflow_step(title="Network")
@@ -207,9 +215,9 @@ def ip_selection(self):
)
self.network_view.used_ips.append(self.ip_addresses[1])
- @chatflow_step(title="Global IPv6 Address")
- def ipv6_config(self):
- self.public_ipv6 = deployer.ask_ipv6(self)
+ @chatflow_step(title="Reserve zdb", disable_previous=True)
+ @deployment_context()
+ def zdb_reservation(self):
self.metadata = {
"Solution Name": self.solution_name,
"Solution Type": "minio",
@@ -217,11 +225,6 @@ def ipv6_config(self):
"Master IP": self.ip_addresses[0],
}
self.solution_metadata.update(self.metadata)
-
- @chatflow_step(title="Reserve zdb", disable_previous=True)
- @deployment_context()
- def zdb_reservation(self):
-
self.password = uuid.uuid4().hex
self.zdb_result = deployer.deploy_minio_zdb(
pool_id=self.zdb_pool_ids[0],
diff --git a/jumpscale/packages/tfgrid_solutions/chats/network.py b/jumpscale/packages/tfgrid_solutions/chats/network.py
index 915cc2a7fa..ad1c31df77 100644
--- a/jumpscale/packages/tfgrid_solutions/chats/network.py
+++ b/jumpscale/packages/tfgrid_solutions/chats/network.py
@@ -1,12 +1,13 @@
from textwrap import dedent
-
+import random
from jumpscale.loader import j
from jumpscale.sals.chatflows.chatflows import GedisChatBot, StopChatFlow, chatflow_step
from jumpscale.sals.reservation_chatflow import DeploymentFailed, deployer, deployment_context, solutions
+from collections import defaultdict
class NetworkDeploy(GedisChatBot):
- steps = ["welcome", "start", "ip_config", "network_reservation", "network_info"]
+ steps = ["welcome", "start", "ip_config", "access_node_selection", "network_reservation", "network_info"]
title = "Network"
@chatflow_step(title="Welcome")
@@ -45,32 +46,57 @@ def start(self):
@chatflow_step(title="IP Configuration")
def ip_config(self):
+ if self.action == "Create":
+ self.ip_range = j.sals.reservation_chatflow.reservation_chatflow.get_ip_range(self)
ips = ["IPv6", "IPv4"]
self.ipversion = self.single_choice(
"How would you like to connect to your network? If unsure, choose IPv4", ips, required=True, default="IPv4",
)
- self.md_show_update("Searching for access node...")
+
+ @chatflow_step(title="Access Node Selection")
+ def access_node_selection(self):
+ self.md_show_update("Fetching Access Nodes...")
pools = [
p
for p in j.sals.zos.pools.list()
if p.node_ids and p.cus >= 0 and p.sus >= 0 and p.empty_at > j.data.time.now().timestamp
]
- self.access_node = None
- for pool in pools:
- try:
- access_nodes = j.sals.reservation_chatflow.reservation_chatflow.get_nodes(
- 1, ip_version=self.ipversion, pool_ids=[pool.pool_id]
- )
- except StopChatFlow:
- continue
- if access_nodes:
- self.access_node = access_nodes[0]
- self.pool = pool.pool_id
- break
- if not self.access_node:
+
+ access_nodes_pools = defaultdict(list)
+ for p in pools:
+ for node_id in p.node_ids:
+ access_nodes_pools[node_id].append(p.pool_id)
+ available_access_nodes = {}
+ all_access_nodes = filter(lambda node: node.node_id in access_nodes_pools, j.sals.zos._explorer.nodes.list())
+ if self.ipversion == "IPv4":
+ ip_filter = j.sals.zos.nodes_finder.filter_public_ip4
+ else:
+ ip_filter = j.sals.zos.nodes_finder.filter_public_ip6
+ available_access_nodes = {
+ n.node_id: n for n in all_access_nodes if ip_filter(n) and j.sals.zos.nodes_finder.filter_is_up(n)
+ }
+
+ if not available_access_nodes:
raise StopChatFlow("There are no available access nodes in your existing pools")
- if self.action == "Create":
- self.ip_range = j.sals.reservation_chatflow.reservation_chatflow.get_ip_range(self)
+
+ access_node_id = self.drop_down_choice(
+ "Please select an access node or leave it empty to automatically select it",
+ list(available_access_nodes.keys()),
+ )
+ if access_node_id:
+ self.access_node = available_access_nodes[access_node_id]
+ if len(access_nodes_pools[self.access_node.node_id]) > 1:
+ self.pool = self.drop_down_choice(
+ "Please select a pool or leave it empty to automaically select it",
+ access_nodes_pools[self.access_node.node_id],
+ )
+ if not self.pool:
+ self.pool = random.choice(list(access_nodes_pools[self.access_node.node_id]))
+ else:
+ self.pool = access_nodes_pools[self.access_node.node_id][0]
+ else:
+ self.access_node = random.choice(list(available_access_nodes.values()))
+ self.pool = random.choice(list(access_nodes_pools[self.access_node.node_id]))
@chatflow_step(title="Reservation")
@deployment_context()
diff --git a/jumpscale/packages/tfgrid_solutions/chats/ubuntu.py b/jumpscale/packages/tfgrid_solutions/chats/ubuntu.py
index bb0a5efe45..5a4bd8d49d 100644
--- a/jumpscale/packages/tfgrid_solutions/chats/ubuntu.py
+++ b/jumpscale/packages/tfgrid_solutions/chats/ubuntu.py
@@ -19,9 +19,9 @@ class UbuntuDeploy(GedisChatBot):
"ubuntu_network",
"container_logs",
"public_key_get",
+ "ipv6_config",
"container_node_id",
"container_ip",
- "ipv6_config",
"reservation",
"success",
]
@@ -93,6 +93,14 @@ def public_key_get(self):
"""Please upload your public SSH key to be able to access the depolyed container via ssh""", required=True,
).split("\n")[0]
+ @chatflow_step(title="Global IPv6 Address")
+ def ipv6_config(self):
+ self.public_ipv6 = deployer.ask_ipv6(self)
+ if self.public_ipv6:
+ self.ip_version = "IPv6"
+ else:
+ self.ip_version = None
+
@chatflow_step(title="Container node id")
def container_node_id(self):
query = {
@@ -100,9 +108,9 @@ def container_node_id(self):
"mru": math.ceil(self.resources["memory"] / 1024),
"sru": math.ceil(self.resources["disk_size"] / 1024),
}
- self.selected_node = deployer.ask_container_placement(self, self.pool_id, **query)
+ self.selected_node = deployer.ask_container_placement(self, self.pool_id, ip_version=self.ip_version, **query)
if not self.selected_node:
- self.selected_node = deployer.schedule_container(self.pool_id, **query)
+ self.selected_node = deployer.schedule_container(self.pool_id, ip_version=self.ip_version, **query)
@chatflow_step(title="Container IP")
@deployment_context()
@@ -128,10 +136,6 @@ def container_ip(self):
"Please choose IP Address for your solution", free_ips, default=free_ips[0], required=True,
)
- @chatflow_step(title="Global IPv6 Address")
- def ipv6_config(self):
- self.public_ipv6 = deployer.ask_ipv6(self)
-
@chatflow_step(title="Reservation")
@deployment_context()
def reservation(self):
diff --git a/jumpscale/packages/tfgrid_solutions/scripts/threebot/entrypoint.py b/jumpscale/packages/tfgrid_solutions/scripts/threebot/entrypoint.py
index f42539883d..7d169f8cb0 100644
--- a/jumpscale/packages/tfgrid_solutions/scripts/threebot/entrypoint.py
+++ b/jumpscale/packages/tfgrid_solutions/scripts/threebot/entrypoint.py
@@ -41,7 +41,7 @@ def main():
identity.register()
identity.save()
- j.core.identity.set_default("test")
+ j.core.identity.set_default("main")
if backup_password:
# Seprate the logic of wallet creation in case of stellar failure it still takes the backup
diff --git a/jumpscale/packages/threebot_deployer/chats/threebot.py b/jumpscale/packages/threebot_deployer/chats/threebot.py
index 9e9039f827..18d3fbc97b 100644
--- a/jumpscale/packages/threebot_deployer/chats/threebot.py
+++ b/jumpscale/packages/threebot_deployer/chats/threebot.py
@@ -16,13 +16,14 @@ class ThreebotDeploy(MarketPlaceAppsChatflow):
steps = [
"create_or_recover",
"get_solution_name",
- # "upload_public_key",
+ "upload_public_key",
"set_backup_password",
"infrastructure_setup",
"deploy",
"initializing",
"new_expiration",
"solution_extension",
+ "wireguard_configs",
"success",
]
@@ -41,7 +42,8 @@ def _threebot_start(self):
# the main container + the nginx container with 0.25 GB disk
self.query = {"cru": 2, "mru": 2, "sru": 2.25}
self.container_resources = {"cru": 1, "mru": 1, "sru": 2}
- self.expiration = 30 * 60 # 30 minutes for 3bot
+ self.expiration = 60 * 60 # 60 minutes for 3bot
+ self.ip_version = "IPv6"
@chatflow_step(title="Welcome")
def create_or_recover(self):
@@ -77,12 +79,15 @@ def get_solution_name(self):
self.md_show("The specified 3Bot name doesn't exist.")
self.backup_model = BACKUP_MODEL_FACTORY.get(f"{self.solution_name}_{self.threebot_name}")
- # @chatflow_step(title="SSH key")
- # def upload_public_key(self):
- # self.public_key = self.upload_file(
- # "Please upload your public ssh key, this will allow you to access your threebot container using ssh",
- # required=True,
- # ).strip()
+ @chatflow_step(title="SSH key (Optional)")
+ def upload_public_key(self):
+ self.public_key = (
+ self.upload_file(
+ "Please upload your public ssh key, this will allow you to access your threebot container using ssh",
+ )
+ or ""
+ )
+ self.public_key = self.public_key.strip()
def _existing_3bot(self):
try:
@@ -164,7 +169,7 @@ def deploy(self):
"INSTANCE_NAME": self.solution_name,
"THREEBOT_NAME": self.threebot_name,
"DOMAIN": self.domain,
- # "SSHKEY": self.public_key,
+ "SSHKEY": self.public_key,
"TEST_CERT": "true" if test_cert else "false",
"MARKETPLACE_URL": f"https://{j.sals.nginx.main.websites.threebot_deployer_threebot_deployer_root_proxy_443.domain}/",
}
@@ -237,5 +242,49 @@ def initializing(self):
self.stop(f"Failed to initialize 3Bot on {self.threebot_url} , please contact support")
self.domain = f"{self.domain}/admin"
+ @chatflow_step(title="Container Access")
+ def wireguard_configs(self):
+ filename = self.solution_metadata["owner"].replace(".3bot", "")
+ wg_file_path = j.sals.fs.join_paths(j.core.dirs.CFGDIR, f"{filename}.3bot_apps.conf")
+ wg_file_path_alt = j.sals.fs.join_paths(j.core.dirs.CFGDIR, "wireguard", f"{filename}.3bot_apps.conf")
+ if j.sals.fs.exists(wg_file_path):
+ content = j.sals.fs.read_file(wg_file_path)
+ elif j.sals.fs.exists(wg_file_path_alt):
+ content = j.sals.fs.read_file(wg_file_path_alt)
+ elif hasattr(self, "wgcfg"):
+ content = self.wgcfg
+ else:
+ config = deployer.add_access(
+ self.network_view.name,
+ self.network_view,
+ self.selected_node.node_id,
+ self.pool_id,
+ bot=self,
+ **self.solution_metadata,
+ )
+ content = config["wg"]
+
+ msg = f"""\
+ Use the following template to configure your wireguard connection. This will give you access to your network.
+ Make sure you have wireguard installed
+
+ {content.replace(chr(10), "<br/>")}
+
+ In order to have the network active and accessible from your local/container machine, navigate to where the config is downloaded and start your connection using `wg-quick up <your_download_dir>/apps.conf`
+ """
+ self.download_file(msg=dedent(msg), data=content, filename="apps.conf", html=True)
+
+ @chatflow_step(title="Success", disable_previous=True, final_step=True)
+ def success(self):
+ display_name = self.solution_name.replace(f"{self.solution_metadata['owner']}-", "")
+ message = f"""\
+ # You deployed a new instance {display_name} of {self.SOLUTION_TYPE}
+
+ \n
+ - You can access it via the browser using: https://{self.domain}
+
+ - You can access your 3Bot via IP: `{self.ip_address}`. To use it make sure wireguard is up and running.
+ """
+ self.md_show(dedent(message), md=True)
+
chat = ThreebotDeploy
diff --git a/jumpscale/packages/threebot_deployer/package.py b/jumpscale/packages/threebot_deployer/package.py
index 164b03dccf..9ba2eed0fe 100644
--- a/jumpscale/packages/threebot_deployer/package.py
+++ b/jumpscale/packages/threebot_deployer/package.py
@@ -35,7 +35,7 @@ def install(self, **kwargs):
f"Added remote redis logs on machine {log_config['channel_host']}:{log_config['channel_port']}"
)
- def start(self):
+ def start(self, **kwargs):
# Configuring 3bot deployer package actors to be public
location_actors_443 = j.sals.nginx.main.websites.default_443.locations.get(name="threebot_deployer_actors")
location_actors_443.is_auth = False
diff --git a/jumpscale/sals/marketplace/apps_chatflow.py b/jumpscale/sals/marketplace/apps_chatflow.py
index a0dcc6666d..0916dda8f7 100644
--- a/jumpscale/sals/marketplace/apps_chatflow.py
+++ b/jumpscale/sals/marketplace/apps_chatflow.py
@@ -12,6 +12,7 @@
from .chatflow import MarketPlaceChatflow
from .deployer import deployer
from .solutions import solutions
+from jumpscale.clients.explorer.models import WorkloadType
FLAVORS = {
"Silver": {"sru": 2,},
@@ -39,6 +40,7 @@ def _init_solution(self):
self.username = self.user_info()["username"]
self.solution_metadata["owner"] = self.username
self.threebot_name = j.data.text.removesuffix(self.username, ".3bot")
+ self.ip_version = "IPv6"
self.expiration = 60 * 60 * 3 # expiration 3 hours
def _choose_flavor(self, flavors=None):
@@ -68,11 +70,17 @@ def _choose_flavor(self, flavors=None):
def _get_pool(self):
self.currency = "TFT"
available_farms = []
- farm_names = ["freefarm"] # [f.name for f in j.sals.zos._explorer.farms.list()] # TODO: RESTORE LATER
+ farm_names = [f.name for f in j.sals.zos._explorer.farms.list()]
+ # farm_names = ["freefarm"] # DEBUGGING ONLY
for farm_name in farm_names:
- available, _, _, _, _ = deployer.check_farm_capacity(farm_name, currencies=[self.currency], **self.query)
- if available:
+ available_ipv4, _, _, _, _ = deployer.check_farm_capacity(
+ farm_name, currencies=[self.currency], ip_version="IPv4", **self.query
+ )
+ available_ipv6, _, _, _, _ = deployer.check_farm_capacity(
+ farm_name, currencies=[self.currency], ip_version="IPv6", **self.query
+ )
+ if available_ipv4 and available_ipv6:
available_farms.append(farm_name)
self.farm_name = random.choice(available_farms)
@@ -82,15 +90,13 @@ def _get_pool(self):
if "apps" in networks_names:
# old user
self.md_show_update("Checking for free resources .....")
- free_pools = deployer.get_free_pools(self.solution_metadata["owner"])
+ free_pools = deployer.get_free_pools(self.solution_metadata["owner"], **self.query)
if free_pools:
self.md_show_update(
"Searching for a best fit pool (best fit pool would try to find a pool that matches your resources or with least difference from the required specs)..."
)
# select free pool and extend if required
- pool, cu_diff, su_diff = deployer.get_best_fit_pool(
- free_pools, self.expiration, free_to_use=self.currency == "FreeTFT", **self.query
- )
+ pool, cu_diff, su_diff = deployer.get_best_fit_pool(free_pools, self.expiration, **self.query)
if cu_diff < 0 or su_diff < 0:
cu_diff = abs(cu_diff) if cu_diff < 0 else 0
su_diff = abs(su_diff) if su_diff < 0 else 0
@@ -138,11 +144,22 @@ def _get_pool(self):
currency=self.currency,
**self.query,
)
+ if not all(
+ [
+ self.pool_info.escrow_information.address.strip() != "",
+ self.pool_info.escrow_information.address.strip() != "",
+ ]
+ ):
+ raise StopChatFlow(
+ f"provisioning the pool, invalid escrow information probably caused by a misconfigured, pool creation request was {self.pool_info}"
+ )
deployer.pay_for_pool(self.pool_info)
result = deployer.wait_demo_payment(self, self.pool_info.reservation_id)
if not result:
raise StopChatFlow(f"provisioning the pool timed out. pool_id: {self.pool_info.reservation_id}")
- deployer.init_new_user_network(self, self.solution_metadata["owner"], self.pool_info.reservation_id)
+ self.wgcfg = deployer.init_new_user_network(
+ self, self.solution_metadata["owner"], self.pool_info.reservation_id
+ )
self.pool_id = self.pool_info.reservation_id
return self.pool_id
@@ -153,7 +170,7 @@ def _deploy_network(self):
self.network_view = deployer.get_network_view(f"{self.solution_metadata['owner']}_apps")
self.ip_address = None
while not self.ip_address:
- self.selected_node = deployer.schedule_container(self.pool_id, **self.query)
+ self.selected_node = deployer.schedule_container(self.pool_id, ip_version=self.ip_version, **self.query)
result = deployer.add_network_node(
self.network_view.name,
self.selected_node,
@@ -178,7 +195,7 @@ def _deploy_network(self):
def _get_domain(self):
# get domain for the ip address
self.md_show_update("Preparing gateways ...")
- gateways = deployer.list_all_gateways(self.username)
+ gateways = deployer.list_all_gateways(self.username, self.farm_name)
if not gateways:
raise StopChatFlow(
"There are no available gateways in the farms bound to your pools. The resources you paid for will be re-used in your upcoming deployments."
@@ -191,6 +208,16 @@ def _get_domain(self):
gateway = gw_dict["gateway"]
for domain in gateway.managed_domains:
is_managed_domains = True
+ # TODO: FIXME Remove when gateways is fixed
+ if domain in [
+ "tfgw-prod-05.ava.tf",
+ "tfgw-prod-05.base.tf",
+ "tfgw-prod-05.3x0.me",
+ "tfgw-prod-05.gateway.tf",
+ "tfgw-prod-02.gateway.tf",
+ "tfgw-prod-07.base.tf",
+ ]:
+ continue
try:
if j.sals.crtsh.has_reached_limit(domain):
continue
@@ -211,27 +238,60 @@ def _get_domain(self):
"Letsencrypt limit has been reached on all gateways. The resources you paid for will be re-used in your upcoming deployments."
)
- self.domain = random.choice(list(domains.keys()))
+ self.addresses = []
- self.gateway = domains[self.domain]["gateway"]
- self.gateway_pool = domains[self.domain]["pool"]
+ while not self.addresses and domains:
+ managed_domain = random.choice(list(domains.keys()))
+ self.gateway = domains[managed_domain]["gateway"]
+ self.gateway_pool = domains[managed_domain]["pool"]
- solution_name = self.solution_name.replace(f"{self.solution_metadata['owner']}-", "").replace("_", "-")
- owner_prefix = self.solution_metadata["owner"].replace(".3bot", "").replace(".", "").replace("_", "-")
- solution_type = self.SOLUTION_TYPE.replace(".", "").replace("_", "-")
- # check if domain name is free or append random number
- full_domain = f"{owner_prefix}-{solution_type}-{solution_name}.{self.domain}"
- while True:
- if j.tools.dnstool.is_free(full_domain):
- self.domain = full_domain
- break
- else:
- random_number = random.randint(1000, 100000)
- full_domain = f"{owner_prefix}-{solution_type}-{solution_name}-{random_number}.{self.domain}"
+ solution_name = self.solution_name.replace(f"{self.solution_metadata['owner']}-", "").replace("_", "-")
+ owner_prefix = self.solution_metadata["owner"].replace(".3bot", "").replace(".", "").replace("_", "-")
+ solution_type = self.SOLUTION_TYPE.replace(".", "").replace("_", "-")
+ # check if domain name is free or append random number
+ full_domain = f"{owner_prefix}-{solution_type}-{solution_name}.{managed_domain}"
- self.addresses = []
- for ns in self.gateway.dns_nameserver:
- self.addresses.append(j.sals.nettools.get_host_by_name(ns))
+ metafilter = lambda metadata: metadata.get("owner") == self.username
+ # no need to load workloads in deployer object because it is already loaded when checking for name and/or network
+ user_subdomains = {}
+ all_domains = solutions._list_subdomain_workloads(solution_type, metadata_filters=[metafilter]).values()
+ for dom_list in all_domains:
+ for dom in dom_list:
+ user_subdomains[dom["domain"]] = dom
+
+ while True:
+ if full_domain in user_subdomains:
+ # check if related container workloads still exist
+ dom = user_subdomains[full_domain]
+ sol_uuid = dom["uuid"]
+ if sol_uuid:
+ workloads = solutions.get_workloads_by_uuid(sol_uuid, "DEPLOY")
+ is_free = True
+ for w in workloads:
+ if w.info.workload_type == WorkloadType.Container:
+ is_free = False
+ break
+ if is_free:
+ solutions.cancel_solution_by_uuid(sol_uuid)
+
+ if j.tools.dnstool.is_free(full_domain):
+ self.domain = full_domain
+ break
+ else:
+ random_number = random.randint(1000, 100000)
+ full_domain = f"{owner_prefix}-{solution_type}-{solution_name}-{random_number}.{managed_domain}"
+
+ for ns in self.gateway.dns_nameserver:
+ try:
+ self.addresses.append(j.sals.nettools.get_host_by_name(ns))
+ except Exception as e:
+ j.logger.error(f"Failed to resolve DNS {ns}, this gateway will be skipped")
+
+ if not self.addresses:
+ domains.pop(managed_domain)
+
+ if not self.addresses:
+ raise RuntimeError("No valid gateways found, Please contact support")
self.secret = f"{j.core.identity.me.tid}:{uuid.uuid4().hex}"
return self.domain
diff --git a/jumpscale/sals/marketplace/deployer.py b/jumpscale/sals/marketplace/deployer.py
index 000670c92b..b5e677c958 100644
--- a/jumpscale/sals/marketplace/deployer.py
+++ b/jumpscale/sals/marketplace/deployer.py
@@ -10,6 +10,9 @@
from .models import UserPool
+pool_factory = StoredFactory(UserPool)
+pool_factory.always_reload = True
+
class MarketPlaceDeployer(ChatflowDeployer):
@@ -21,7 +24,7 @@ def list_user_pool_ids(self, username):
return user_pool_ids
def list_user_pools(self, username):
- pool_factory = StoredFactory(UserPool)
+
_, _, user_pools = pool_factory.find_many(owner=username)
all_pools = [p for p in j.sals.zos.pools.list() if p.node_ids]
user_pool_ids = [p.pool_id for p in user_pools]
@@ -53,7 +56,6 @@ def list_networks(self, username, next_action=NextAction.DEPLOY, sync=True):
def create_pool(self, username, bot):
pool_info = super().create_pool(bot)
- pool_factory = StoredFactory(UserPool)
user_pool = pool_factory.new(f"pool_{username.replace('.3bot', '')}_{pool_info.reservation_id}")
user_pool.owner = username
user_pool.pool_id = pool_info.reservation_id
@@ -126,8 +128,33 @@ def select_network(self, username, bot):
network_name = bot.single_choice("Please select a network", network_names, required=True)
return network_views[f"{username}_{network_name}"]
- def list_all_gateways(self, username):
+ def _get_gateways_pools(self, farm_name):
+ """
+ Returns:
+ List : will return pool ids for pools on farms with gateways
+ """
+ gateways_pools_ids = []
+ farms_ids_with_gateways = [
+ gateway_farm.farm_id for gateway_farm in deployer._explorer.gateway.list() if gateway_farm.farm_id > 0
+ ]
+ farms_names_with_gateways = set(
+ map(lambda farm_id: deployer._explorer.farms.get(farm_id=farm_id).name, farms_ids_with_gateways)
+ )
+
+ for farm_name in farms_names_with_gateways:
+ gw_pool_name = f"marketplace_gateway_{farm_name}"
+ if gw_pool_name not in pool_factory.list_all():
+ gateways_pool_info = deployer.create_gateway_emptypool(gw_pool_name, farm_name)
+ gateways_pools_ids.append(gateways_pool_info.reservation_id)
+ else:
+ pool_id = pool_factory.get(gw_pool_name).pool_id
+ gateways_pools_ids.append(pool_id)
+ return gateways_pools_ids
+
+ def list_all_gateways(self, username, farm_name=None):
pool_ids = self.list_user_pool_ids(username)
+ gateways_pools = self._get_gateways_pools(farm_name) # Empty pools contains the gateways only
+ pool_ids.extend(gateways_pools)
return super().list_all_gateways(pool_ids=pool_ids)
def select_gateway(self, username, bot):
@@ -271,20 +298,51 @@ def extend_solution_pool(self, bot, pool_id, expiration, currency, **resources):
def create_solution_pool(self, bot, username, farm_name, expiration, currency, **resources):
cu, su = self.calculate_capacity_units(**resources)
pool_info = j.sals.zos.pools.create(int(cu * expiration), int(su * expiration), farm_name, [currency])
- pool_factory = StoredFactory(UserPool)
user_pool = pool_factory.new(f"pool_{username.replace('.3bot', '')}_{pool_info.reservation_id}")
user_pool.owner = username
user_pool.pool_id = pool_info.reservation_id
user_pool.save()
return pool_info
- def get_free_pools(self, username, workload_types=None):
+ def create_gateway_emptypool(self, gwpool_name, farm_name):
+ pool_info = j.sals.zos.pools.create(0, 0, farm_name, ["TFT"])
+ user_pool = pool_factory.new(gwpool_name)
+ user_pool.owner = gwpool_name
+ user_pool.pool_id = pool_info.reservation_id
+ user_pool.save()
+ return pool_info
+
+ def get_free_pools(
+ self, username, workload_types=None, free_to_use=False, cru=0, mru=0, sru=0, hru=0, ip_version="IPv6"
+ ):
+ def is_pool_free(pool, nodes_dict):
+ for node_id in pool.node_ids:
+ node = nodes_dict.get(node_id)
+ if node and not node.free_to_use:
+ return False
+ return True
+
user_pools = self.list_user_pools(username)
j.sals.reservation_chatflow.deployer.load_user_workloads()
free_pools = []
workload_types = workload_types or [WorkloadType.Container]
+ nodes = {}
+ if free_to_use:
+ nodes = {node.node_id: node for node in j.sals.zos._explorer.nodes.list()}
for pool in user_pools:
valid = True
+ try:
+ j.sals.reservation_chatflow.reservation_chatflow.get_nodes(
+ 1, cru=cru, mru=mru, sru=sru, hru=hru, ip_version=ip_version, pool_ids=[pool.pool_id],
+ )
+ except StopChatFlow as e:
+ j.logger.error(
+ f"Failed to find resources for this reservation in this pool: {pool}, {e}. We will use another one."
+ )
+ continue
+
+ if free_to_use and not is_pool_free(pool, nodes):
+ continue
for wokrkload_type in workload_types:
if j.sals.reservation_chatflow.deployer.workloads[NextAction.DEPLOY][wokrkload_type][pool.pool_id]:
valid = False
@@ -293,16 +351,11 @@ def get_free_pools(self, username, workload_types=None):
continue
if (pool.cus == 0 and pool.sus == 0) or pool.empty_at < j.data.time.now().timestamp:
continue
+
free_pools.append(pool)
return free_pools
- def get_best_fit_pool(self, pools, expiration, cru=0, mru=0, sru=0, hru=0, free_to_use=False):
- def is_pool_free(pool, nodes_dict):
- for node_id in pool.node_ids:
- node = nodes_dict.get(node_id)
- if node and not node.free_to_use:
- return False
- return True
+ def get_best_fit_pool(self, pools, expiration, cru=0, mru=0, sru=0, hru=0):
cu, su = self.calculate_capacity_units(cru, mru, sru, hru)
required_cu = cu * expiration
@@ -310,12 +363,7 @@ def is_pool_free(pool, nodes_dict):
exact_fit_pools = [] # contains pools that are exact match of the required resources
over_fit_pools = [] # contains pools that have higher cus AND sus than the required resources
under_fit_pools = [] # contains pools that have lower cus OR sus than the required resources
- nodes = {}
- if free_to_use:
- nodes = {node.node_id: node for node in j.sals.zos._explorer.nodes.list()}
for pool in pools:
- if free_to_use and not is_pool_free(pool, nodes):
- continue
if pool.cus == required_cu and pool.sus == required_su:
exact_fit_pools.append(pool)
else:
@@ -341,9 +389,9 @@ def is_pool_free(pool, nodes_dict):
result_pool = sorted_result[0]
return result_pool, result_pool.cus - required_cu, result_pool.sus - required_su
- def init_new_user_network(self, bot, username, pool_id):
+ def init_new_user_network(self, bot, username, pool_id, ip_version="IPv4"):
access_node = j.sals.reservation_chatflow.reservation_chatflow.get_nodes(
- 1, pool_ids=[pool_id], ip_version="IPv4"
+ 1, pool_ids=[pool_id], ip_version=ip_version
)[0]
result = self.deploy_network(
diff --git a/jumpscale/sals/reservation_chatflow/deployer.py b/jumpscale/sals/reservation_chatflow/deployer.py
index 3c3e316e58..a56edb285f 100644
--- a/jumpscale/sals/reservation_chatflow/deployer.py
+++ b/jumpscale/sals/reservation_chatflow/deployer.py
@@ -9,7 +9,7 @@
import gevent
import netaddr
from nacl.public import Box
-
+from contextlib import ContextDecorator
from jumpscale.clients.explorer.models import DiskType, NextAction, WorkloadType, ZDBMode
from jumpscale.core.base import StoredFactory
from jumpscale.loader import j
@@ -25,8 +25,24 @@
WorkloadType.Proxy,
]
+pool_factory = StoredFactory(PoolConfig)
+pool_factory.always_reload = True
+
class NetworkView:
+ class dry_run_context(ContextDecorator):
+ def __init__(self, test_network_name, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.test_network_name = test_network_name
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *exc):
+ network_view = NetworkView(self.test_network_name)
+ for workload in network_view.network_workloads:
+ j.sals.zos.workloads.decomission(workload.id)
+
def __init__(self, name, workloads=None, nodes=None):
self.name = name
if not workloads:
@@ -159,12 +175,13 @@ def get_free_ip(self, node):
return ip
return None
- def dry_run(self, node_ids=None, pool_ids=None, bot=None, breaking_node_ids=None):
+ def dry_run(self, test_network_name=None, node_ids=None, pool_ids=None, bot=None, breaking_node_ids=None):
+ name = test_network_name or uuid.uuid4().hex
breaking_node_ids = breaking_node_ids or node_ids
if bot:
bot.md_show_update("Starting dry run to check nodes status")
ip_range = netaddr.IPNetwork("10.10.0.0/16")
- name = uuid.uuid4().hex
+
if any([node_ids, pool_ids]) and not all([node_ids, pool_ids]):
raise StopChatFlow("you must specify both pool ids and node ids together")
node_pool_dict = {}
@@ -190,8 +207,6 @@ def dry_run(self, node_ids=None, pool_ids=None, bot=None, breaking_node_ids=None
try:
result.append(j.sals.zos.workloads.deploy(resource))
except Exception as e:
- for wid in result:
- j.sals.zos.workloads.decomission(wid)
raise StopChatFlow(
f"failed to deploy workload on node {resource.info.node_id} due to" f" error {str(e)}"
)
@@ -203,14 +218,10 @@ def dry_run(self, node_ids=None, pool_ids=None, bot=None, breaking_node_ids=None
# if not a breaking nodes (old node not used for deployment) we can overlook it
if workload.info.node_id not in breaking_node_ids:
continue
- for wid in result:
- j.sals.zos.workloads.decomission(wid)
j.sals.reservation_chatflow.reservation_chatflow.block_node(network.network_resources[idx].info.node_id)
raise StopChatFlow(
"Network nodes dry run failed on node" f" {network.network_resources[idx].info.node_id}"
)
- for wid in result:
- j.sals.zos.workloads.decomission(wid)
class ChatflowDeployer:
@@ -364,7 +375,15 @@ def extend_pool(self, bot, pool_id):
self.wait_pool_payment(bot, pool_id, 10, qr_code, trigger_cus=trigger_cus, trigger_sus=trigger_sus)
return pool_info
- def check_farm_capacity(self, farm_name, currencies=None, sru=None, cru=None, mru=None, hru=None):
+ def check_farm_capacity(self, farm_name, currencies=None, sru=None, cru=None, mru=None, hru=None, ip_version=None):
+ node_filter = None
+ if ip_version and ip_version not in ["IPv4", "IPv6"]:
+ raise j.exceptions.Runtime(f"{ip_version} is not a valid IP Version")
+ else:
+ if ip_version == "IPv4":
+ node_filter = j.sals.zos.nodes_finder.filter_public_ip4
+ elif ip_version == "IPv6":
+ node_filter = j.sals.zos.nodes_finder.filter_public_ip6
currencies = currencies or []
farm_nodes = j.sals.zos.nodes_finder.nodes_search(farm_name=farm_name)
available_cru = 0
@@ -372,16 +391,23 @@ def check_farm_capacity(self, farm_name, currencies=None, sru=None, cru=None, mr
available_mru = 0
available_hru = 0
running_nodes = 0
+ blocked_nodes = j.sals.reservation_chatflow.reservation_chatflow.list_blocked_nodes()
+ access_node = None
for node in farm_nodes:
if "FreeTFT" in currencies and not node.free_to_use:
continue
if not j.sals.zos.nodes_finder.filter_is_up(node):
continue
+ if node.node_id in blocked_nodes:
+ continue
+ if not access_node and ip_version and node_filter(node):
+ access_node = node
running_nodes += 1
available_cru += node.total_resources.cru - node.used_resources.cru
available_sru += node.total_resources.sru - node.used_resources.sru
available_mru += node.total_resources.mru - node.used_resources.mru
available_hru += node.total_resources.hru - node.used_resources.hru
+
if not running_nodes:
return False, available_cru, available_sru, available_mru, available_hru
if sru and available_sru < sru:
@@ -392,6 +418,8 @@ def check_farm_capacity(self, farm_name, currencies=None, sru=None, cru=None, mr
return False, available_cru, available_sru, available_mru, available_hru
if hru and available_hru < hru:
return False, available_cru, available_sru, available_mru, available_hru
+ if ip_version and not access_node:
+ return False, available_cru, available_sru, available_mru, available_hru
return True, available_cru, available_sru, available_mru, available_hru
def show_payment(self, pool, bot):
@@ -435,7 +463,7 @@ def show_payment(self, pool, bot):
def list_pools(self, cu=None, su=None):
all_pools = [p for p in j.sals.zos.pools.list() if p.node_ids]
- pool_factory = StoredFactory(PoolConfig)
+
available_pools = {}
for pool in all_pools:
hidden = False
@@ -495,8 +523,9 @@ def select_pool(
pool = bot.single_choice(msg, list(pool_messages.keys()), required=True)
return pool_messages[pool]
- def get_pool_farm_id(self, pool_id):
- pool = j.sals.zos.pools.get(pool_id)
+ def get_pool_farm_id(self, pool_id=None, pool=None):
+ pool = pool or j.sals.zos.pools.get(pool_id)
+ pool_id = pool.pool_id
if not pool.node_ids:
raise StopChatFlow(f"Pool {pool_id} doesn't contain any nodes")
farm_id = None
@@ -584,12 +613,16 @@ def add_access(
# deploy only latest resource generated by zos sal for each node
for workload in network.network_resources:
node_workloads[workload.info.node_id] = workload
- network_view.dry_run(
- list(node_workloads.keys()),
- [w.info.pool_id for w in node_workloads.values()],
- bot,
- breaking_node_ids=[node_id],
- )
+
+ dry_run_name = uuid.uuid4().hex
+ with NetworkView.dry_run_context(dry_run_name):
+ network_view.dry_run(
+ dry_run_name,
+ list(node_workloads.keys()),
+ [w.info.pool_id for w in node_workloads.values()],
+ bot,
+ breaking_node_ids=[node_id],
+ )
parent_id = network_view.network_workloads[-1].id
for resource in node_workloads.values():
@@ -610,12 +643,15 @@ def delete_access(self, network_name, iprange, network_view=None, node_id=None,
# deploy only latest resource generated by zos sal for each node
for workload in network.network_resources:
node_workloads[workload.info.node_id] = workload
- network_view.dry_run(
- list(node_workloads.keys()),
- [w.info.pool_id for w in node_workloads.values()],
- bot,
- breaking_node_ids=[node_id],
- )
+ dry_run_name = uuid.uuid4().hex
+ with NetworkView.dry_run_context(dry_run_name):
+ network_view.dry_run(
+ dry_run_name,
+ list(node_workloads.keys()),
+ [w.info.pool_id for w in node_workloads.values()],
+ bot,
+ breaking_node_ids=[node_id],
+ )
parent_id = network_view.network_workloads[-1].id
result = []
for resource in node_workloads.values():
@@ -699,13 +735,15 @@ def add_network_node(self, name, node, pool_id, network_view=None, bot=None, **m
# deploy only latest resource generated by zos sal for each node
for workload in network.network_resources:
node_workloads[workload.info.node_id] = workload
-
- network_view.dry_run(
- list(node_workloads.keys()),
- [w.info.pool_id for w in node_workloads.values()],
- bot,
- breaking_node_ids=[node.node_id],
- )
+ dry_run_name = uuid.uuid4().hex
+ with NetworkView.dry_run_context(dry_run_name):
+ network_view.dry_run(
+ dry_run_name,
+ list(node_workloads.keys()),
+ [w.info.pool_id for w in node_workloads.values()],
+ bot,
+ breaking_node_ids=[node.node_id],
+ )
for workload in node_workloads.values():
workload.info.reference = ""
workload.info.description = j.data.serializers.json.dumps({"parent_id": parent_id})
@@ -988,7 +1026,7 @@ def deploy_kubernetes_cluster(
return result
def ask_multi_pool_placement(
- self, bot, number_of_nodes, resource_query_list=None, pool_ids=None, workload_names=None
+ self, bot, number_of_nodes, resource_query_list=None, pool_ids=None, workload_names=None, ip_version=None,
):
"""
Ask and schedule workloads accross multiple pools
@@ -1030,9 +1068,11 @@ def ask_multi_pool_placement(
continue
pool_choices[p] = pools[p]
pool_id = self.select_pool(bot, available_pools=pool_choices, workload_name=workload_names[i], cu=cu, su=su)
- node = self.ask_container_placement(bot, pool_id, workload_name=workload_names[i], **resource_query_list[i])
+ node = self.ask_container_placement(
+ bot, pool_id, workload_name=workload_names[i], ip_version=ip_version, **resource_query_list[i]
+ )
if not node:
- node = self.schedule_container(pool_id, **resource_query_list[i])
+ node = self.schedule_container(pool_id, ip_version=ip_version, **resource_query_list[i])
selected_nodes.append(node)
selected_pool_ids.append(pool_id)
return selected_nodes, selected_pool_ids
@@ -1078,7 +1118,6 @@ def list_all_gateways(self, pool_ids=None):
for pool in all_pools:
available_node_ids.update({node_id: pool for node_id in pool.node_ids})
result = {}
- pool_factory = StoredFactory(PoolConfig)
for gateway in all_gateways:
if gateway.node_id in available_node_ids:
if not gateway.dns_nameserver:
@@ -1464,7 +1503,9 @@ def get_zdb_url(self, zdb_id, password):
url = f"{namespace}:{password}@[{ip}]:{port}"
return url
- def ask_multi_pool_distribution(self, bot, number_of_nodes, resource_query=None, pool_ids=None, workload_name=None):
+ def ask_multi_pool_distribution(
+ self, bot, number_of_nodes, resource_query=None, pool_ids=None, workload_name=None, ip_version=None
+ ):
"""
Choose multiple pools to distribute workload automatically
@@ -1523,7 +1564,7 @@ def ask_multi_pool_distribution(self, bot, number_of_nodes, resource_query=None,
node_to_pool[node_id] = pool
nodes = j.sals.reservation_chatflow.reservation_chatflow.get_nodes(
- number_of_nodes, pool_ids=list(pool_ids.values()), **resource_query
+ number_of_nodes, pool_ids=list(pool_ids.values()), ip_version=ip_version, **resource_query
)
selected_nodes = []
selected_pool_ids = []
diff --git a/jumpscale/sals/reservation_chatflow/solutions.py b/jumpscale/sals/reservation_chatflow/solutions.py
index 7a23c15ca3..fdbbfbe869 100644
--- a/jumpscale/sals/reservation_chatflow/solutions.py
+++ b/jumpscale/sals/reservation_chatflow/solutions.py
@@ -483,6 +483,8 @@ def _list_subdomain_workloads(
"domain": workload.domain,
"ips": workload.ips,
"owner": metadata.get("owner"),
+ "pool": workload.info.pool_id,
+ "uuid": metadata.get("solution_uuid"),
}
if name not in result:
result[name] = [subdomain_dict]
@@ -525,7 +527,13 @@ def _list_proxy_workloads(
continue
name = name_identitfier(metadata)
- proxy_dict = {"wid": workload.id, "domain": workload.domain, "owner": metadata.get("owner")}
+ proxy_dict = {
+ "wid": workload.id,
+ "pool": workload.info.pool_id,
+ "domain": workload.domain,
+ "owner": metadata.get("owner"),
+ "uuid": metadata.get("solution_uuid"),
+ }
if name not in result:
result[name] = [proxy_dict]
else:
@@ -562,8 +570,8 @@ def meta_filter(metadata):
proxy_dicts = proxy_workloads.get(name)
if not subdomain_dicts or not proxy_dicts:
continue
- subdomain_dict = subdomain_dicts[0]
- proxy_dict = proxy_dicts[0]
+ subdomain_dict = subdomain_dicts[-1]
+ proxy_dict = proxy_dicts[-1]
sol_name = name
if owner:
if len(name) > len(owner) + 1:
@@ -636,5 +644,12 @@ def cancel_solution_by_uuid(self, solution_uuid):
if solution_uuid == self.get_solution_uuid(workload):
j.sals.zos.workloads.decomission(workload.id)
+ def get_workloads_by_uuid(self, solution_uuid, next_action=None):
+ workloads = []
+ for workload in j.sals.zos.workloads.list(j.core.identity.me.tid, next_action=next_action):
+ if solution_uuid == self.get_solution_uuid(workload):
+ workloads.append(workload)
+ return workloads
+
solutions = ChatflowSolutions()
diff --git a/jumpscale/sals/zos/container.py b/jumpscale/sals/zos/container.py
index 02b3584a71..3ee88fe919 100644
--- a/jumpscale/sals/zos/container.py
+++ b/jumpscale/sals/zos/container.py
@@ -120,9 +120,15 @@ def add_logs(
tfgrid.workloads.reservation.container.logs.1: logs object added to the container
"""
+ stdout = f"redis://{channel_host}:{channel_port}/{channel_name}-stdout"
+ stderr = f"redis://{channel_host}:{channel_port}/{channel_name}-stderr"
+
cont_logs = ContainerLogs()
cont_logs.type = channel_type
- cont_logs.data.stdout = f"redis://{channel_host}:{channel_port}/{channel_name}-stdout"
- cont_logs.data.stderr = f"redis://{channel_host}:{channel_port}/{channel_name}-stderr"
+ cont_logs.data.secret_stdout = self.encrypt_secret(
+ container.info.node_id, stdout)
+ cont_logs.data.secret_stderr = self.encrypt_secret(
+ container.info.node_id, stderr)
container.logs.append(cont_logs)
+
return cont_logs
diff --git a/jumpscale/servers/threebot/threebot.py b/jumpscale/servers/threebot/threebot.py
index 6c89b298fb..fc16ecc0fe 100644
--- a/jumpscale/servers/threebot/threebot.py
+++ b/jumpscale/servers/threebot/threebot.py
@@ -196,7 +196,7 @@ def __call__(self, e, h):
class Package:
- def __init__(self, path, default_domain, default_email, giturl=""):
+ def __init__(self, path, default_domain, default_email, giturl="", kwargs=None):
self.path = path
self.giturl = giturl
self._config = None
@@ -205,6 +205,7 @@ def __init__(self, path, default_domain, default_email, giturl=""):
self._module = None
self.default_domain = default_domain
self.default_email = default_email
+ self.kwargs = kwargs or {}
def load_config(self):
return toml.load(j.sals.fs.join_paths(self.path, "package.toml"))
@@ -296,7 +297,7 @@ def uninstall(self):
def start(self):
if self.module and hasattr(self.module, "start"):
- self.module.start()
+ self.module.start(**self.kwargs)
def stop(self):
if self.module and hasattr(self.module, "stop"):
@@ -325,11 +326,13 @@ def get(self, package_name):
if package_name in self.packages:
package_path = self.packages[package_name]["path"]
package_giturl = self.packages[package_name]["giturl"]
+ package_kwargs = self.packages[package_name].get("kwargs", {})
return Package(
path=package_path,
default_domain=self.threebot.domain,
default_email=self.threebot.email,
giturl=package_giturl,
+ kwargs=package_kwargs,
)
def get_packages(self):
@@ -373,14 +376,6 @@ def add(self, path: str = None, giturl: str = None, **kwargs):
if not any([path, giturl]) or all([path, giturl]):
raise j.exceptions.Value("either path or giturl is required")
- for package_name in self.packages:
- package = self.get(package_name)
- ## TODO: why do we care if the path is the same and giturl is the same? adding it 100 times should just add it once?
- # if path and path == package.path:
- # raise j.exceptions.Value("Package with the same path already exists")
- # if giturl and giturl == package.giturl:
- # raise j.exceptions.Value("Package with the same giturl already exists")
-
if giturl:
url = urlparse(giturl)
url_parts = url.path.lstrip("/").split("/", 4)
@@ -400,15 +395,17 @@ def add(self, path: str = None, giturl: str = None, **kwargs):
path = j.sals.fs.join_paths(repo_path, repo, package_path)
package = Package(
- path=path, default_domain=self.threebot.domain, default_email=self.threebot.email, giturl=giturl
+ path=path,
+ default_domain=self.threebot.domain,
+ default_email=self.threebot.email,
+ giturl=giturl,
+ kwargs=kwargs,
)
# TODO: adding under the same name if same path and same giturl should be fine, no?
# if package.name in self.packages:
# raise j.exceptions.Value(f"Package with name {package.name} already exists")
- self.packages[package.name] = {"name": package.name, "path": package.path, "giturl": package.giturl}
-
# execute package install method
package.install(**kwargs)
@@ -416,6 +413,13 @@ def add(self, path: str = None, giturl: str = None, **kwargs):
if self.threebot.started:
self.install(package)
self.threebot.nginx.reload()
+ self.packages[package.name] = {
+ "name": package.name,
+ "path": package.path,
+ "giturl": package.giturl,
+ "kwargs": package.kwargs,
+ }
+
self.save()
# Return updated package info
diff --git a/poetry.lock b/poetry.lock
index 2992c947d1..61f8645f5f 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -235,7 +235,7 @@ description = "Foreign Function Interface for Python calling C code."
name = "cffi"
optional = false
python-versions = "*"
-version = "1.14.2"
+version = "1.14.3"
[package.dependencies]
pycparser = "*"
@@ -376,14 +376,17 @@ description = "A Python library for the Docker Engine API."
name = "docker"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
-version = "4.3.1"
+version = "4.2.2"
[package.dependencies]
-pywin32 = "227"
requests = ">=2.14.2,<2.18.0 || >2.18.0"
six = ">=1.4.0"
websocket-client = ">=0.32.0"
+[package.dependencies.pypiwin32]
+python = ">=3.6"
+version = "223"
+
[package.extras]
ssh = ["paramiko (>=2.4.2)"]
tls = ["pyOpenSSL (>=17.5.0)", "cryptography (>=1.3.4)", "idna (>=2.0.0)"]
@@ -484,7 +487,6 @@ gitdb = ">=4.0.1,<5"
[[package]]
category = "main"
description = "Lightweight in-process concurrent programming"
-marker = "platform_python_implementation == \"CPython\""
name = "greenlet"
optional = false
python-versions = "*"
@@ -516,7 +518,7 @@ description = "Read metadata from Python packages"
name = "importlib-metadata"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
-version = "1.7.0"
+version = "2.0.0"
[package.dependencies]
zipp = ">=0.5"
@@ -605,7 +607,7 @@ description = "system automation, configuration management and RPC framework"
name = "js-ng"
optional = false
python-versions = ">=3.6,<4.0"
-version = "11.0.0a3"
+version = "11.0b3"
[package.dependencies]
GitPython = ">=3.0,<4.0"
@@ -953,7 +955,7 @@ description = "Pygments is a syntax highlighting package written in Python."
name = "pygments"
optional = false
python-versions = ">=3.5"
-version = "2.7.0"
+version = "2.7.1"
[[package]]
category = "main"
@@ -1030,6 +1032,18 @@ optional = false
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
version = "2.4.7"
+[[package]]
+category = "main"
+description = ""
+marker = "sys_platform == \"win32\" and python_version >= \"3.6\""
+name = "pypiwin32"
+optional = false
+python-versions = "*"
+version = "223"
+
+[package.dependencies]
+pywin32 = ">=223"
+
[[package]]
category = "main"
description = "Pure Python PNG image encoder/decoder"
@@ -1124,11 +1138,11 @@ version = "2020.1"
[[package]]
category = "main"
description = "Python for Window Extensions"
-marker = "sys_platform == \"win32\""
+marker = "sys_platform == \"win32\" and python_version >= \"3.6\" or sys_platform == \"win32\""
name = "pywin32"
optional = false
python-versions = "*"
-version = "227"
+version = "228"
[[package]]
category = "main"
@@ -1204,7 +1218,7 @@ description = "Twilio SendGrid library for Python"
name = "sendgrid"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
-version = "6.4.6"
+version = "6.4.7"
[package.dependencies]
python-http-client = ">=3.2.1"
@@ -1400,11 +1414,11 @@ description = "Backport of pathlib-compatible object wrapper for zip files"
name = "zipp"
optional = false
python-versions = ">=3.6"
-version = "3.1.0"
+version = "3.2.0"
[package.extras]
docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"]
-testing = ["jaraco.itertools", "func-timeout"]
+testing = ["pytest (>=3.5,<3.7.3 || >3.7.3)", "pytest-checkdocs (>=1.2.3)", "pytest-flake8", "pytest-cov", "jaraco.test (>=3.2.0)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"]
[[package]]
category = "main"
@@ -1467,7 +1481,7 @@ description = "Very basic event publishing system"
name = "zope.event"
optional = false
python-versions = "*"
-version = "4.4"
+version = "4.5.0"
[package.dependencies]
setuptools = "*"
@@ -1525,7 +1539,7 @@ docs = ["sphinx", "repoze.sphinx.autointerface"]
test = ["zope.security", "zope.testrunner"]
[metadata]
-content-hash = "93d0fedf775e3437625cdac6cb0c2156dc35708f60cb229c01391dec914105db"
+content-hash = "ea21cbe644cdba5266fc47f01b8c6742e43512f28f89d43e82bdd63cf16203d7"
python-versions = "^3.6"
[metadata.files]
@@ -1608,34 +1622,42 @@ certifi = [
{file = "certifi-2019.11.28.tar.gz", hash = "sha256:25b64c7da4cd7479594d035c08c2d809eb4aab3a26e5a990ea98cc450c320f1f"},
]
cffi = [
- {file = "cffi-1.14.2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:da9d3c506f43e220336433dffe643fbfa40096d408cb9b7f2477892f369d5f82"},
- {file = "cffi-1.14.2-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:23e44937d7695c27c66a54d793dd4b45889a81b35c0751ba91040fe825ec59c4"},
- {file = "cffi-1.14.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:0da50dcbccd7cb7e6c741ab7912b2eff48e85af217d72b57f80ebc616257125e"},
- {file = "cffi-1.14.2-cp27-cp27m-win32.whl", hash = "sha256:76ada88d62eb24de7051c5157a1a78fd853cca9b91c0713c2e973e4196271d0c"},
- {file = "cffi-1.14.2-cp27-cp27m-win_amd64.whl", hash = "sha256:15a5f59a4808f82d8ec7364cbace851df591c2d43bc76bcbe5c4543a7ddd1bf1"},
- {file = "cffi-1.14.2-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:e4082d832e36e7f9b2278bc774886ca8207346b99f278e54c9de4834f17232f7"},
- {file = "cffi-1.14.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:57214fa5430399dffd54f4be37b56fe22cedb2b98862550d43cc085fb698dc2c"},
- {file = "cffi-1.14.2-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:6843db0343e12e3f52cc58430ad559d850a53684f5b352540ca3f1bc56df0731"},
- {file = "cffi-1.14.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:577791f948d34d569acb2d1add5831731c59d5a0c50a6d9f629ae1cefd9ca4a0"},
- {file = "cffi-1.14.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:8662aabfeab00cea149a3d1c2999b0731e70c6b5bac596d95d13f643e76d3d4e"},
- {file = "cffi-1.14.2-cp35-cp35m-win32.whl", hash = "sha256:837398c2ec00228679513802e3744d1e8e3cb1204aa6ad408b6aff081e99a487"},
- {file = "cffi-1.14.2-cp35-cp35m-win_amd64.whl", hash = "sha256:bf44a9a0141a082e89c90e8d785b212a872db793a0080c20f6ae6e2a0ebf82ad"},
- {file = "cffi-1.14.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:29c4688ace466a365b85a51dcc5e3c853c1d283f293dfcc12f7a77e498f160d2"},
- {file = "cffi-1.14.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:99cc66b33c418cd579c0f03b77b94263c305c389cb0c6972dac420f24b3bf123"},
- {file = "cffi-1.14.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:65867d63f0fd1b500fa343d7798fa64e9e681b594e0a07dc934c13e76ee28fb1"},
- {file = "cffi-1.14.2-cp36-cp36m-win32.whl", hash = "sha256:f5033952def24172e60493b68717792e3aebb387a8d186c43c020d9363ee7281"},
- {file = "cffi-1.14.2-cp36-cp36m-win_amd64.whl", hash = "sha256:7057613efefd36cacabbdbcef010e0a9c20a88fc07eb3e616019ea1692fa5df4"},
- {file = "cffi-1.14.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6539314d84c4d36f28d73adc1b45e9f4ee2a89cdc7e5d2b0a6dbacba31906798"},
- {file = "cffi-1.14.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:672b539db20fef6b03d6f7a14b5825d57c98e4026401fce838849f8de73fe4d4"},
- {file = "cffi-1.14.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:95e9094162fa712f18b4f60896e34b621df99147c2cee216cfa8f022294e8e9f"},
- {file = "cffi-1.14.2-cp37-cp37m-win32.whl", hash = "sha256:b9aa9d8818c2e917fa2c105ad538e222a5bce59777133840b93134022a7ce650"},
- {file = "cffi-1.14.2-cp37-cp37m-win_amd64.whl", hash = "sha256:e4b9b7af398c32e408c00eb4e0d33ced2f9121fd9fb978e6c1b57edd014a7d15"},
- {file = "cffi-1.14.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e613514a82539fc48291d01933951a13ae93b6b444a88782480be32245ed4afa"},
- {file = "cffi-1.14.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:9b219511d8b64d3fa14261963933be34028ea0e57455baf6781fe399c2c3206c"},
- {file = "cffi-1.14.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:c0b48b98d79cf795b0916c57bebbc6d16bb43b9fc9b8c9f57f4cf05881904c75"},
- {file = "cffi-1.14.2-cp38-cp38-win32.whl", hash = "sha256:15419020b0e812b40d96ec9d369b2bc8109cc3295eac6e013d3261343580cc7e"},
- {file = "cffi-1.14.2-cp38-cp38-win_amd64.whl", hash = "sha256:12a453e03124069b6896107ee133ae3ab04c624bb10683e1ed1c1663df17c13c"},
- {file = "cffi-1.14.2.tar.gz", hash = "sha256:ae8f34d50af2c2154035984b8b5fc5d9ed63f32fe615646ab435b05b132ca91b"},
+ {file = "cffi-1.14.3-2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:3eeeb0405fd145e714f7633a5173318bd88d8bbfc3dd0a5751f8c4f70ae629bc"},
+ {file = "cffi-1.14.3-2-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:cb763ceceae04803adcc4e2d80d611ef201c73da32d8f2722e9d0ab0c7f10768"},
+ {file = "cffi-1.14.3-2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:44f60519595eaca110f248e5017363d751b12782a6f2bd6a7041cba275215f5d"},
+ {file = "cffi-1.14.3-2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c53af463f4a40de78c58b8b2710ade243c81cbca641e34debf3396a9640d6ec1"},
+ {file = "cffi-1.14.3-2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:33c6cdc071ba5cd6d96769c8969a0531be2d08c2628a0143a10a7dcffa9719ca"},
+ {file = "cffi-1.14.3-2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c11579638288e53fc94ad60022ff1b67865363e730ee41ad5e6f0a17188b327a"},
+ {file = "cffi-1.14.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:3cb3e1b9ec43256c4e0f8d2837267a70b0e1ca8c4f456685508ae6106b1f504c"},
+ {file = "cffi-1.14.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:f0620511387790860b249b9241c2f13c3a80e21a73e0b861a2df24e9d6f56730"},
+ {file = "cffi-1.14.3-cp27-cp27m-win32.whl", hash = "sha256:005f2bfe11b6745d726dbb07ace4d53f057de66e336ff92d61b8c7e9c8f4777d"},
+ {file = "cffi-1.14.3-cp27-cp27m-win_amd64.whl", hash = "sha256:2f9674623ca39c9ebe38afa3da402e9326c245f0f5ceff0623dccdac15023e05"},
+ {file = "cffi-1.14.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:09e96138280241bd355cd585148dec04dbbedb4f46128f340d696eaafc82dd7b"},
+ {file = "cffi-1.14.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:3363e77a6176afb8823b6e06db78c46dbc4c7813b00a41300a4873b6ba63b171"},
+ {file = "cffi-1.14.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0ef488305fdce2580c8b2708f22d7785ae222d9825d3094ab073e22e93dfe51f"},
+ {file = "cffi-1.14.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:0b1ad452cc824665ddc682400b62c9e4f5b64736a2ba99110712fdee5f2505c4"},
+ {file = "cffi-1.14.3-cp35-cp35m-win32.whl", hash = "sha256:85ba797e1de5b48aa5a8427b6ba62cf69607c18c5d4eb747604b7302f1ec382d"},
+ {file = "cffi-1.14.3-cp35-cp35m-win_amd64.whl", hash = "sha256:e66399cf0fc07de4dce4f588fc25bfe84a6d1285cc544e67987d22663393926d"},
+ {file = "cffi-1.14.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:15f351bed09897fbda218e4db5a3d5c06328862f6198d4fb385f3e14e19decb3"},
+ {file = "cffi-1.14.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:4d7c26bfc1ea9f92084a1d75e11999e97b62d63128bcc90c3624d07813c52808"},
+ {file = "cffi-1.14.3-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:23e5d2040367322824605bc29ae8ee9175200b92cb5483ac7d466927a9b3d537"},
+ {file = "cffi-1.14.3-cp36-cp36m-win32.whl", hash = "sha256:a624fae282e81ad2e4871bdb767e2c914d0539708c0f078b5b355258293c98b0"},
+ {file = "cffi-1.14.3-cp36-cp36m-win_amd64.whl", hash = "sha256:de31b5164d44ef4943db155b3e8e17929707cac1e5bd2f363e67a56e3af4af6e"},
+ {file = "cffi-1.14.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:f92cdecb618e5fa4658aeb97d5eb3d2f47aa94ac6477c6daf0f306c5a3b9e6b1"},
+ {file = "cffi-1.14.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:22399ff4870fb4c7ef19fff6eeb20a8bbf15571913c181c78cb361024d574579"},
+ {file = "cffi-1.14.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:f4eae045e6ab2bb54ca279733fe4eb85f1effda392666308250714e01907f394"},
+ {file = "cffi-1.14.3-cp37-cp37m-win32.whl", hash = "sha256:b0358e6fefc74a16f745afa366acc89f979040e0cbc4eec55ab26ad1f6a9bfbc"},
+ {file = "cffi-1.14.3-cp37-cp37m-win_amd64.whl", hash = "sha256:6642f15ad963b5092d65aed022d033c77763515fdc07095208f15d3563003869"},
+ {file = "cffi-1.14.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:2791f68edc5749024b4722500e86303a10d342527e1e3bcac47f35fbd25b764e"},
+ {file = "cffi-1.14.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:529c4ed2e10437c205f38f3691a68be66c39197d01062618c55f74294a4a4828"},
+ {file = "cffi-1.14.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8f0f1e499e4000c4c347a124fa6a27d37608ced4fe9f7d45070563b7c4c370c9"},
+ {file = "cffi-1.14.3-cp38-cp38-win32.whl", hash = "sha256:3b8eaf915ddc0709779889c472e553f0d3e8b7bdf62dab764c8921b09bf94522"},
+ {file = "cffi-1.14.3-cp38-cp38-win_amd64.whl", hash = "sha256:bbd2f4dfee1079f76943767fce837ade3087b578aeb9f69aec7857d5bf25db15"},
+ {file = "cffi-1.14.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:cc75f58cdaf043fe6a7a6c04b3b5a0e694c6a9e24050967747251fb80d7bce0d"},
+ {file = "cffi-1.14.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:bf39a9e19ce7298f1bd6a9758fa99707e9e5b1ebe5e90f2c3913a47bc548747c"},
+ {file = "cffi-1.14.3-cp39-cp39-win32.whl", hash = "sha256:d80998ed59176e8cba74028762fbd9b9153b9afc71ea118e63bbf5d4d0f9552b"},
+ {file = "cffi-1.14.3-cp39-cp39-win_amd64.whl", hash = "sha256:c150eaa3dadbb2b5339675b88d4573c1be3cb6f2c33a6c83387e10cc0bf05bd3"},
+ {file = "cffi-1.14.3.tar.gz", hash = "sha256:f92f789e4f9241cd262ad7a555ca2c648a98178a953af117ef7fad46aa1d5591"},
]
chardet = [
{file = "chardet-3.0.4-py2.py3-none-any.whl", hash = "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"},
@@ -1704,8 +1726,8 @@ dnspython = [
{file = "dnspython-1.16.0.zip", hash = "sha256:36c5e8e38d4369a08b6780b7f27d790a292b2b08eea01607865bf0936c558e01"},
]
docker = [
- {file = "docker-4.3.1-py2.py3-none-any.whl", hash = "sha256:13966471e8bc23b36bfb3a6fb4ab75043a5ef1dac86516274777576bed3b9828"},
- {file = "docker-4.3.1.tar.gz", hash = "sha256:bad94b8dd001a8a4af19ce4becc17f41b09f228173ffe6a4e0355389eef142f2"},
+ {file = "docker-4.2.2-py2.py3-none-any.whl", hash = "sha256:03a46400c4080cb6f7aa997f881ddd84fef855499ece219d75fbdb53289c17ab"},
+ {file = "docker-4.2.2.tar.gz", hash = "sha256:26eebadce7e298f55b76a88c4f8802476c5eaddbdbe38dbc6cce8781c47c9b54"},
]
docopt = [
{file = "docopt-0.6.2.tar.gz", hash = "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491"},
@@ -1785,8 +1807,8 @@ idna-ssl = [
{file = "idna-ssl-1.1.0.tar.gz", hash = "sha256:a933e3bb13da54383f9e8f35dc4f9cb9eb9b3b78c6b36f311254d6d0d92c6c7c"},
]
importlib-metadata = [
- {file = "importlib_metadata-1.7.0-py2.py3-none-any.whl", hash = "sha256:dc15b2969b4ce36305c51eebe62d418ac7791e9a157911d58bfb1f9ccd8e2070"},
- {file = "importlib_metadata-1.7.0.tar.gz", hash = "sha256:90bb658cdbbf6d1735b6341ce708fc7024a3e14e99ffdc5783edea9f9b077f83"},
+ {file = "importlib_metadata-2.0.0-py2.py3-none-any.whl", hash = "sha256:cefa1a2f919b866c5beb7c9f7b0ebb4061f30a8a9bf16d609b000e2dfaceb9c3"},
+ {file = "importlib_metadata-2.0.0.tar.gz", hash = "sha256:77a540690e24b0305878c37ffd421785a6f7e53c8b5720d211b211de8d0e95da"},
]
importlib-resources = [
{file = "importlib_resources-3.0.0-py2.py3-none-any.whl", hash = "sha256:d028f66b66c0d5732dae86ba4276999855e162a749c92620a38c1d779ed138a7"},
@@ -1810,8 +1832,8 @@ josepy = [
{file = "josepy-1.4.0.tar.gz", hash = "sha256:c37ff4b93606e6a452b72cdb992da5e0544be12912fac01b31ddbdd61f6d5bd0"},
]
js-ng = [
- {file = "js-ng-11.0.0a3.tar.gz", hash = "sha256:b5dc7e035840a1b752f6d28ba85d5fcaf87c771b5ebfc9a60c261c68c31f641c"},
- {file = "js_ng-11.0.0a3-py3-none-any.whl", hash = "sha256:a5b142288ea860aac4b429ed4ac6006cac8767516a12124679e928ec7a836c8d"},
+ {file = "js-ng-11.0b3.tar.gz", hash = "sha256:4341b72c321e35ff47711d7ac66b3923eab5debe69aee43bae97695ce36b4a52"},
+ {file = "js_ng-11.0b3-py3-none-any.whl", hash = "sha256:a98c0b67d2893119d5b725cec13c06c644d37d8d6b4443c8857e07d264d5c889"},
]
jsonpickle = [
{file = "jsonpickle-1.4.1-py2.py3-none-any.whl", hash = "sha256:8919c166bac0574e3d74425c7559434062002d9dfc0ac2afa6dc746ba4a19439"},
@@ -2062,8 +2084,8 @@ pygithub = [
{file = "PyGithub-1.53.tar.gz", hash = "sha256:776befaddab9d8fddd525d52a6ca1ac228cf62b5b1e271836d766f4925e1452e"},
]
pygments = [
- {file = "Pygments-2.7.0-py3-none-any.whl", hash = "sha256:2df50d16b45b977217e02cba6c8422aaddb859f3d0570a88e09b00eafae89c6e"},
- {file = "Pygments-2.7.0.tar.gz", hash = "sha256:2594e8fdb06fef91552f86f4fd3a244d148ab24b66042036e64f29a291515048"},
+ {file = "Pygments-2.7.1-py3-none-any.whl", hash = "sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998"},
+ {file = "Pygments-2.7.1.tar.gz", hash = "sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7"},
]
pyjwkest = [
{file = "pyjwkest-1.4.2.tar.gz", hash = "sha256:5560fd5ba08655f29ff6ad1df1e15dc05abc9d976fcbcec8d2b5167f49b70222"},
@@ -2106,6 +2128,10 @@ pyparsing = [
{file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"},
{file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"},
]
+pypiwin32 = [
+ {file = "pypiwin32-223-py3-none-any.whl", hash = "sha256:67adf399debc1d5d14dffc1ab5acacb800da569754fafdc576b2a039485aa775"},
+ {file = "pypiwin32-223.tar.gz", hash = "sha256:71be40c1fbd28594214ecaecb58e7aa8b708eabfa0125c8a109ebd51edbd776a"},
+]
pypng = [
{file = "pypng-0.0.20.tar.gz", hash = "sha256:1032833440c91bafee38a42c38c02d00431b24c42927feb3e63b104d8550170b"},
]
@@ -2140,18 +2166,18 @@ pytz = [
{file = "pytz-2020.1.tar.gz", hash = "sha256:c35965d010ce31b23eeb663ed3cc8c906275d6be1a34393a1d73a41febf4a048"},
]
pywin32 = [
- {file = "pywin32-227-cp27-cp27m-win32.whl", hash = "sha256:371fcc39416d736401f0274dd64c2302728c9e034808e37381b5e1b22be4a6b0"},
- {file = "pywin32-227-cp27-cp27m-win_amd64.whl", hash = "sha256:4cdad3e84191194ea6d0dd1b1b9bdda574ff563177d2adf2b4efec2a244fa116"},
- {file = "pywin32-227-cp35-cp35m-win32.whl", hash = "sha256:f4c5be1a293bae0076d93c88f37ee8da68136744588bc5e2be2f299a34ceb7aa"},
- {file = "pywin32-227-cp35-cp35m-win_amd64.whl", hash = "sha256:a929a4af626e530383a579431b70e512e736e9588106715215bf685a3ea508d4"},
- {file = "pywin32-227-cp36-cp36m-win32.whl", hash = "sha256:300a2db938e98c3e7e2093e4491439e62287d0d493fe07cce110db070b54c0be"},
- {file = "pywin32-227-cp36-cp36m-win_amd64.whl", hash = "sha256:9b31e009564fb95db160f154e2aa195ed66bcc4c058ed72850d047141b36f3a2"},
- {file = "pywin32-227-cp37-cp37m-win32.whl", hash = "sha256:47a3c7551376a865dd8d095a98deba954a98f326c6fe3c72d8726ca6e6b15507"},
- {file = "pywin32-227-cp37-cp37m-win_amd64.whl", hash = "sha256:31f88a89139cb2adc40f8f0e65ee56a8c585f629974f9e07622ba80199057511"},
- {file = "pywin32-227-cp38-cp38-win32.whl", hash = "sha256:7f18199fbf29ca99dff10e1f09451582ae9e372a892ff03a28528a24d55875bc"},
- {file = "pywin32-227-cp38-cp38-win_amd64.whl", hash = "sha256:7c1ae32c489dc012930787f06244426f8356e129184a02c25aef163917ce158e"},
- {file = "pywin32-227-cp39-cp39-win32.whl", hash = "sha256:c054c52ba46e7eb6b7d7dfae4dbd987a1bb48ee86debe3f245a2884ece46e295"},
- {file = "pywin32-227-cp39-cp39-win_amd64.whl", hash = "sha256:f27cec5e7f588c3d1051651830ecc00294f90728d19c3bf6916e6dba93ea357c"},
+ {file = "pywin32-228-cp27-cp27m-win32.whl", hash = "sha256:37dc9935f6a383cc744315ae0c2882ba1768d9b06700a70f35dc1ce73cd4ba9c"},
+ {file = "pywin32-228-cp27-cp27m-win_amd64.whl", hash = "sha256:11cb6610efc2f078c9e6d8f5d0f957620c333f4b23466931a247fb945ed35e89"},
+ {file = "pywin32-228-cp35-cp35m-win32.whl", hash = "sha256:1f45db18af5d36195447b2cffacd182fe2d296849ba0aecdab24d3852fbf3f80"},
+ {file = "pywin32-228-cp35-cp35m-win_amd64.whl", hash = "sha256:6e38c44097a834a4707c1b63efa9c2435f5a42afabff634a17f563bc478dfcc8"},
+ {file = "pywin32-228-cp36-cp36m-win32.whl", hash = "sha256:ec16d44b49b5f34e99eb97cf270806fdc560dff6f84d281eb2fcb89a014a56a9"},
+ {file = "pywin32-228-cp36-cp36m-win_amd64.whl", hash = "sha256:a60d795c6590a5b6baeacd16c583d91cce8038f959bd80c53bd9a68f40130f2d"},
+ {file = "pywin32-228-cp37-cp37m-win32.whl", hash = "sha256:af40887b6fc200eafe4d7742c48417529a8702dcc1a60bf89eee152d1d11209f"},
+ {file = "pywin32-228-cp37-cp37m-win_amd64.whl", hash = "sha256:00eaf43dbd05ba6a9b0080c77e161e0b7a601f9a3f660727a952e40140537de7"},
+ {file = "pywin32-228-cp38-cp38-win32.whl", hash = "sha256:fa6ba028909cfc64ce9e24bcf22f588b14871980d9787f1e2002c99af8f1850c"},
+ {file = "pywin32-228-cp38-cp38-win_amd64.whl", hash = "sha256:9b3466083f8271e1a5eb0329f4e0d61925d46b40b195a33413e0905dccb285e8"},
+ {file = "pywin32-228-cp39-cp39-win32.whl", hash = "sha256:ed74b72d8059a6606f64842e7917aeee99159ebd6b8d6261c518d002837be298"},
+ {file = "pywin32-228-cp39-cp39-win_amd64.whl", hash = "sha256:8319bafdcd90b7202c50d6014efdfe4fde9311b3ff15fd6f893a45c0868de203"},
]
pyyaml = [
{file = "PyYAML-5.3.1-cp27-cp27m-win32.whl", hash = "sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f"},
@@ -2187,8 +2213,8 @@ secretconf = [
{file = "secretconf-0.1.2.tar.gz", hash = "sha256:89aa68c7bcd995aad277a0dadaaf9f92c2e22d56a17bc4f923a01077e097eb47"},
]
sendgrid = [
- {file = "sendgrid-6.4.6-py3-none-any.whl", hash = "sha256:904dbfe2e0da2bf07f51202af837cac029b160a829e041e1972da2489a928d3b"},
- {file = "sendgrid-6.4.6.tar.gz", hash = "sha256:145436de333ad4c38ea4b697db820de6e1ba70bf3a4aecc88818758438e25fcb"},
+ {file = "sendgrid-6.4.7-py3-none-any.whl", hash = "sha256:177f959aab5882297fe07efe7db3c67d84fef2d55e3e1038edd4a20f3eb5f059"},
+ {file = "sendgrid-6.4.7.tar.gz", hash = "sha256:f6a4608e696e5851dd12a716abf97240f947027855e2205dff112c3fdc1bc127"},
]
six = [
{file = "six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"},
@@ -2274,8 +2300,8 @@ yarl = [
{file = "yarl-1.5.1.tar.gz", hash = "sha256:c22c75b5f394f3d47105045ea551e08a3e804dc7e01b37800ca35b58f856c3d6"},
]
zipp = [
- {file = "zipp-3.1.0-py3-none-any.whl", hash = "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b"},
- {file = "zipp-3.1.0.tar.gz", hash = "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96"},
+ {file = "zipp-3.2.0-py3-none-any.whl", hash = "sha256:43f4fa8d8bb313e65d8323a3952ef8756bf40f9a5c3ea7334be23ee4ec8278b6"},
+ {file = "zipp-3.2.0.tar.gz", hash = "sha256:b52f22895f4cfce194bc8172f3819ee8de7540aa6d873535a8668b730b8b411f"},
]
"zope.component" = [
{file = "zope.component-4.6.2-py2.py3-none-any.whl", hash = "sha256:607628e4c84f7887a69a958542b5c304663e726b73aba0882e3a3f059bff14f3"},
@@ -2290,8 +2316,8 @@ zipp = [
{file = "zope.deprecation-4.4.0.tar.gz", hash = "sha256:0d453338f04bacf91bbfba545d8bcdf529aa829e67b705eac8c1a7fdce66e2df"},
]
"zope.event" = [
- {file = "zope.event-4.4-py2.py3-none-any.whl", hash = "sha256:d8e97d165fd5a0997b45f5303ae11ea3338becfe68c401dd88ffd2113fe5cae7"},
- {file = "zope.event-4.4.tar.gz", hash = "sha256:69c27debad9bdacd9ce9b735dad382142281ac770c4a432b533d6d65c4614bcf"},
+ {file = "zope.event-4.5.0-py2.py3-none-any.whl", hash = "sha256:2666401939cdaa5f4e0c08cf7f20c9b21423b95e88f4675b1443973bdb080c42"},
+ {file = "zope.event-4.5.0.tar.gz", hash = "sha256:5e76517f5b9b119acf37ca8819781db6c16ea433f7e2062c4afc2b6fbedb1330"},
]
"zope.hookable" = [
{file = "zope.hookable-5.0.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:d3b3b3eedfdbf6b02898216e85aa6baf50207f4378a2a6803d6d47650cd37031"},
diff --git a/pyproject.toml b/pyproject.toml
index fc413d7aa4..19bfdaf4dc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -2,14 +2,15 @@
name = "js-sdk"
packages = [{ include = "jumpscale" }]
-version = "11.0b6"
+version = "11.0b7"
description = "SDK for threefold grid"
authors = ["xmonader "]
license = "Apache-2.0"
[tool.poetry.dependencies]
cryptography = "3.0"
-js-ng = "11.0b3"
+js-ng = "11.0b4"
+greenlet = "0.4.16"
python = "^3.6"
pillow = "^6.1"
sendgrid = "^6.0"
diff --git a/tests/tools/nginx/test_nginx.py b/tests/tools/nginx/test_nginx.py
new file mode 100644
index 0000000000..bd73679870
--- /dev/null
+++ b/tests/tools/nginx/test_nginx.py
@@ -0,0 +1,63 @@
+from unittest import TestCase
+from jumpscale.loader import j
+import gevent
+
+
+class TestNginx(TestCase):
+ def _get_instance(self):
+ self.instance_name = j.data.random_names.random_name()
+ nginx_instance = j.tools.nginx.new(self.instance_name)
+ return nginx_instance
+
+ def _get_port(self, proc):
+ conn = proc.connections()[0]
+ return conn.laddr[1]
+
+ def test001_nginx_start_stop(self):
+ """Test case for start NGINX server and stop it.
+ **Test scenario**
+ #. Start nginx server .
+ #. check if it's running.
+ #. Stop nginx server server .
+ #. check if it's running.
+ """
+ nginx_instance = self._get_instance()
+ nginx_instance.save()
+ nginx_instance.start()
+ j.logger.info("NGINX server started")
+
+ self.assertTrue(nginx_instance.is_running())
+
+ nginx_instance.stop()
+ for _ in range(10):
+ if j.sals.nettools.tcp_connection_test("127.0.0.1", 80, 2):
+ gevent.sleep(0.2)
+ else:
+ break
+ j.logger.info("NGINX server stopped")
+ self.assertFalse(nginx_instance.is_running())
+
+ def test002_nginx_restart(self):
+ """Test case for start NGINX server and stop it.
+ **Test scenario**
+ #. Start nginx server .
+ #. check if it's running.
+ #. restart nginx server server .
+ #. check if it's running.
+ """
+
+ nginx_instance = self._get_instance()
+ nginx_instance.save()
+ nginx_instance.start()
+ j.logger.info("NGINX server started")
+
+ self.assertTrue(nginx_instance.is_running())
+
+ nginx_instance.restart()
+ j.logger.info("NGINX server restarted")
+ self.assertTrue(nginx_instance.is_running())
+
+ def tearDown(self):
+ nginx_instance = j.tools.nginx.find(self.instance_name)
+ nginx_instance.stop()
+ j.tools.nginx.delete(self.instance_name)
diff --git a/tests/tools/redis/test_redis.py b/tests/tools/redis/test_redis.py
new file mode 100644
index 0000000000..d94f985b56
--- /dev/null
+++ b/tests/tools/redis/test_redis.py
@@ -0,0 +1,82 @@
+from unittest import TestCase
+import random
+from jumpscale.loader import j
+
+
+class TestRedis(TestCase):
+ def _get_instance(self):
+ self.instance_name = j.data.random_names.random_name()
+ redis_instance = j.tools.redis.new(self.instance_name)
+ return redis_instance
+
+ def _get_port(self, proc):
+ conn = proc.connections()[0]
+ return conn.laddr[1]
+
+ def test001_redis_start_stop(self):
+ """Test case for start redis and stop it.
+ **Test scenario**
+ #. Start redis server .
+ #. check about port , pid and process.
+ #. Stop redis server .
+ #. check about pid and process.
+ """
+ port = random.randint(20000, 25000)
+ redis_instance = self._get_instance()
+ redis_instance.port = port
+ redis_instance.save()
+ redis_instance.start()
+ j.logger.info("Redis server started")
+ self.assertTrue(j.sals.nettools.wait_connection_test(redis_instance.host, redis_instance.port, 2))
+
+ self.assertTrue(redis_instance.cmd.is_running())
+ self.assertTrue(redis_instance.cmd.process)
+ self.assertTrue(redis_instance.cmd.process.pid)
+
+ proc_port = self._get_port(redis_instance.cmd.process)
+ self.assertEqual(proc_port, port)
+
+ redis_instance.stop()
+ j.logger.info("Redis server stopped")
+ self.assertFalse(j.sals.nettools.wait_connection_test(redis_instance.host, redis_instance.port, 2))
+ self.assertFalse(redis_instance.cmd.is_running())
+ self.assertFalse(redis_instance.cmd.process)
+
+    def test002_redis_restart(self):
+        """Test case for starting the redis server and restarting it.
+        **Test scenario**
+        #. Start the redis server.
+        #. Check the port, pid and process.
+        #. Restart the redis server.
+        #. Check the port, the process and that the pid changed.
+        """
+
+        port = random.randint(20000, 25000)
+        redis_instance = self._get_instance()
+        redis_instance.port = port
+        redis_instance.save()
+        redis_instance.start()
+        j.logger.info("Redis server started")
+        self.assertTrue(j.sals.nettools.wait_connection_test(redis_instance.host, redis_instance.port, 2))
+
+        self.assertTrue(redis_instance.cmd.is_running())
+        self.assertTrue(redis_instance.cmd.process)
+        self.assertTrue(redis_instance.cmd.process.pid)
+
+        pid = redis_instance.cmd.process.pid
+
+        proc_port = self._get_port(redis_instance.cmd.process)
+        self.assertTrue(redis_instance.cmd.is_running())
+        self.assertEqual(proc_port, port)
+
+        redis_instance.restart()
+        j.logger.info("Redis server restarted")
+        self.assertTrue(j.sals.nettools.wait_connection_test(redis_instance.host, redis_instance.port, 2))
+        self.assertTrue(redis_instance.cmd.is_running())
+        self.assertTrue(redis_instance.cmd.process)
+        self.assertNotEqual(redis_instance.cmd.process.pid, pid)
+
+ def tearDown(self):
+ redis_instance = j.tools.redis.find(self.instance_name)
+ redis_instance.stop()
+ j.tools.redis.delete(self.instance_name)