
Commit 350bf7c

elhimov authored and dmyger committed
replicaset: more informative error at bootstrap

Make the error more informative when trying to bootstrap a non-vshard cluster
with the `tt rs vshard bootstrap` command.

Closes #1201

@TarantoolBot document
Title: `tt rs vs bootstrap` more informative error
Make the error more informative when `tt rs vs bootstrap` is launched against
a cluster where no sharding roles are configured (for example, when launched
against a non-vshard cluster).
1 parent 831c211 commit 350bf7c
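
For illustration only, a minimal reproduction sketch of the behavior described
above: the app name and the direct subprocess invocation are assumptions,
while the command, the non-zero exit code, and the error text are taken from
the tests added in this commit.

# Hypothetical reproduction: run the command against an app whose config has
# no sharding roles and expect the new, clearer error message.
import subprocess

p = subprocess.run(
    ["tt", "rs", "vshard", "bootstrap", "cluster_app"],  # app name is an assumption
    capture_output=True,
    text=True,
)
assert p.returncode != 0
assert "sharding roles are not configured" in p.stdout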

7 files changed: +105 -21 lines changed

CHANGELOG.md

Lines changed: 2 additions & 0 deletions

@@ -18,6 +18,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
 - `tt cluster failover switch`: minor change in output that displays corresponding
   `switch-status` command with quoted URI argument so it could be copy-pasted for
   subsequent launch as-is.
+- `tt rs vshard bootstrap`: make more informative error message when sharding roles
+  are not configured (for example when launched against non-vshard cluster).
 
 ### Fixed
 

cli/replicaset/cconfig.go

Lines changed: 2 additions & 1 deletion

@@ -26,7 +26,8 @@ var (
 	//go:embed lua/cconfig/bootstrap_vshard_body.lua
 	cconfigBootstrapVShardBody string
 
-	cconfigGetShardingRolesBody = "return require('config'):get().sharding.roles"
+	//go:embed lua/cconfig/get_sharding_roles_body.lua
+	cconfigGetShardingRolesBody string
 )
 
 // cconfigTopology used to export topology information from a Tarantool

cli/replicaset/lua/cconfig/bootstrap_vshard_body.lua

Lines changed: 7 additions & 3 deletions

@@ -1,12 +1,15 @@
+local sharding = require('config'):get().sharding
+if sharding == nil or sharding.roles == nil then
+    error("sharding roles are not configured, please make sure managed cluster is sharded")
+end
+
 local ok, vshard = pcall(require, 'vshard')
 if not ok then
     error("failed to require vshard module")
 end
-local fiber = require('fiber')
-local config = require('config')
 
 local is_router = false
-for _, role in ipairs(config:get().sharding.roles) do
+for _, role in ipairs(sharding.roles) do
     if role == "router" then
         is_router = true
         break
@@ -19,6 +22,7 @@ end
 
 pcall(vshard.router.master_search_wakeup)
 
+local fiber = require('fiber')
 local timeout = ...
 local deadline = fiber.time() + timeout
 local ok, err

cli/replicaset/lua/cconfig/get_sharding_roles_body.lua (new file)

Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+local sharding = require('config'):get().sharding
+if sharding == nil or sharding.roles == nil then
+    error("sharding roles are not configured, please make sure managed cluster is sharded")
+end
+return sharding.roles

test/conftest.py

Lines changed: 25 additions & 17 deletions

@@ -1,3 +1,4 @@
+import itertools
 import os
 import platform
 import shutil
@@ -277,34 +278,41 @@ def tt_app(tt, tt_path, tt_instances, tt_running_targets, tt_post_start):
     yield app
 
 
+# Fixture to be used to avoid port collision in different tarantool instances.
+@pytest.fixture(scope="session")
+def port_factory():
+    # It is expected that step by step existent tests with hardcoded ports
+    # are adapted to use this fixture. It is also planned that the new tests
+    # are able to be run in parallel so for now let's start far beyond the
+    # default 3301 that is widely used in existent tests to avoid collision.
+    ports = itertools.count(5501)
+
+    def _port_factory():
+        return next(ports)
+
+    return _port_factory
+
+
 @pytest.fixture
 def cluster_params():
-    return None
+    return tt_helper.make_cluster_params(dict())
 
 
 @pytest.fixture
-def cluster(request, tt, cluster_params):
+def cluster(request, tt, cluster_params, port_factory):
     if utils.is_tarantool_less_3():
         pytest.skip("centralized config requires Tarantool v3.x")
 
-    params = dict(
-        app_name="cluster_app",
-        num_replicasets=1,
-        num_replicas=3,
-        username="client",
-        password="secret",
-    )
-    if cluster_params is not None:
-        params.update(cluster_params)
-
     input_params = [
-        params["num_replicasets"],
-        params["num_replicas"],
-        params["username"],
-        params["password"],
+        cluster_params["num_replicasets"],
+        cluster_params["num_replicas"],
+        cluster_params["username"],
+        cluster_params["password"],
     ]
-    app = tt_helper.TtCluster(tt, params["app_name"], input_params)
+    app = tt_helper.TtCluster(tt, cluster_params["app_name"], input_params)
     request.addfinalizer(lambda: app.stop("--yes"))
+    app.update_ports(cluster_params["host"], port_factory)
+
     return app
 
 
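A minimal sketch of how a test module could consume the fixtures above; the
override values and the test body are assumptions, only make_cluster_params(),
port_factory, the cluster fixture, and TtCluster.instances come from this
commit.

import pytest

import tt_helper


@pytest.fixture
def cluster_params():
    # Per-module override: keys not listed here keep the defaults from
    # make_cluster_params() (app_name, username, password, host, ...).
    return tt_helper.make_cluster_params(dict(num_replicasets=2, num_replicas=2))


def test_endpoints_are_unique(cluster):
    # update_ports() assigned every instance its own port from the session-wide
    # counter, so no two endpoints should collide.
    endpoints = [inst["endpoint"] for inst in cluster.instances]
    assert len(endpoints) == len(set(endpoints))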

test/integration/replicaset/test_replicaset_vshard.py

Lines changed: 16 additions & 0 deletions

@@ -352,3 +352,19 @@ def test_vshard_bootstrap_not_enough_timeout(tt_cmd, vshard_cconfig_app_timeout_
     assert rc == 1
     assert "failed to bootstrap vshard" in out
     assert "attempt to index field '_configdata_applied' (a nil value)" in out
+
+
+@pytest.mark.parametrize("target_type", ["APP", "INST", "URI"])
+def test_vshard_bootstrap_non_vshard(tt, cluster, target_type):
+    targets = {
+        "APP": cluster.app_name,
+        "INST": f"{cluster.app_name}:{cluster.instances[0]['name']}",
+        "URI": f"client:secret@{cluster.instances[0]['endpoint']}",
+    }
+
+    p = cluster.start()
+    assert p.returncode == 0
+    assert cluster.wait_for_running(5)
+    p = tt.run("rs", "vshard", "bootstrap", targets[target_type])
+    assert p.returncode != 0
+    assert "sharding roles are not configured" in p.stdout

test/tt_helper.py

Lines changed: 48 additions & 0 deletions

@@ -118,11 +118,27 @@ def __init__(self, tt, app_name, input_params):
         input = "".join(["\n" if x is None else f"{x}\n" for x in input_params])
         p = self.__tt.run("create", "cluster", "--name", self.__app_name, input=input)
         assert p.returncode == 0
+        self.__instances = None
 
     @property
     def app_name(self):
         return self.__app_name
 
+    @property
+    def instances(self):
+        if self.__instances is None:
+            self.__instances = []
+            for group in self.config["groups"].values():
+                for rs in group["replicasets"].values():
+                    for inst_name, inst in rs["instances"].items():
+                        self.__instances.append(
+                            dict(
+                                name=inst_name,
+                                endpoint=inst["iproto"]["listen"][0]["uri"],
+                            ),
+                        )
+        return self.__instances
+
     @property
     def config_path(self):
         return self.__tt.work_dir / self.__app_name / "config.yaml"
@@ -133,6 +149,7 @@ def config(self):
 
     @config.setter
    def config(self, config):
+        self.__instances = None
         self.config_path.write_text(yaml.dump(config))
 
     def start(self, *args):
@@ -165,6 +182,37 @@ def update_config_leaves(self, other):
         utils.update_dict_leaves(config, other)
         self.config = config
 
+    def update_instances_config(self, configure_instance_func, *args):
+        groups = {}
+        for group_name, group in self.config["groups"].items():
+            replicasets = {}
+            for rs_name, rs in group["replicasets"].items():
+                instances = {}
+                for inst_name in rs["instances"]:
+                    instances[inst_name] = configure_instance_func(*args)
+                replicasets[rs_name] = {"instances": instances}
+            groups[group_name] = {"replicasets": replicasets}
+
+        self.update_config_leaves({"groups": groups})
+
+    def update_ports(self, host, port_factory):
+        def configure_instance_port(host, port_factory):
+            return {"iproto": {"listen": [{"uri": f"{host}:{port_factory()}"}]}}
+
+        self.update_instances_config(configure_instance_port, host, port_factory)
+
+
+def make_cluster_params(params):
+    default_params = dict(
+        app_name="cluster_app",
+        num_replicasets=1,
+        num_replicas=3,
+        username="client",
+        password="secret",
+        host="127.0.0.1",
+    )
+    return default_params | params
+
 
 def status(tt, *args):
     rc, out = tt.exec("status", *args)
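For reference, a small sketch of how the new helpers compose, assuming
test/tt_helper.py is importable as tt_helper; the concrete values below are
illustrative only.

import itertools

import tt_helper

# make_cluster_params() merges overrides into the defaults with the dict union
# operator, so the caller's keys win.
params = tt_helper.make_cluster_params(dict(num_replicas=1, host="localhost"))
assert params["app_name"] == "cluster_app"
assert params["num_replicas"] == 1

# update_ports() builds one fragment like this per instance and merges it into
# the cluster config via update_instances_config()/update_config_leaves().
ports = itertools.count(5501)
fragment = {"iproto": {"listen": [{"uri": f"{params['host']}:{next(ports)}"}]}}
assert fragment == {"iproto": {"listen": [{"uri": "localhost:5501"}]}}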
