Skip to content

Commit

Permalink
Rename the ctl options and update demo vsm spec
Browse files Browse the repository at this point in the history
  • Loading branch information
kmova committed Dec 27, 2016
1 parent e3f49e1 commit add7e85
Show file tree
Hide file tree
Showing 6 changed files with 177 additions and 24 deletions.
2 changes: 1 addition & 1 deletion GNUmakefile
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ EXTERNAL_TOOLS=\
GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*")

# Specify the name for the maya binary
MAYACTL=mayactl
MAYACTL=maya

all: test

Expand Down
29 changes: 18 additions & 11 deletions command/install_maya.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,26 +32,33 @@ type InstallMayaCommand struct {

func (c *InstallMayaCommand) Help() string {
helpText := `
Usage: maya install-maya
Usage: maya setup-omm
Installs maya server on this machine. In other words, the machine
where this command is run will become a maya server.
Configure this machine as OpenEBS Maya Master (omm)
OMM is a clustered management server node that can either be
run in VMs or Physical Hosts and is responsible for managing
and scheduling OpenEBS hosts and VSMs.
OMM also comes with a clustered configuration store.
A maya server is otherwise known as a maya master.
OMM can be clustered with other local or remote OMMs.
General Options:
` + generalOptionsUsage() + `
Install Maya Options:
OpenEBS Maya Master setup Options:
-member-ips=<IP Address(es) of all server members>
Comma separated list of IP addresses of all servers members
i.e. maya server peers, participating in the cluster.
-join-ips=<IP Address(es) of peer OMMs>

This comment has been minimized.

Copy link
@AmitKumarDas

AmitKumarDas Jan 6, 2017

-master-ips or -join-ips ?

Comma separated list of IP addresses of all management nodes
participating in the cluster.
NOTE: Do not include the IP address of this local machine i.e.
the machine where this command is being run.
If not provided, this machine will be added as the first node
in the cluster.
-self-ip=<IP Address>
The IP Address of this local machine i.e. the machine where
this command is being run. This is required when the machine
Expand All @@ -61,16 +68,16 @@ Install Maya Options:
}

func (c *InstallMayaCommand) Synopsis() string {
return "Installs maya server on this machine."
return "Configure OpenEBS Maya Master on this machine."
}

func (c *InstallMayaCommand) Run(args []string) int {
var runop int

flags := c.M.FlagSet("install-maya", FlagSetClient)
flags := c.M.FlagSet("setup-omm", FlagSetClient)
flags.Usage = func() { c.M.Ui.Output(c.Help()) }

flags.StringVar(&c.member_ips, "member-ips", "", "")
flags.StringVar(&c.member_ips, "join-ips", "", "")
flags.StringVar(&c.self_ip, "self-ip", "", "")

if err := flags.Parse(args); err != nil {
Expand Down
19 changes: 9 additions & 10 deletions command/install_openebs.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,11 +24,10 @@ type InstallOpenEBSCommand struct {

func (c *InstallOpenEBSCommand) Help() string {
helpText := `
Usage: maya install-openebs
Usage: maya setup-host
Installs maya openebs on this machine. In other words, the
machine where this command is run will become a maya openebs
node.
Configure this machine as OpenEBS Host and enable it
to run OpenEBS VSMs.
General Options:
Expand All @@ -45,9 +44,9 @@ Install Maya Options:
this command is being run. This is required when the machine
has many private IPs and you want to use a specific IP.
-member-ips=<IP Address(es) of all maya openebs nodes>
Comma separated list of IP addresses of all maya openebs
nodes partipating in the cluster.
-member-ips=<IP Address(es) of all peer openebs hosts>
Comma separated list of IP addresses of all openebs
hosts participating in the cluster.
NOTE: Do not include the IP address of this local machine i.e.
the machine where this command is being run.
Expand All @@ -56,13 +55,13 @@ Install Maya Options:
}

func (c *InstallOpenEBSCommand) Synopsis() string {
return "Installs maya openebs on this machine."
return "Configure this machine as OpenEBS Host."
}

func (c *InstallOpenEBSCommand) Run(args []string) int {
var runop int

flags := c.M.FlagSet("install-openebs", FlagSetClient)
flags := c.M.FlagSet("setup-host", FlagSetClient)
flags.Usage = func() { c.M.Ui.Output(c.Help()) }

flags.StringVar(&c.master_ips, "master-ips", "", "")
Expand Down Expand Up @@ -97,7 +96,7 @@ func (c *InstallOpenEBSCommand) Run(args []string) int {
}

if runop = mi.Install(); runop != 0 {
c.M.Ui.Error("OpenEBS install failed")
c.M.Ui.Error("OpenEBS Host setup failed")
}

return runop
Expand Down
4 changes: 2 additions & 2 deletions commands.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,12 @@ func Commands(metaPtr *command.Meta) map[string]cli.CommandFactory {
}

return map[string]cli.CommandFactory{
"install-maya": func() (cli.Command, error) {
"setup-omm": func() (cli.Command, error) {
return &command.InstallMayaCommand{
M: meta,
}, nil
},
"install-openebs": func() (cli.Command, error) {
"setup-host": func() (cli.Command, error) {
return &command.InstallOpenEBSCommand{
M: meta,
}, nil
Expand Down
146 changes: 146 additions & 0 deletions demo/jobs/demo1-vsm1.hcl
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
# Demo Nomad job for a single OpenEBS VSM: one iSCSI controller group
# ("vsm1-ctl") plus one replica/storage group ("vsm1-store1"). Shared
# volume parameters are passed to both via job-level meta.
job "demo1" {
datacenters = ["dc1"]

# Restrict our job to only linux. We can specify multiple
# constraints as needed.
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}

# Declare the IP parameters generic to all controllers and replicas.
# JIVA_FRONTENDIP is the controller's address; replicas use it to
# register with the controller.
meta {
JIVA_VOLNAME = "demo1-vsm1-vol1"
JIVA_VOLSIZE = "10g"
JIVA_FRONTENDIP = "172.28.128.101"
}

# Create a 'ctl' group. Each task in the group will be
# scheduled onto the same machine.
group "vsm1-ctl" {
# Configure the restart policy for the task group. If not provided, a
# default is used based on the job type.
restart {
# The number of attempts to run the job within the specified interval.
attempts = 3
interval = "5m"
delay = "25s"
mode = "delay"
}

# Define the controller task to run
task "iscsi" {
# Runs via the raw_exec driver; the artifact fetched below is the
# launcher script that starts the jiva controller container.
driver = "raw_exec"
artifact {
source = "https://raw.githubusercontent.com/openebs/jiva/master/scripts/launch-jiva-ctl-with-ip"
}

# Parameters consumed by the launch script; volume name/size and the
# frontend IP come from the job-level meta above.
env {
JIVA_CTL_NAME = "${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}"
JIVA_CTL_VERSION = "openebs/jiva:latest"
JIVA_CTL_VOLNAME = "${NOMAD_META_JIVA_VOLNAME}"
JIVA_CTL_VOLSIZE = "${NOMAD_META_JIVA_VOLSIZE}"
JIVA_CTL_IP = "${NOMAD_META_JIVA_FRONTENDIP}"
JIVA_CTL_SUBNET = "24"
# NOTE(review): assumes the host NIC is named enp0s8 (Vagrant-style
# second interface) — confirm per environment.
JIVA_CTL_IFACE = "enp0s8"
}

config {
command = "launch-jiva-ctl-with-ip"
}

# TCP liveness check against the controller's API port.
service {
port = "api"
check {
name = "alive"
type = "tcp"
interval = "10s"
timeout = "2s"
}
}

# We must specify the resources required for
# this task to ensure it runs on a machine with
# enough capacity.
resources {
cpu = 500 # 500 MHz
memory = 256 # 256MB
network {
mbits = 20
port "iscsi" {
static = "3260"
}
port "api" {
static = "9501"
}
}
}

}
}

# Create the replica ('store') group. Each task in the group will be
# scheduled onto the same machine.
group "vsm1-store1" {
# Configure the restart policy for the task group. If not provided, a
# default is used based on the job type.
restart {
# The number of attempts to run the job within the specified interval.
attempts = 3
interval = "5m"
delay = "25s"
mode = "delay"
}

# Define the replica task to run
task "rep1" {
# Runs via the raw_exec driver; the artifact fetched below is the
# launcher script that starts the jiva replica container.
driver = "raw_exec"
artifact {
source = "https://raw.githubusercontent.com/openebs/jiva/master/scripts/launch-jiva-rep-with-ip"
}

# Replica parameters; JIVA_CTL_IP points the replica at the
# controller's frontend IP from the job-level meta.
env {
JIVA_REP_NAME = "${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}"
JIVA_REP_VERSION = "openebs/jiva:latest"
JIVA_CTL_IP = "${NOMAD_META_JIVA_FRONTENDIP}"
JIVA_REP_VOLNAME = "${NOMAD_META_JIVA_VOLNAME}"
JIVA_REP_VOLSIZE = "${NOMAD_META_JIVA_VOLSIZE}"
JIVA_REP_IP = "172.28.128.102"
JIVA_REP_SUBNET = "24"
# NOTE(review): same NIC-name assumption as the controller task.
JIVA_REP_IFACE = "enp0s8"
# Host path backing the replica's data store.
JIVA_REP_VOLSTORE = "/tmp/jiva/rep1"
}

config {
command = "launch-jiva-rep-with-ip"
}

# TCP liveness check against the replica's API port.
service {
port = "api"
check {
name = "alive"
type = "tcp"
interval = "10s"
timeout = "2s"
}
}

# We must specify the resources required for
# this task to ensure it runs on a machine with
# enough capacity.
resources {
cpu = 500 # 500 MHz
memory = 256 # 256MB
network {
mbits = 20
port "api" {
static = "9502"
}
}
}

}
}
}
1 change: 1 addition & 0 deletions templates/nomad-client.hcl.tmpl
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ client {
enabled = true
options = {
"docker.privileged.enabled" = "true"
"driver.raw_exec.enable" = "1"
}
servers=[__ALL_SERVERS_IPV4_N_PORTS__]
}

0 comments on commit add7e85

Please sign in to comment.