From a85773884e023f37a28ad1462a11c38da621b739 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Otto=20Kek=C3=A4l=C3=A4inen?= Date: Thu, 11 Jul 2024 22:42:03 -0700 Subject: [PATCH] Fix misc spelling --- docker/resources/systemctl.py | 12 +++++----- docs/configuration-discovery-pseudo-gtid.md | 2 +- docs/deployment-shared-backend.md | 2 +- docs/developers.md | 2 +- docs/docker.md | 2 +- docs/high-availability.md | 2 +- docs/risks.md | 2 +- docs/using-the-web-api.md | 6 ++--- go/agent/agent_dao.go | 6 ++--- go/app/command_help.go | 26 ++++++++++----------- go/cmd/orchestrator/main.go | 2 +- go/config/config.go | 10 ++++---- go/db/db.go | 2 +- go/http/agents_api.go | 2 +- go/http/api.go | 6 ++--- go/inst/binlog.go | 2 +- go/inst/instance.go | 2 +- go/inst/instance_binlog_dao.go | 10 ++++---- go/inst/instance_dao.go | 12 +++++----- go/inst/instance_key.go | 4 ++-- go/inst/instance_topology.go | 2 +- go/inst/instance_topology_dao.go | 2 +- go/inst/instance_utils.go | 2 +- go/logic/topology_recovery.go | 4 ++-- go/logic/topology_recovery_dao.go | 6 ++--- go/raft/raft.go | 2 +- go/ssl/ssl.go | 2 +- resources/metrics/orchestrator-grafana.json | 6 ++--- vagrant/base-build.sh | 4 ++-- 29 files changed, 72 insertions(+), 72 deletions(-) diff --git a/docker/resources/systemctl.py b/docker/resources/systemctl.py index 85b885dd3..bc1288271 100755 --- a/docker/resources/systemctl.py +++ b/docker/resources/systemctl.py @@ -1949,7 +1949,7 @@ def do_start_unit_from(self, conf): service_result = "failed" break if service_result in [ "success" ] and mainpid: - logg.debug("okay, wating on socket for %ss", timeout) + logg.debug("okay, waiting on socket for %ss", timeout) results = self.wait_notify_socket(notify, timeout, mainpid) if "MAINPID" in results: new_pid = results["MAINPID"] @@ -2756,7 +2756,7 @@ def get_substate_from(self, conf): else: return "dead" def is_failed_modules(self, *modules): - """ [UNIT]... -- check if these units are in failes state + """ [UNIT]... -- check if these units are in failed state implements True if any is-active = True """ units = [] results = [] @@ -3618,7 +3618,7 @@ def syntax_check_service(self, conf): + "\n\t\t\tUse ' ; ' for multiple commands (ExecReloadPost or ExedReloadPre do not exist)", unit) if len(usedExecReload) > 0 and "/bin/kill " in usedExecReload[0]: logg.warning(" %s: the use of /bin/kill is not recommended for ExecReload as it is asychronous." - + "\n\t\t\tThat means all the dependencies will perform the reload simultanously / out of order.", unit) + + "\n\t\t\tThat means all the dependencies will perform the reload simultaneously / out of order.", unit) if conf.getlist("Service", "ExecRestart", []): #pragma: no cover logg.error(" %s: there no such thing as an ExecRestart (ignored)", unit) if conf.getlist("Service", "ExecRestartPre", []): #pragma: no cover @@ -3854,7 +3854,7 @@ def enabled_default_system_services(self, sysv = "S", default_target = None, ign default_services.append(unit) for folder in [ self.rc3_root_folder() ]: if not os.path.isdir(folder): - logg.warning("non-existant %s", folder) + logg.warning("non-existent %s", folder) continue for unit in sorted(os.listdir(folder)): path = os.path.join(folder, unit) @@ -3960,7 +3960,7 @@ def init_modules(self, *modules): it was never enabled in the system. /// SPECIAL: when using --now then only the init-loop is started, with the reap-zombies function and waiting for an interrupt. - (and no unit is started/stoppped wether given or not). + (and no unit is started/stopped wether given or not). 
""" if self._now: return self.init_loop_until_stop([]) @@ -4387,7 +4387,7 @@ def logg_debug(*msg): pass _o.add_option("--reverse", action="store_true", help="Show reverse dependencies with 'list-dependencies' (ignored)") _o.add_option("--job-mode", metavar="MODE", - help="Specifiy how to deal with already queued jobs, when queuing a new job (ignored)") + help="Specify how to deal with already queued jobs, when queuing a new job (ignored)") _o.add_option("--show-types", action="store_true", help="When showing sockets, explicitly show their type (ignored)") _o.add_option("-i","--ignore-inhibitors", action="store_true", diff --git a/docs/configuration-discovery-pseudo-gtid.md b/docs/configuration-discovery-pseudo-gtid.md index bea225f60..35ae0ce41 100644 --- a/docs/configuration-discovery-pseudo-gtid.md +++ b/docs/configuration-discovery-pseudo-gtid.md @@ -12,7 +12,7 @@ See [Pseudo GTID](pseudo-gtid.md) "AutoPseudoGTID": true, } ``` -And you may ignore any other Pseudo-GTID related configuration (they will all be implicitly overriden by `orchestrator`). +And you may ignore any other Pseudo-GTID related configuration (they will all be implicitly overridden by `orchestrator`). You will further need to grant the following on your MySQL servers: ```sql diff --git a/docs/deployment-shared-backend.md b/docs/deployment-shared-backend.md index ee58a690e..79e069a2c 100644 --- a/docs/deployment-shared-backend.md +++ b/docs/deployment-shared-backend.md @@ -54,7 +54,7 @@ To interact with orchestrator from shell/automation/scripts, you may choose to: - The [orchestrator command line](executing-via-command-line.md). - Deploy the `orchestrator` binary (you may use the `orchestrator-cli` distributed package) on any box from which you wish to interact with `orchestrator`. - Create `/etc/orchestrator.conf.json` on those boxes, populate with credentials. This file should generally be the same as for the `orchestrator` service boxes. If you're unsure, use exact same file content. - - The `orchestrator` binary will access the shared backend DB. Make sure to give it access. Typicaly this will be port `3306`. + - The `orchestrator` binary will access the shared backend DB. Make sure to give it access. Typically this will be port `3306`. It is OK to run `orchestrator` CLI even while the `orchestrator` service is operating, since they will all coordinate on the same backend DB. diff --git a/docs/developers.md b/docs/developers.md index 50ef54f7c..f4fafdd92 100644 --- a/docs/developers.md +++ b/docs/developers.md @@ -1,6 +1,6 @@ # Developers -To build, test and contribute to `orchestrator`, please refer t othe following pages: +To build, test and contribute to `orchestrator`, please refer to the following pages: - [Understanding CI](ci.md) - [Building and testing](build.md) diff --git a/docs/docker.md b/docs/docker.md index 3e2fdd769..fb366c4f6 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -40,7 +40,7 @@ file is bind mounted into container at `/etc/orchestrator.conf.json` * `ORC_USER`: defaults to `orc_server_user` * `ORC_PASSWORD`: defaults to `orc_server_password` -To set these variables you could add these to an environment file where you add them like `key=value` (one pair per line). You can then pass this enviroment file to the docker command adding `--env-file=path/to/env-file` to the `docker run` command. +To set these variables you could add these to an environment file where you add them like `key=value` (one pair per line). 
You can then pass this environment file to the docker command adding `--env-file=path/to/env-file` to the `docker run` command.
 
 ## Create package files
 
diff --git a/docs/high-availability.md b/docs/high-availability.md
index 2f9b69053..25566840a 100644
--- a/docs/high-availability.md
+++ b/docs/high-availability.md
@@ -44,7 +44,7 @@ This setup provides semi HA for `orchestrator`. Two variations available:
   - The proxy always directs to same server (e.g. `first` algorithm for `HAProxy`) unless that server is dead.
   - Death of the active master causes `orchestrator` to talk to other master, which may be somewhat behind. `orchestrator` will typically self reapply the missing changes by nature of its continuous discovery.
   - `orchestrator` queries guarantee `STATEMENT` based replication will not cause duplicate errors, and master-master setup will always achieve consistency.
-  - `orchestrator` will be able to recover the death of a backend master even if in the middle of runnign a recovery (recovery will re-initiate on alternate master)
+  - `orchestrator` will be able to recover the death of a backend master even if in the middle of running a recovery (recovery will re-initiate on alternate master)
   - **Split brain is possible**. Depending on your setup, physical locations, type of proxy, there can be different `orchestrator` service nodes speaking to different backend `MySQL` servers. This scenario can lead two two `orchestrator` services which consider themselves as "active", both of which will run failovers independently, which would lead to topology corruption.
 
 To access your `orchestrator` service you may speak to any healthy node.
diff --git a/docs/risks.md b/docs/risks.md
index 514d36f83..77bc37bc3 100644
--- a/docs/risks.md
+++ b/docs/risks.md
@@ -7,7 +7,7 @@ Most of the time `orchestrator` only reads status from your topologies. Default
 You may use `orchestrator` to refactor your topologies: move replicas around and change the replication tree. `orchestrator` will do its best to:
 
 1. Make sure you only move an instance to a location where it is valid for it to replicate (e.g. that you don't put a 5.5 server below a 5.6 server)
-2. Make sure you move an instance at the right time (ie the instance and whichever affected servers are not lagging badly, so that operation can compeletely in a timely manner).
+2. Make sure you move an instance at the right time (ie the instance and whichever affected servers are not lagging badly, so that operation can complete in a timely manner).
 3. Do the math correctly: stop the replica at the right time, roll it forward to the right position, `CHANGE MASTER` to the correct location & position.
 
 The above is well tested.
diff --git a/docs/using-the-web-api.md b/docs/using-the-web-api.md
index e522d2a48..54c3fe938 100644
--- a/docs/using-the-web-api.md
+++ b/docs/using-the-web-api.md
@@ -139,11 +139,11 @@ The structure of an Instance evolves and documentation will always fall behind.
* `ReplicationLagSeconds`: when `ReplicationLagQuery` provided, the computed replica lag; otherwise same as `SecondsBehindMaster` * `Replicas`: list of MySQL replicas _hostname & port_) * `ClusterName`: name of cluster this instance is associated with; uniquely identifies cluster -* `DataCenter`: (metadata) name of data center, infered by `DataCenterPattern` config variable -* `PhysicalEnvironment`: (metadata) name of environment, infered by `PhysicalEnvironmentPattern` config variable +* `DataCenter`: (metadata) name of data center, inferred by `DataCenterPattern` config variable +* `PhysicalEnvironment`: (metadata) name of environment, inferred by `PhysicalEnvironmentPattern` config variable * `ReplicationDepth`: distance from the master (master is `0`, direct replica is `1` and so on) * `IsCoMaster`: true when this instanceis part of a master-master pair -* `IsLastCheckValid`: whether last attempt at reading this instane succeeeded +* `IsLastCheckValid`: whether last attempt at reading this instance succeeeded * `IsUpToDate`: whether this data is up to date * `IsRecentlyChecked`: whether a read attempt on this instance has been recently made * `SecondsSinceLastSeen`: time elapsed since last successfully accessed this instance diff --git a/go/agent/agent_dao.go b/go/agent/agent_dao.go index f8495b7c8..82b094fb7 100644 --- a/go/agent/agent_dao.go +++ b/go/agent/agent_dao.go @@ -63,7 +63,7 @@ func InitHttpClient() { httpClient = &http.Client{Transport: httpTransport} } -// httpGet is a convenience method for getting http response from URL, optionaly skipping SSL cert verification +// httpGet is a convenience method for getting http response from URL, optionally skipping SSL cert verification func httpGet(url string) (resp *http.Response, err error) { return httpClient.Get(url) } @@ -683,7 +683,7 @@ func executeSeed(seedId int64, targetHostname string, sourceHostname string) err seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Checking MySQL status on target %s", targetHostname), "") if targetAgent.MySQLRunning { - return updateSeedStateEntry(seedStateId, errors.New("MySQL is running on target host. Cowardly refusing to proceeed. Please stop the MySQL service")) + return updateSeedStateEntry(seedStateId, errors.New("MySQL is running on target host. Cowardly refusing to proceed. Please stop the MySQL service")) } seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Looking up available snapshots on source %s", sourceHostname), "") @@ -711,7 +711,7 @@ func executeSeed(seedId int64, targetHostname string, sourceHostname string) err return updateSeedStateEntry(seedStateId, err) } - seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Aquiring target host datadir free space on %s", targetHostname), "") + seedStateId, _ = submitSeedStateEntry(seedId, fmt.Sprintf("Acquiring target host datadir free space on %s", targetHostname), "") targetAgent, err = GetAgent(targetHostname) if err != nil { return updateSeedStateEntry(seedStateId, err) diff --git a/go/app/command_help.go b/go/app/command_help.go index 2c04a0ced..36c74c5f8 100644 --- a/go/app/command_help.go +++ b/go/app/command_help.go @@ -208,7 +208,7 @@ func init() { CommandHelp["match-replicas"] = ` Matches all replicas of a given instance under another (destination) instance. This is a (faster) shortcut to matching said replicas one by one under the destination instance. 
In fact, this bulk operation is highly
-	optimized and can execute in orders of magnitue faster, depeding on the nu,ber of replicas involved and their
+	optimized and can execute in orders of magnitude faster, depending on the number of replicas involved and their
 	respective position behind the instance (the more replicas, the more savings).
 	The instance itself may be crashed or inaccessible. It is not contacted throughout the operation. Examples:
 
@@ -254,7 +254,7 @@ func init() {
 	local master of its siblings, using Pseudo-GTID. It is uncertain that there *is* a replica that will be able to
 	become master to all its siblings. But if there is one, orchestrator will pick such one. There are many
 	constraints, most notably the replication positions of all replicas, whether they use log_slave_updates, and
-	otherwise version compatabilities etc.
+	otherwise version compatibilities etc.
 	As many replicas that can be regrouped under promoted slves are operated on. The rest are untouched.
 	This command is useful in the event of a crash. For example, in the event that a master dies, this operation
 	can promote a candidate replacement and set up the remaining topology to correctly replicate from that
@@ -324,7 +324,7 @@ func init() {
 	Undo a detach-replica operation. Reverses the binlog change into the original values, and
 	resumes replication. Example:
 
-	orchestrator -c reattach-replica -i detahced.replica.whose.replication.will.amend.com
+	orchestrator -c reattach-replica -i detached.replica.whose.replication.will.amend.com
 
 	Issuing this on an attached (i.e. normal) replica will do nothing.
 	`
@@ -340,7 +340,7 @@ func init() {
 	Undo a detach-replica-master-host operation. Reverses the hostname change into the original value, and
 	resumes replication. Example:
 
-	orchestrator -c reattach-replica-master-host -i detahced.replica.whose.replication.will.amend.com
+	orchestrator -c reattach-replica-master-host -i detached.replica.whose.replication.will.amend.com
 
 	Issuing this on an attached (i.e. normal) replica will do nothing.
 	`
@@ -397,7 +397,7 @@ func init() {
 	Get binlog file:pos of entry given by --pattern (exact full match, not a regular expression) in a given instance.
 	This will search the instance's binary logs starting with most recent, and terminate as soon as an exact match is
 	found. The given input is not a regular expression. It must fully match the entry (not a substring).
-	This is most useful when looking for uniquely identifyable values, such as Pseudo-GTID. Example:
+	This is most useful when looking for uniquely identifiable values, such as Pseudo-GTID. Example:
 
 	orchestrator -c find-binlog-entry -i instance.to.search.on.com --pattern "insert into my_data (my_column) values ('distinct_value_01234_56789')"
 
@@ -480,7 +480,7 @@ func init() {
 	-i not given, implicitly assumed local hostname
 
 	Instance must be already known to orchestrator. Topology is generated by orchestrator's mapping
-	and not from synchronuous investigation of the instances. The generated topology may include
+	and not from synchronous investigation of the instances. The generated topology may include
 	instances that are dead, or whose replication is broken.
 	`
 	CommandHelp["all-instances"] = `
@@ -612,7 +612,7 @@ func init() {
 	assuming some_alias is a known cluster alias (see ClusterNameToAlias or DetectClusterAliasQuery configuration)
 	`
 	CommandHelp["instance-status"] = `
-	Output short status on a given instance (name, replication status, noteable configuration). Example2:
+	Output short status on a given instance (name, replication status, notable configuration). Example2:
 
 	orchestrator -c instance-status -i instance.to.investigate.com
 
@@ -631,7 +631,7 @@ func init() {
 
 	CommandHelp["discover"] = `
 	Request that orchestrator cotacts given instance, reads its status, and upsert it into
-	orchestrator's respository. Examples:
+	orchestrator's repository. Examples:
 
 	orchestrator -c discover -i instance.to.discover.com:3306
 
@@ -655,7 +655,7 @@ func init() {
 	`
 	CommandHelp["begin-maintenance"] = `
 	Request a maintenance lock on an instance. Topology changes require placing locks on the minimal set of
-	affected instances, so as to avoid an incident of two uncoordinated operations on a smae instance (leading
+	affected instances, so as to avoid an incident of two uncoordinated operations on a same instance (leading
 	to possible chaos). Locks are placed in the backend database, and so multiple orchestrator instances are safe.
 	Operations automatically acquire locks and release them. This command manually acquires a lock, and will
 	block other operations on the instance until lock is released.
@@ -680,7 +680,7 @@ func init() {
 	Mark an instance as downtimed. A downtimed instance is assumed to be taken care of, and recovery-analysis
 	does not apply for such an instance. As result, no recommendation for recovery, and no automated-recovery
 	are issued on a downtimed instance.
-	Downtime is different than maintanence in that it places no lock (mainenance uses an exclusive lock on the instance).
+	Downtime is different than maintenance in that it places no lock (maintenance uses an exclusive lock on the instance).
 	It is OK to downtime an instance that is already downtimed -- the new begin-downtime command will override whatever
 	previous downtime attributes there were on downtimes instance.
 	Note that orchestrator automatically assumes downtime to be expired after MaintenanceExpireMinutes (hard coded value).
@@ -801,17 +801,17 @@ func init() {
 	CommandHelp["register-candidate"] = `
 	Indicate that a specific instance is a preferred candidate for master promotion. Upon a dead master
 	recovery, orchestrator will do its best to promote instances that are marked as candidates. However
-	orchestrator cannot guarantee this will always work. Issues like version compatabilities, binlog format
+	orchestrator cannot guarantee this will always work. Issues like version compatibilities, binlog format
 	etc. are limiting factors.
 	You will want to mark an instance as a candidate when: it is replicating directly from the master, has
 	binary logs and log_slave_updates is enabled, uses same binlog_format as its siblings, compatible version
 	as its siblings.
 	If you're using DataCenterPattern & PhysicalEnvironmentPattern (see configuration), you would further
 	wish to make sure you have a candidate in each data center.
 	Orchestrator first promotes the best-possible replica, and only then replaces it with your candidate,
-	and only if both in same datcenter and physical enviroment.
+	and only if both in same datacenter and physical environment.
 	An instance needs to continuously be marked as candidate, so as to make sure orchestrator is not wasting
 	time with stale instances. Orchestrator periodically clears candidate-registration for instances that have
-	not been registeres for over CandidateInstanceExpireMinutes (see config).
+	not been registered for over CandidateInstanceExpireMinutes (see config).
Example: orchestrator -c register-candidate -i candidate.instance.com diff --git a/go/cmd/orchestrator/main.go b/go/cmd/orchestrator/main.go index c9c967d4b..ecb736764 100644 --- a/go/cmd/orchestrator/main.go +++ b/go/cmd/orchestrator/main.go @@ -32,7 +32,7 @@ import ( var AppVersion, GitCommit string -// main is the application's entry point. It will either spawn a CLI or HTTP itnerfaces. +// main is the application's entry point. It will either spawn a CLI or HTTP interfaces. func main() { configFile := flag.String("config", "", "config file name") command := flag.String("c", "", "command, required. See full list of commands via 'orchestrator -c help'") diff --git a/go/config/config.go b/go/config/config.go index 5d65da4b9..6770ed63b 100644 --- a/go/config/config.go +++ b/go/config/config.go @@ -220,12 +220,12 @@ type Configuration struct { AutoPseudoGTID bool // Should orchestrator automatically inject Pseudo-GTID entries to the masters PseudoGTIDPattern string // Pattern to look for in binary logs that makes for a unique entry (pseudo GTID). When empty, Pseudo-GTID based refactoring is disabled. PseudoGTIDPatternIsFixedSubstring bool // If true, then PseudoGTIDPattern is not treated as regular expression but as fixed substring, and can boost search time - PseudoGTIDMonotonicHint string // subtring in Pseudo-GTID entry which indicates Pseudo-GTID entries are expected to be monotonically increasing + PseudoGTIDMonotonicHint string // substring in Pseudo-GTID entry which indicates Pseudo-GTID entries are expected to be monotonically increasing DetectPseudoGTIDQuery string // Optional query which is used to authoritatively decide whether pseudo gtid is enabled on instance - BinlogEventsChunkSize int // Chunk size (X) for SHOW BINLOG|RELAYLOG EVENTS LIMIT ?,X statements. Smaller means less locking and mroe work to be done + BinlogEventsChunkSize int // Chunk size (X) for SHOW BINLOG|RELAYLOG EVENTS LIMIT ?,X statements. Smaller means less locking and more work to be done SkipBinlogEventsContaining []string // When scanning/comparing binlogs for Pseudo-GTID, skip entries containing given texts. These are NOT regular expressions (would consume too much CPU while scanning binlogs), just substrings to find. ReduceReplicationAnalysisCount bool // When true, replication analysis will only report instances where possibility of handled problems is possible in the first place (e.g. will not report most leaf nodes, that are mostly uninteresting). When false, provides an entry for every known instance - FailureDetectionPeriodBlockMinutes int // The time for which an instance's failure discovery is kept "active", so as to avoid concurrent "discoveries" of the instance's failure; this preceeds any recovery process, if any. + FailureDetectionPeriodBlockMinutes int // The time for which an instance's failure discovery is kept "active", so as to avoid concurrent "discoveries" of the instance's failure; this precedes any recovery process, if any. 
RecoveryPeriodBlockMinutes int // (supported for backwards compatibility but please use newer `RecoveryPeriodBlockSeconds` instead) The time for which an instance's recovery is kept "active", so as to avoid concurrent recoveries on same instance as well as flapping RecoveryPeriodBlockSeconds int // (overrides `RecoveryPeriodBlockMinutes`) The time for which an instance's recovery is kept "active", so as to avoid concurrent recoveries on same instance as well as flapping RecoveryIgnoreHostnameFilters []string // Recovery analysis will completely ignore hosts matching given patterns @@ -239,10 +239,10 @@ type Configuration struct { PostUnsuccessfulFailoverProcesses []string // Processes to execute after a not-completely-successful failover (order of execution undefined). May and should use some of these placeholders: {failureType}, {instanceType}, {isMaster}, {isCoMaster}, {failureDescription}, {command}, {failedHost}, {failureCluster}, {failureClusterAlias}, {failureClusterDomain}, {failedPort}, {successorHost}, {successorPort}, {successorBinlogCoordinates}, {successorAlias}, {countReplicas}, {replicaHosts}, {isDowntimed}, {isSuccessful}, {lostReplicas}, {countLostReplicas} PostMasterFailoverProcesses []string // Processes to execute after doing a master failover (order of execution undefined). Uses same placeholders as PostFailoverProcesses PostIntermediateMasterFailoverProcesses []string // Processes to execute after doing a master failover (order of execution undefined). Uses same placeholders as PostFailoverProcesses - PostGracefulTakeoverProcesses []string // Processes to execute after runnign a graceful master takeover. Uses same placeholders as PostFailoverProcesses + PostGracefulTakeoverProcesses []string // Processes to execute after running a graceful master takeover. Uses same placeholders as PostFailoverProcesses PostTakeMasterProcesses []string // Processes to execute after a successful Take-Master event has taken place RecoverNonWriteableMaster bool // When 'true', orchestrator treats a read-only master as a failure scenario and attempts to make the master writeable - CoMasterRecoveryMustPromoteOtherCoMaster bool // When 'false', anything can get promoted (and candidates are prefered over others). When 'true', orchestrator will promote the other co-master or else fail + CoMasterRecoveryMustPromoteOtherCoMaster bool // When 'false', anything can get promoted (and candidates are preferred over others). When 'true', orchestrator will promote the other co-master or else fail DetachLostSlavesAfterMasterFailover bool // synonym to DetachLostReplicasAfterMasterFailover DetachLostReplicasAfterMasterFailover bool // Should replicas that are not to be lost in master recovery (i.e. were more up-to-date than promoted replica) be forcibly detached ApplyMySQLPromotionAfterMasterFailover bool // Should orchestrator take upon itself to apply MySQL master promotion: set read_only=0, detach replication, etc. diff --git a/go/db/db.go b/go/db/db.go index 4080875af..0a4488d57 100644 --- a/go/db/db.go +++ b/go/db/db.go @@ -283,7 +283,7 @@ func deployStatements(db *sql.DB, queries []string) error { // where in NO_ZERO_IN_DATE,NO_ZERO_DATE sql_mode are invalid (since default is implicitly "0") // This means installation of orchestrator fails on such configured servers, and in particular on 5.7 // where this setting is the dfault. 
- // For purpose of backwards compatability, what we do is force sql_mode to be more relaxed, create the schemas + // For purpose of backwards compatibility, what we do is force sql_mode to be more relaxed, create the schemas // along with the "invalid" definition, and then go ahead and fix those definitions via following ALTER statements. // My bad. originalSqlMode := "" diff --git a/go/http/agents_api.go b/go/http/agents_api.go index 997662994..494073983 100644 --- a/go/http/agents_api.go +++ b/go/http/agents_api.go @@ -35,7 +35,7 @@ type HttpAgentsAPI struct { var AgentsAPI HttpAgentsAPI = HttpAgentsAPI{} -// SubmitAgent registeres an agent. It is initiated by an agent to register itself. +// SubmitAgent registers an agent. It is initiated by an agent to register itself. func (this *HttpAgentsAPI) SubmitAgent(params martini.Params, r render.Render) { port, err := strconv.Atoi(params["port"]) if err != nil { diff --git a/go/http/api.go b/go/http/api.go index d4c43d0e0..fa40a00da 100644 --- a/go/http/api.go +++ b/go/http/api.go @@ -2749,7 +2749,7 @@ func (this *HttpAPI) AgentSeedStates(params martini.Params, r render.Render, req r.JSON(http.StatusOK, output) } -// Seeds retruns all recent seeds +// Seeds returns all recent seeds func (this *HttpAPI) Seeds(params martini.Params, r render.Render, req *http.Request, user auth.User) { if !isAuthorizedForAction(req, user) { Respond(r, &APIResponse{Code: ERROR, Message: "Unauthorized"}) @@ -2809,12 +2809,12 @@ func (this *HttpAPI) Health(params martini.Params, r render.Render, req *http.Re } -// LBCheck returns a constant respnse, and this can be used by load balancers that expect a given string. +// LBCheck returns a constant response, and this can be used by load balancers that expect a given string. func (this *HttpAPI) LBCheck(params martini.Params, r render.Render, req *http.Request) { r.JSON(http.StatusOK, "OK") } -// LBCheck returns a constant respnse, and this can be used by load balancers that expect a given string. +// LBCheck returns a constant response, and this can be used by load balancers that expect a given string. func (this *HttpAPI) LeaderCheck(params martini.Params, r render.Render, req *http.Request) { respondStatus, err := strconv.Atoi(params["errorStatusCode"]) if err != nil || respondStatus < 0 { diff --git a/go/inst/binlog.go b/go/inst/binlog.go index 8e6887b01..11473d292 100644 --- a/go/inst/binlog.go +++ b/go/inst/binlog.go @@ -178,7 +178,7 @@ func (this *BinlogCoordinates) NextFileCoordinates() (BinlogCoordinates, error) return result, nil } -// Detach returns a detahced form of coordinates +// Detach returns a detached form of coordinates func (this *BinlogCoordinates) Detach() (detachedCoordinates BinlogCoordinates) { detachedCoordinates = BinlogCoordinates{LogFile: fmt.Sprintf("//%s:%d", this.LogFile, this.LogPos), LogPos: this.LogPos} return detachedCoordinates diff --git a/go/inst/instance.go b/go/inst/instance.go index 0f563b773..2708334b9 100644 --- a/go/inst/instance.go +++ b/go/inst/instance.go @@ -440,7 +440,7 @@ func (this *Instance) IsDescendantOf(other *Instance) bool { return false } -// CanReplicateFrom uses heursitics to decide whether this instacne can practically replicate from other instance. +// CanReplicateFrom uses heuristics to decide whether this instacne can practically replicate from other instance. // Checks are made to binlog format, version number, binary logs etc. 
 func (this *Instance) CanReplicateFrom(other *Instance) (bool, error) {
 	if this.Key.Equals(&other.Key) {
diff --git a/go/inst/instance_binlog_dao.go b/go/inst/instance_binlog_dao.go
index e99ec8d73..d089765d3 100644
--- a/go/inst/instance_binlog_dao.go
+++ b/go/inst/instance_binlog_dao.go
@@ -181,7 +181,7 @@ func getLastPseudoGTIDEntryInInstance(instance *Instance, minBinlogCoordinates *
 func getLastPseudoGTIDEntryInRelayLogs(instance *Instance, minBinlogCoordinates *BinlogCoordinates, recordedInstanceRelayLogCoordinates BinlogCoordinates, exhaustiveSearch bool) (*BinlogCoordinates, string, error) {
 	// Look for last GTID in relay logs:
 	// Since MySQL does not provide with a SHOW RELAY LOGS command, we heuristically start from current
-	// relay log (indiciated by Relay_log_file) and walk backwards.
+	// relay log (indicated by Relay_log_file) and walk backwards.
 	// Eventually we will hit a relay log name which does not exist.
 	pseudoGTIDRegexp, err := compilePseudoGTIDPattern()
 	if err != nil {
@@ -287,7 +287,7 @@ func getLastExecutedEntryInRelaylog(instanceKey *InstanceKey, binlog string, min
 func GetLastExecutedEntryInRelayLogs(instance *Instance, minBinlogCoordinates *BinlogCoordinates, recordedInstanceRelayLogCoordinates BinlogCoordinates) (binlogEvent *BinlogEvent, err error) {
 	// Look for last GTID in relay logs:
 	// Since MySQL does not provide with a SHOW RELAY LOGS command, we heuristically start from current
-	// relay log (indiciated by Relay_log_file) and walk backwards.
+	// relay log (indicated by Relay_log_file) and walk backwards.
 
 	currentRelayLog := recordedInstanceRelayLogCoordinates
 	for err == nil {
@@ -303,7 +303,7 @@ func GetLastExecutedEntryInRelayLogs(instance *Instance, minBinlogCoordinates *B
 			// and continue with exhaustive search.
 			minBinlogCoordinates = nil
 			log.Debugf("Heuristic relaylog search failed; continuing exhaustive search")
-			// And we do NOT iterate to previous log file: we scan same log faile again, with no heuristic
+			// And we do NOT iterate to previous log file: we scan same log file again, with no heuristic
 		} else {
 			currentRelayLog, err = currentRelayLog.PreviousFileCoordinates()
 		}
@@ -379,7 +379,7 @@ func searchEventInRelaylog(instanceKey *InstanceKey, binlog string, searchEvent
 
 func SearchEventInRelayLogs(searchEvent *BinlogEvent, instance *Instance, minBinlogCoordinates *BinlogCoordinates, recordedInstanceRelayLogCoordinates BinlogCoordinates) (binlogCoordinates, nextCoordinates *BinlogCoordinates, found bool, err error) {
 	// Since MySQL does not provide with a SHOW RELAY LOGS command, we heuristically start from current
-	// relay log (indiciated by Relay_log_file) and walk backwards.
+	// relay log (indicated by Relay_log_file) and walk backwards.
 	log.Debugf("will search for event %+v", *searchEvent)
 	if minBinlogCoordinates != nil {
 		log.Debugf("Starting with coordinates: %+v", *minBinlogCoordinates)
@@ -398,7 +398,7 @@ func SearchEventInRelayLogs(searchEvent *BinlogEvent, instance *Instance, minBin
 			// and continue with exhaustive search.
 			minBinlogCoordinates = nil
 			log.Debugf("Heuristic relaylog search failed; continuing exhaustive search")
-			// And we do NOT iterate to previous log file: we scan same log faile again, with no heuristic
+			// And we do NOT iterate to previous log file: we scan same log file again, with no heuristic
 		} else {
 			currentRelayLog, err = currentRelayLog.PreviousFileCoordinates()
 		}
diff --git a/go/inst/instance_dao.go b/go/inst/instance_dao.go
index 63324c749..ec8c1a878 100644
--- a/go/inst/instance_dao.go
+++ b/go/inst/instance_dao.go
@@ -540,7 +540,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool,
 				instance.DataCenter = match[1]
 			}
 		}
-		// This can be overriden by later invocation of DetectDataCenterQuery
+		// This can be overridden by later invocation of DetectDataCenterQuery
 	}
 	if config.Config.RegionPattern != "" {
 		if pattern, err := regexp.Compile(config.Config.RegionPattern); err == nil {
@@ -549,7 +549,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool,
 				instance.Region = match[1]
 			}
 		}
-		// This can be overriden by later invocation of DetectRegionQuery
+		// This can be overridden by later invocation of DetectRegionQuery
 	}
 	if config.Config.PhysicalEnvironmentPattern != "" {
 		if pattern, err := regexp.Compile(config.Config.PhysicalEnvironmentPattern); err == nil {
@@ -558,7 +558,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, bufferWrites bool,
 				instance.PhysicalEnvironment = match[1]
 			}
 		}
-		// This can be overriden by later invocation of DetectPhysicalEnvironmentQuery
+		// This can be overridden by later invocation of DetectPhysicalEnvironmentQuery
 	}
 
 	instance.ReplicationIOThreadState = ReplicationThreadStateNoThread
@@ -1665,7 +1665,7 @@ func filterOSCInstances(instances [](*Instance)) [](*Instance) {
 	return result
 }
 
-// GetClusterOSCReplicas returns a heuristic list of replicas which are fit as controll replicas for an OSC operation.
+// GetClusterOSCReplicas returns a heuristic list of replicas which are fit as control replicas for an OSC operation.
// These would be intermediate masters func GetClusterOSCReplicas(clusterName string) ([](*Instance), error) { intermediateMasters := [](*Instance){} @@ -1879,7 +1879,7 @@ func updateInstanceClusterName(instance *Instance) error { return ExecDBWriteFunc(writeFunc) } -// ReplaceClusterName replaces all occurances of oldClusterName with newClusterName +// ReplaceClusterName replaces all occurrences of oldClusterName with newClusterName // It is called after a master failover func ReplaceClusterName(oldClusterName string, newClusterName string) error { if oldClusterName == "" { @@ -1907,7 +1907,7 @@ func ReplaceClusterName(oldClusterName string, newClusterName string) error { return ExecDBWriteFunc(writeFunc) } -// ReviewUnseenInstances reviews instances that have not been seen (suposedly dead) and updates some of their data +// ReviewUnseenInstances reviews instances that have not been seen (supposedly dead) and updates some of their data func ReviewUnseenInstances() error { instances, err := ReadUnseenInstances() if err != nil { diff --git a/go/inst/instance_key.go b/go/inst/instance_key.go index a5b97cea4..b13488178 100644 --- a/go/inst/instance_key.go +++ b/go/inst/instance_key.go @@ -153,7 +153,7 @@ func (this *InstanceKey) IsValid() bool { return len(this.Hostname) > 0 && this.Port > 0 } -// DetachedKey returns an instance key whose hostname is detahced: invalid, but recoverable +// DetachedKey returns an instance key whose hostname is detached: invalid, but recoverable func (this *InstanceKey) DetachedKey() *InstanceKey { if this.IsDetached() { return this @@ -161,7 +161,7 @@ func (this *InstanceKey) DetachedKey() *InstanceKey { return &InstanceKey{Hostname: fmt.Sprintf("%s%s", detachHint, this.Hostname), Port: this.Port} } -// ReattachedKey returns an instance key whose hostname is detahced: invalid, but recoverable +// ReattachedKey returns an instance key whose hostname is detached: invalid, but recoverable func (this *InstanceKey) ReattachedKey() *InstanceKey { if !this.IsDetached() { return this diff --git a/go/inst/instance_topology.go b/go/inst/instance_topology.go index 26b93113b..ec8789c42 100644 --- a/go/inst/instance_topology.go +++ b/go/inst/instance_topology.go @@ -2721,7 +2721,7 @@ func relocateBelowInternal(instance, other *Instance) (*Instance, error) { instance, _, err := MatchBelow(&instance.Key, &other.Key, true) return instance, err } - // No Pseudo-GTID; cehck simple binlog file/pos operations: + // No Pseudo-GTID; check simple binlog file/pos operations: if InstancesAreSiblings(instance, other) { // If comastering, only move below if it's read-only if !other.IsCoMaster || other.ReadOnly { diff --git a/go/inst/instance_topology_dao.go b/go/inst/instance_topology_dao.go index f63ab2db1..ab5673129 100644 --- a/go/inst/instance_topology_dao.go +++ b/go/inst/instance_topology_dao.go @@ -1435,7 +1435,7 @@ func CheckAndInjectPseudoGTIDOnWriter(instance *Instance) (injected bool, err er } if !canInject { if util.ClearToLog("CheckAndInjectPseudoGTIDOnWriter", instance.Key.StringCode()) { - log.Warningf("AutoPseudoGTID enabled, but orchestrator has no priviliges on %+v to inject pseudo-gtid", instance.Key) + log.Warningf("AutoPseudoGTID enabled, but orchestrator has no privileges on %+v to inject pseudo-gtid", instance.Key) } return injected, nil diff --git a/go/inst/instance_utils.go b/go/inst/instance_utils.go index e68ca7d97..a628e0cbf 100644 --- a/go/inst/instance_utils.go +++ b/go/inst/instance_utils.go @@ -181,7 +181,7 @@ func RemoveInstance(instances [](*Instance), 
instanceKey *InstanceKey) [](*Insta return instances } -// removeBinlogServerInstances will remove all binlog servers from given lsit +// removeBinlogServerInstances will remove all binlog servers from given list func RemoveBinlogServerInstances(instances [](*Instance)) [](*Instance) { for i := len(instances) - 1; i >= 0; i-- { if instances[i].IsBinlogServer() { diff --git a/go/logic/topology_recovery.go b/go/logic/topology_recovery.go index 855a79387..55ea7c47f 100644 --- a/go/logic/topology_recovery.go +++ b/go/logic/topology_recovery.go @@ -1370,7 +1370,7 @@ func RecoverDeadCoMaster(topologyRecovery *TopologyRecovery, skipProcesses bool) // Maybe future me is a smarter person and finds a simple solution. Unlikely. I'm getting dumber. // // ... - // Now that we're convinved, take a look at what we can be left with: + // Now that we're convinced, take a look at what we can be left with: // Say we started with M1<->M2<-S1, with M2 failing, and we promoted S1. // We now have M1->S1 (because S1 is promoted), S1->M2 (because that's what it remembers), M2->M1 (because that's what it remembers) // !! This is an evil 3-node circle that must be broken. @@ -1923,7 +1923,7 @@ func CheckAndRecover(specificInstance *inst.InstanceKey, candidateInstanceKey *i } if specificInstance != nil { - // force mode. Keep it synchronuous + // force mode. Keep it synchronous var topologyRecovery *TopologyRecovery recoveryAttempted, topologyRecovery, err = executeCheckAndRecoverFunction(analysisEntry, candidateInstanceKey, true, skipProcesses) log.Errore(err) diff --git a/go/logic/topology_recovery_dao.go b/go/logic/topology_recovery_dao.go index 4a7e15d97..41bb219a8 100644 --- a/go/logic/topology_recovery_dao.go +++ b/go/logic/topology_recovery_dao.go @@ -267,7 +267,7 @@ func ClearActiveRecoveries() error { } // RegisterBlockedRecoveries writes down currently blocked recoveries, and indicates what recovery they are blocked on. -// Recoveries are blocked thru the in_active_period flag, which comes to avoid flapping. +// Recoveries are blocked through the in_active_period flag, which comes to avoid flapping. func RegisterBlockedRecoveries(analysisEntry *inst.ReplicationAnalysis, blockingRecoveries []*TopologyRecovery) error { for _, recovery := range blockingRecoveries { _, err := db.ExecOrchestrator(` @@ -438,7 +438,7 @@ func AcknowledgeClusterRecoveries(clusterName string, owner string, comment stri return countAcknowledgedEntries, nil } -// AcknowledgeInstanceRecoveries marks active recoveries for given instane as acknowledged. +// AcknowledgeInstanceRecoveries marks active recoveries for given instance as acknowledged. // This also implied clearing their active period, which in turn enables further recoveries on those topologies func AcknowledgeInstanceRecoveries(instanceKey *inst.InstanceKey, owner string, comment string) (countAcknowledgedEntries int64, err error) { whereClause := ` @@ -450,7 +450,7 @@ func AcknowledgeInstanceRecoveries(instanceKey *inst.InstanceKey, owner string, return acknowledgeRecoveries(owner, comment, false, whereClause, args) } -// AcknowledgeInstanceCompletedRecoveries marks active and COMPLETED recoveries for given instane as acknowledged. +// AcknowledgeInstanceCompletedRecoveries marks active and COMPLETED recoveries for given instance as acknowledged. 
// This also implied clearing their active period, which in turn enables further recoveries on those topologies func AcknowledgeInstanceCompletedRecoveries(instanceKey *inst.InstanceKey, owner string, comment string) (countAcknowledgedEntries int64, err error) { whereClause := ` diff --git a/go/raft/raft.go b/go/raft/raft.go index dcb8cca2d..348439c4f 100644 --- a/go/raft/raft.go +++ b/go/raft/raft.go @@ -213,7 +213,7 @@ func normalizeRaftNode(node string) (string, error) { // IsPartOfQuorum returns `true` when this node is part of the raft quorum, meaning its // data and opinion are trustworthy. -// Comapre that to a node which has left (or has not yet joined) the quorum: it has stale data. +// Compare that to a node which has left (or has not yet joined) the quorum: it has stale data. func IsPartOfQuorum() bool { if GetLeader() == "" { return false diff --git a/go/ssl/ssl.go b/go/ssl/ssl.go index 979b5c791..e70d017fb 100644 --- a/go/ssl/ssl.go +++ b/go/ssl/ssl.go @@ -159,7 +159,7 @@ func ReadPEMData(pemFile string, pemPass []byte) ([]byte, error) { var newBlock pem.Block newBlock.Type = pemBlock.Type newBlock.Bytes = pemData - // This is now like reading in an uncrypted key from a file and stuffing it + // This is now like reading in an unencrypted key from a file and stuffing it // into a byte stream pemData = pem.EncodeToMemory(&newBlock) } diff --git a/resources/metrics/orchestrator-grafana.json b/resources/metrics/orchestrator-grafana.json index ce0c59650..474b4fe83 100644 --- a/resources/metrics/orchestrator-grafana.json +++ b/resources/metrics/orchestrator-grafana.json @@ -938,7 +938,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "dead master recovery - sucess", + "title": "dead master recovery - success", "tooltip": { "shared": true, "value_type": "cumulative" @@ -1127,7 +1127,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "dead intermediate master recovery - sucess", + "title": "dead intermediate master recovery - success", "tooltip": { "shared": true, "value_type": "cumulative" @@ -1316,7 +1316,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "dead co master recovery - sucess", + "title": "dead co master recovery - success", "tooltip": { "shared": true, "value_type": "cumulative" diff --git a/vagrant/base-build.sh b/vagrant/base-build.sh index 0a88a88ed..ce8607408 100755 --- a/vagrant/base-build.sh +++ b/vagrant/base-build.sh @@ -7,7 +7,7 @@ if [[ -e /etc/redhat-release ]]; then # All the project dependencies to build plus some utilities # No reason not to install this stuff in all the places :) yum -d 0 -y install Percona-Server-server-56 Percona-Server-shared-56 Percona-Server-client-56 Percona-Server-shared-compat percona-toolkit percona-xtrabackup ruby-devel gcc rpm-build git vim-enhanced golang jq - # newest versions of java aren't compatable with the installed version of ruby (1.8.7) + # newest versions of java aren't compatible with the installed version of ruby (1.8.7) gem install json --version 1.8.6 # Pin to 1.4 due to 1.5 no longer working on EL6 gem install fpm --version 1.4 @@ -82,7 +82,7 @@ elif [[ -e /etc/debian_version ]]; then echo "PATH=$PATH:/usr/local/go/bin:/usr/local/bin" | sudo tee -a /etc/environment export PATH="PATH=$PATH:/usr/local/go/bin:/usr/local/bin" - # newest versions of java aren't compatable with the installed version of ruby (1.8.7) + # newest versions of java aren't compatible with the installed version of ruby (1.8.7) gem install json --version 1.8.6 gem install fpm --version 1.4