diff --git a/CHANGELOG.md b/CHANGELOG.md index e40d627f0..04e42f339 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -157,7 +157,7 @@ The releases changes the default logging level for the Raft subsystem from `INFO This release further improves _Automatic Backup_ performance, as well as improving test coverage generally. ### Implementation changes and bug fixes - [PR #1592](https://github.com/rqlite/rqlite/pull/1592): Refactor and enhance upload logging. -- [PR #1593](https://github.com/rqlite/rqlite/pull/1593): Tighthen snapshot-join end-to-end testing. +- [PR #1593](https://github.com/rqlite/rqlite/pull/1593): Tighten snapshot-join end-to-end testing. - [PR #1596](https://github.com/rqlite/rqlite/pull/1596): Track Raft logs which change the database. - [PR #1597](https://github.com/rqlite/rqlite/pull/1597): Clarify end-to-end testing code. - [PR #1598](https://github.com/rqlite/rqlite/pull/1598): Refactor Store-level index tracking. @@ -362,8 +362,8 @@ Fix an edge case related to Raft Snapshotting when a chunked load is in progress This release fixes an edge case issue during restore-from-SQLite. It's possible if a rqlite system crashes shortly after restoring from SQLite it may not have loaded the data correctly. ### Implementation changes and bug fixes -- [PR #1456](https://github.com/rqlite/rqlite/pull/1456): Wrap Snaphot Store _FullNeeded_ logic in a function. -- [PR #1457](https://github.com/rqlite/rqlite/pull/1457): Allow FullNeeded to be explicity set to true. +- [PR #1456](https://github.com/rqlite/rqlite/pull/1456): Wrap Snapshot Store _FullNeeded_ logic in a function. +- [PR #1457](https://github.com/rqlite/rqlite/pull/1457): Allow FullNeeded to be explicitly set to true. - [PR #1458](https://github.com/rqlite/rqlite/pull/1458): Perform full snapshot after chunked load. 
## 8.0.0 (December 5th 2023) @@ -375,13 +375,13 @@ This release also eases operations, as well as adding new features and bug fixes Release 8.0 supports (mostly) seamless upgrades from the 7.x series, and upgrading from 7.x has been tested. However, it is still strongly recommended you backup any production cluster before attempting an upgrade. A more conservative approach would be to create a brand new 8.0 system, and load your backup into that cluster. Then switch production traffic over to the new 8.0 cluster. -8.0 and 7.x nodes should be able to interoperate, so a rolling upgrade should work fine **as long as all nodes are fully caught up with the Leader node**. Note you also cannot join a new 8.x node to a pre-existing 7.x cluster. Otherwise upgrade should operate but, again, it is strongly recommended you test this first. It is also not recommended that you run a cluster with a mix of 7.x and 8.0 code for any significant length of time, just the time required for a rolling upgrade. +8.0 and 7.x nodes should be able to interoperate, so a rolling upgrade should work fine **as long as all nodes are fully caught up with the Leader node**. Note you also cannot join a new 8.x node to a preexisting 7.x cluster. Otherwise upgrade should operate but, again, it is strongly recommended you test this first. It is also not recommended that you run a cluster with a mix of 7.x and 8.0 code for any significant length of time, just the time required for a rolling upgrade. Important things to note if you decide to upgrade an existing 7.x system: - Backup your 7.x cluster first. - it is strongly recommended you upgrade your 7.x cluster to the [7.21.4](https://github.com/rqlite/rqlite/releases/tag/v7.21.4) release before upgrading to the 8.0 series. - 8.0 always runs with an on-disk database, in-memory databases are no longer supported. 
Improvements made late in the 7.0 series mean there is little difference in write performance between in-memory and on-disk modes, but supporting both modes just meant confusion and higher development costs. If you were previously running in in-memory mode (the previous default), you don't need to do anything. But if you were previously passing `-on-disk` to `rqlited` so that rqlite ran in on-disk mode, you must now remove that flag. -- When forming a new cluster using 8.0, pass the **Raft** addresss of the remote node to the `-join` command, not the HTTP API address. If your cluster is already formed, upgrades will work without changing anything (`-join` options are ignored if nodes are already members of a cluster). You may need to change any scripting or automatic-configuration generation however. +- When forming a new cluster using 8.0, pass the **Raft** address of the remote node to the `-join` command, not the HTTP API address. If your cluster is already formed, upgrades will work without changing anything (`-join` options are ignored if nodes are already members of a cluster). You may need to change any scripting or automatic-configuration generation however. - Bcrypted password hashes are no longer supported, due to security flaws in the 7.x release. You should regenerate any [Credentials file](https://rqlite.io/docs/guides/security/), and use plaintext passwords only (and prevent unauthorized access to the Credentials file). - A few rarely, if ever, used `rqlited` command-line flags have been removed. These flags just added operational overhead, while adding little value. @@ -517,7 +517,7 @@ This release changes the "syncing" mode SQLite uses to _OFF_ when rqlite runs in - [PR #1270](https://github.com/rqlite/rqlite/pull/1270): Confirm self-removal changes cluster config. - [PR #1272](https://github.com/rqlite/rqlite/pull/1272): Refactor node self-removal on shutdown. 
- [PR #1273](https://github.com/rqlite/rqlite/pull/1273): Make WaitForLeader() more consistent. -- [PR #1274](https://github.com/rqlite/rqlite/pull/1274): Do DNS boostrapping even if there is pre-existing state. Fixes [issue #1247](https://github.com/rqlite/rqlite/issues/1247) +- [PR #1274](https://github.com/rqlite/rqlite/pull/1274): Do DNS bootstrapping even if there is preexisting state. Fixes [issue #1247](https://github.com/rqlite/rqlite/issues/1247) ## 7.18.1 (May 20th 2023) This release also includes some small logging improvements, related to node-shutdown. @@ -671,7 +671,7 @@ This release addresses a shortcoming in inter-node communications. Nodes now con - [PR #1087](https://github.com/rqlite/rqlite/pull/1087): Notified and joined node checks address resolution. ## 7.9.0 (October 22nd 2022) -This release makes it more convenient to load SQLite files directly into rqlite, as any node can now process the request. For this to work however, all nodes in your cluster must be running 7.9.0 (or later). Otherwse 7.9.0 is fully compatible with earlier release, so a rolling upgrade process is an option. +This release makes it more convenient to load SQLite files directly into rqlite, as any node can now process the request. For this to work however, all nodes in your cluster must be running 7.9.0 (or later). Otherwise 7.9.0 is fully compatible with earlier releases, so a rolling upgrade process is an option. ### New features - [PR #1084](https://github.com/rqlite/rqlite/pull/1084): Transparently forward SQLite data Restore requests to Leaders. @@ -680,7 +680,7 @@ This release makes it more convenient to load SQLite files directly into rqlite, - [PR #1085](https://github.com/rqlite/rqlite/pull/1085): Improved logs during joining. ## 7.8.0 (October 20th 2022) -This release makes it more convenient to retrieve a backup. Now any node can provide a backup of the underlying SQLite database.
For this to work however, all nodes in your cluster must be running 7.8.0 (or later). Otherwse 7.8.0 is fully compatible with earlier release, so a rolling upgrade process is an option. +This release makes it more convenient to retrieve a backup. Now any node can provide a backup of the underlying SQLite database. For this to work however, all nodes in your cluster must be running 7.8.0 (or later). Otherwise 7.8.0 is fully compatible with earlier releases, so a rolling upgrade process is an option. ### New features - [PR #1081](https://github.com/rqlite/rqlite/pull/1081): Transparently forward Backup requests to Leaders. @@ -696,7 +696,7 @@ This release makes it more convenient to retrieve a backup. Now any node can pro ## 7.7.1 (October 13th 2022) ### Implementation changes and bug fixes -- [PR #1074](https://github.com/rqlite/rqlite/pull/1074): Support `NULL` as a paramterized value. Fixes [issue #1073](https://github.com/rqlite/rqlite/issues/1073) +- [PR #1074](https://github.com/rqlite/rqlite/pull/1074): Support `NULL` as a parameterized value. Fixes [issue #1073](https://github.com/rqlite/rqlite/issues/1073) ## 7.7.0 (September 28th 2022) This release adds support for SQLite [`RANDOM()`](https://www.sqlite.org/deterministic.html), the first such [support for non-deterministic functions](https://github.com/rqlite/rqlite/blob/master/DOC/NON_DETERMINISTIC_FUNCTIONS.md). It does this via statement-rewriting. @@ -773,7 +773,7 @@ This release introduces supported for [DNS-based](https://www.cloudflare.com/lea - [PR #976](https://github.com/rqlite/rqlite/pull/976): Improve `/readyz` response. - [PR #978](https://github.com/rqlite/rqlite/pull/978): Return error on join request if node ID is the same as receiving node. - [PR #980](https://github.com/rqlite/rqlite/pull/980): Move config validation to Config type. -- [PR #981](https://github.com/rqlite/rqlite/pull/981): Add curent time to node `/status` output.
+- [PR #981](https://github.com/rqlite/rqlite/pull/981): Add current time to node `/status` output. - [PR #982](https://github.com/rqlite/rqlite/pull/982): `/readyz` can skip leader check via `noleader` query param. - [PR #984](https://github.com/rqlite/rqlite/pull/984): Count number of `/status` and `/readyz` requests via expvar. - [PR #986](https://github.com/rqlite/rqlite/pull/986): Refactor join code with new Joiner type. @@ -970,7 +970,7 @@ This release addresses a significant issue related to SQLite connection handling ### Implementation changes and bug fixes - [PR #827](https://github.com/rqlite/rqlite/pull/827): Upgrade dependencies, including SQLite to 3.36. -- [PR #835](https://github.com/rqlite/rqlite/pull/835): Use Go standard libary sql/database abstraction. Fixes [issue #830](https://github.com/rqlite/rqlite/issues/830). +- [PR #835](https://github.com/rqlite/rqlite/pull/835): Use Go standard library sql/database abstraction. Fixes [issue #830](https://github.com/rqlite/rqlite/issues/830). - [PR #835](https://github.com/rqlite/rqlite/pull/835): Use SQLite connection pool and add pool statistics to status output. - [PR #836](https://github.com/rqlite/rqlite/pull/836): Add current SQLite journal mode to status output. - [PR #839](https://github.com/rqlite/rqlite/pull/839): Limit in-memory connection pool to 1 connection. @@ -985,7 +985,7 @@ This release implements a significant design change, which improves rqlite clust In the 5.0 series, _Follower_ nodes learned the HTTP API address of the cluster Leader via information - known as _Metadata_ - that each node wrote to the Raft log. This Metadata was then available to each node in the cluster, if that node needed to redirect queries to the cluster Leader (assuming that node wasn't the Leader at that time). However that design was somewhat complex, and required the tracking of extra state, in addition to the SQLite database. 
It also meant that if the Metadata got out of sync with the Raft state, the cluster could be in a degraded state. -In this new design, a node now queries the Leader as needed, when that node needs to learn the Leader's HTTP API address. As a result, the Metadata component has been removed from rqlite, since it is no longer needed. And without any possibility of discrepancy between Metadata and Raft state, a whole class of potential bugs is removed. Any request for the Leader HTTP API address means the requesting node node connects to a TCP port already open on the Leader for Raft connections, so does not introduce any new failure modes. This multiplexing of the Raft TCP port is performed via the `mux` package. +In this new design, a node now queries the Leader as needed, when that node needs to learn the Leader's HTTP API address. As a result, the Metadata component has been removed from rqlite, since it is no longer needed. And without any possibility of discrepancy between Metadata and Raft state, a whole class of potential bugs is removed. Any request for the Leader HTTP API address means the requesting node connects to a TCP port already open on the Leader for Raft connections, so does not introduce any new failure modes. This multiplexing of the Raft TCP port is performed via the `mux` package. This new design does mean that nodes running earlier software cannot communicate with 6.0 nodes, as 6.0 software no longer performs Metadata updates. As a result, **rqlite clusters running 5.x software or earlier must be explicitly upgraded**. To upgrade from an earlier version to this release you should [backup your Leader node](https://github.com/rqlite/rqlite/blob/master/DOC/BACKUPS.md), and [restore the database dump](https://github.com/rqlite/rqlite/blob/master/DOC/RESTORE_FROM_SQLITE.md) into a new 6.0 cluster. 
@@ -1125,7 +1125,7 @@ _This release should not be used, due to a HTTP redirection bug._ - [PR #641](https://github.com/rqlite/rqlite/pull/641): rqlite CLI now supports node removal. ## 5.2.0 (April 11th 2020) -This release fixes a very significant bug, whereby snapshotting was never occuring due to a zero snapshot-interval being passed to the Raft subsystem. This meant that the Raft log would grow without bound, and could result in very long start-up times if the Raft log was very large. +This release fixes a very significant bug, whereby snapshotting was never occurring due to a zero snapshot-interval being passed to the Raft subsystem. This meant that the Raft log would grow without bound, and could result in very long start-up times if the Raft log was very large. ### New features - [PR #637](https://github.com/rqlite/rqlite/pull/637): Allow the Raft snapshotting check interval to be set at launch time. @@ -1167,7 +1167,7 @@ The HTTP Query and Insert API remains unchanged in the 5.0 series relative to th - [PR #607](https://github.com/rqlite/rqlite/pull/607): Various Redirect fixes. - [PR #609](https://github.com/rqlite/rqlite/pull/609): Simplify rqlite implementation. - [PR #610](https://github.com/rqlite/rqlite/pull/610): Write node backup directly to HTTP response writer. Thanks @sum12. -- [PR #611](https://github.com/rqlite/rqlite/pull/611): Add varadic perm check functions to auth store. +- [PR #611](https://github.com/rqlite/rqlite/pull/611): Add variadic perm check functions to auth store. ## 4.6.0 (November 29th 2019) _This release adds significant new functionality to the command-line tool, including much more control over backup and restore of the database. 
[Visit the Releases page](https://github.com/rqlite/rqlite/releases/tag/v4.6.0) to download this release._ @@ -1209,7 +1209,7 @@ _This release adds significant new functionality to the command-line tool, inclu - [PR #384](https://github.com/rqlite/rqlite/pull/384): "status" perm required to access Go runtime information. ## 4.2.1 (November 10th 2017) -- [PR #367](https://github.com/rqlite/rqlite/pull/367): Remove superflous leading space at CLI prompt. +- [PR #367](https://github.com/rqlite/rqlite/pull/367): Remove superfluous leading space at CLI prompt. - [PR #368](https://github.com/rqlite/rqlite/pull/368): CLI displays clear error message when not authorized. - [PR #370](https://github.com/rqlite/rqlite/pull/370): CLI does not need to indent JSON when making requests. - [PR #373](https://github.com/rqlite/rqlite/pull/373), [PR #374](https://github.com/rqlite/rqlite/pull/374): Add simple INSERT-only benchmarking tool. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2e121fccb..10d81a8c9 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ # Contributing to rqlite rqlite is software, and it goes without saying it can always be improved. It's by no means finished -- issues are tracked, and I plan to develop this project further. Pull requests are welcome, though larger proposals should be discussed first. The design and implementation of rqlite is somewhat opinionated conservative however, so feature and design changes may be slow to become part of rqlite. -rqlite can be compiled and executed on Linux, OSX, and Microsoft Windows. +rqlite can be compiled and executed on Linux, macOS, and Microsoft Windows. For full details on developing, and contributing to, rqlite, check out [rqlite.io](https://rqlite.io/docs/install-rqlite/building-from-source/). 
diff --git a/README.md b/README.md index 28687d559..08041064e 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ [![Circle CI](https://circleci.com/gh/rqlite/rqlite/tree/master.svg?style=svg)](https://circleci.com/gh/rqlite/rqlite/tree/master) -[![appveyor](https://ci.appveyor.com/api/projects/status/github/rqlite/rqlite?branch=master&svg=true)](https://ci.appveyor.com/project/otoolep/rqlite) +[![AppVeyor](https://ci.appveyor.com/api/projects/status/github/rqlite/rqlite?branch=master&svg=true)](https://ci.appveyor.com/project/otoolep/rqlite) [![Go Report Card](https://goreportcard.com/badge/github.com/rqlite/rqlite)](https://goreportcard.com/report/github.com/rqlite/rqlite/v8) [![Release](https://img.shields.io/github/release/rqlite/rqlite.svg)](https://github.com/rqlite/rqlite/releases) [![Docker](https://img.shields.io/docker/pulls/rqlite/rqlite?style=plastic)](https://hub.docker.com/r/rqlite/rqlite/) diff --git a/auth/credential_store.go b/auth/credential_store.go index 8dec1f6c5..5aa5b32e1 100644 --- a/auth/credential_store.go +++ b/auth/credential_store.go @@ -185,7 +185,7 @@ func (c *CredentialsStore) AA(username, password, perm string) bool { return c.HasAnyPerm(username, perm, PermAll) } -// HasPermRequest returns true if the username returned by b has the givem perm. +// HasPermRequest returns true if the username returned by b has the given perm. // It does not perform any password checking, but if there is no username // in the request, it returns false. 
func (c *CredentialsStore) HasPermRequest(b BasicAuther, perm string) bool { diff --git a/auto/backup/config_test.go b/auto/backup/config_test.go index bb7db3604..ce830641b 100644 --- a/auto/backup/config_test.go +++ b/auto/backup/config_test.go @@ -36,7 +36,7 @@ func Test_ReadConfigFile(t *testing.T) { } }) - t.Run("non-existent file", func(t *testing.T) { + t.Run("nonexistent file", func(t *testing.T) { _, err := ReadConfigFile("nonexistentfile") if !errors.Is(err, os.ErrNotExist) { t.Fatalf("Expected os.ErrNotExist, got %v", err) diff --git a/auto/restore/config_test.go b/auto/restore/config_test.go index 801146478..0651db6bb 100644 --- a/auto/restore/config_test.go +++ b/auto/restore/config_test.go @@ -36,7 +36,7 @@ func Test_ReadConfigFile(t *testing.T) { } }) - t.Run("non-existent file", func(t *testing.T) { + t.Run("nonexistent file", func(t *testing.T) { _, err := ReadConfigFile("nonexistentfile") if !errors.Is(err, os.ErrNotExist) { t.Fatalf("Expected os.ErrNotExist, got %v", err) diff --git a/cluster/service_test.go b/cluster/service_test.go index 54bb4e592..ef9dd91a5 100644 --- a/cluster/service_test.go +++ b/cluster/service_test.go @@ -180,7 +180,7 @@ func Test_NewServiceTestExecuteQueryAuthNoCredentials(t *testing.T) { db := mustNewMockDatabase() clstr := mustNewMockManager() - // Test that for a cluster with no credential store configed + // Test that for a cluster with no credential store configured // all users are authed for both operations var c CredentialStore = nil c = nil diff --git a/cmd/rqlite/README.md b/cmd/rqlite/README.md index 9a482e56e..8d44e0443 100644 --- a/cmd/rqlite/README.md +++ b/cmd/rqlite/README.md @@ -75,7 +75,7 @@ bye~ Connecting to a host running somewhere else on the network: ``` $ rqlite -H localhost -p 8493 -locahost:8493> +localhost:8493> ``` ## Build diff --git a/cmd/rqlite/history/history.go b/cmd/rqlite/history/history.go index efe76103f..6564fcd80 100644 --- a/cmd/rqlite/history/history.go +++ 
b/cmd/rqlite/history/history.go @@ -23,7 +23,7 @@ func Size() int { return maxSize } -// Dedupe returns a copy of the slice with contigous dupes removed. +// Dedupe returns a copy of the slice with contiguous dupes removed. func Dedupe(s []string) []string { if s == nil { return nil diff --git a/cmd/rqlite/http/client_test.go b/cmd/rqlite/http/client_test.go index b06a20651..55fd19b1e 100644 --- a/cmd/rqlite/http/client_test.go +++ b/cmd/rqlite/http/client_test.go @@ -70,13 +70,13 @@ func TestClient_QueryWhenSomeAreAvailable(t *testing.T) { t.Errorf("expected HostChangedError got nil instead") } - hcer, ok := err.(*HostChangedError) + hcerr, ok := err.(*HostChangedError) if !ok { t.Errorf("unexpected error occurred: %v", err) } - if hcer.NewHost != u2.Host { + if hcerr.NewHost != u2.Host { t.Errorf("unexpected responding host") } diff --git a/cmd/rqlite/main.go b/cmd/rqlite/main.go index 7c9f3f39f..7b820b8f2 100644 --- a/cmd/rqlite/main.go +++ b/cmd/rqlite/main.go @@ -467,7 +467,7 @@ func getVersionWithClient(client *http.Client, argv *argT) (string, error) { } func sendRequest(ctx *cli.Context, makeNewRequest func(string) (*http.Request, error), urlStr string, argv *argT) (*[]byte, error) { - // create a byte-based buffer that implments io.Writer + // create a byte-based buffer that implements io.Writer var buf []byte w := bytes.NewBuffer(buf) _, err := sendRequestW(ctx, makeNewRequest, urlStr, argv, w) diff --git a/cmd/rqlited/flags.go b/cmd/rqlited/flags.go index 1ddc06ce8..803410063 100644 --- a/cmd/rqlited/flags.go +++ b/cmd/rqlited/flags.go @@ -59,7 +59,7 @@ type Config struct { // AutoRestoreFile is the path to the auto-restore file. May not be set. AutoRestoreFile string `filepath:"true"` - // HTTPx509CACert is the path to the CA certficate file for when this node verifies + // HTTPx509CACert is the path to the CA certificate file for when this node verifies // other certificates for any HTTP communications. May not be set. 
HTTPx509CACert string `filepath:"true"` @@ -72,7 +72,7 @@ type Config struct { // HTTPVerifyClient indicates whether the HTTP server should verify client certificates. HTTPVerifyClient bool - // NodeX509CACert is the path to the CA certficate file for when this node verifies + // NodeX509CACert is the path to the CA certificate file for when this node verifies // other certificates for any inter-node communications. May not be set. NodeX509CACert string `filepath:"true"` @@ -120,7 +120,7 @@ type Config struct { // BootstrapExpectTimeout is the maximum time a bootstrap operation can take. BootstrapExpectTimeout time.Duration - // DisoMode sets the discovery mode. May not be set. + // DiscoMode sets the discovery mode. May not be set. DiscoMode string // DiscoKey sets the discovery prefix key. @@ -249,7 +249,7 @@ func (c *Config) Validate() error { c.NodeID = c.RaftAdv } - // Perfom some address validity checks. + // Perform some address validity checks. if strings.HasPrefix(strings.ToLower(c.HTTPAddr), "http") || strings.HasPrefix(strings.ToLower(c.HTTPAdv), "http") { return errors.New("HTTP options should not include protocol (http:// or https://)") @@ -299,7 +299,7 @@ func (c *Config) Validate() error { addrs := strings.Split(c.JoinAddrs, ",") for i := range addrs { if _, _, err := net.SplitHostPort(addrs[i]); err != nil { - return fmt.Errorf("%s is an invalid join adddress", addrs[i]) + return fmt.Errorf("%s is an invalid join address", addrs[i]) } if c.BootstrapExpect == 0 { @@ -389,7 +389,7 @@ func (c *Config) DiscoConfigReader() io.ReadCloser { } // CheckFilePaths checks that all file paths in the config exist. -// Empy filepaths are ignored. +// Empty filepaths are ignored. 
func (c *Config) CheckFilePaths() error { v := reflect.ValueOf(c).Elem() diff --git a/cmd/rqlited/main.go b/cmd/rqlited/main.go index 506e1f83f..1aa359024 100644 --- a/cmd/rqlited/main.go +++ b/cmd/rqlited/main.go @@ -436,7 +436,7 @@ func createCluster(cfg *Config, hasPeers bool, client *cluster.Client, str *stor } // Brand new node, told to bootstrap itself. So do it. - log.Println("bootstraping single new node") + log.Println("bootstrapping single new node") if err := str.Bootstrap(store.NewServer(str.ID(), cfg.RaftAdv, true)); err != nil { return fmt.Errorf("failed to bootstrap single new node: %s", err.Error()) } diff --git a/command/encoding/json_test.go b/command/encoding/json_test.go index d6626cd62..e4e89261e 100644 --- a/command/encoding/json_test.go +++ b/command/encoding/json_test.go @@ -237,7 +237,7 @@ func Test_MarshalQueryRows(t *testing.T) { } } -// Test_MarshalQueryAssociativeRows tests JSON marshaling of a QueryRows +// Test_MarshalQueryAssociativeRows tests JSON (pretty) marshaling of a QueryRows func Test_MarshalQueryAssociativeRows(t *testing.T) { var b []byte var err error @@ -409,7 +409,7 @@ func Test_MarshalQueryRowses(t *testing.T) { } } -// Test_MarshalQueryRowses tests JSON marshaling of a slice of QueryRows +// Test_MarshalQueryAssociativeRowses tests JSON marshaling of a slice of associative QueryRows func Test_MarshalQueryAssociativeRowses(t *testing.T) { var b []byte var err error diff --git a/command/marshal.go b/command/marshal.go index 1861a8a27..a23e0d931 100644 --- a/command/marshal.go +++ b/command/marshal.go @@ -172,7 +172,7 @@ func UnmarshalLoadChunkRequest(b []byte, lr *proto.LoadChunkRequest) error { return pb.Unmarshal(b, lr) } -// UnmarshalSubCommand unmarshalls a sub command m. It assumes that +// UnmarshalSubCommand unmarshals a sub command m. It assumes that // m is the correct type. 
func UnmarshalSubCommand(c *proto.Command, m pb.Message) error { b := c.SubCommand diff --git a/db/db.go b/db/db.go index ee65d5f14..4d96fa675 100644 --- a/db/db.go +++ b/db/db.go @@ -746,14 +746,14 @@ func (db *DB) Query(req *command.Request, xTime bool) ([]*command.QueryRows, err return db.queryWithConn(ctx, req, xTime, conn) } -type queryer interface { +type querier interface { QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) } func (db *DB) queryWithConn(ctx context.Context, req *command.Request, xTime bool, conn *sql.Conn) ([]*command.QueryRows, error) { var err error - var queryer queryer + var querier querier var tx *sql.Tx if req.Transaction { stats.Add(numQTx, 1) @@ -762,9 +762,9 @@ func (db *DB) queryWithConn(ctx context.Context, req *command.Request, xTime boo return nil, err } defer tx.Rollback() // Will be ignored if tx is committed - queryer = tx + querier = tx } else { - queryer = conn + querier = conn } var allRows []*command.QueryRows @@ -795,7 +795,7 @@ func (db *DB) queryWithConn(ctx context.Context, req *command.Request, xTime boo continue } - rows, err = db.queryStmtWithConn(ctx, stmt, xTime, queryer, time.Duration(req.DbTimeout)) + rows, err = db.queryStmtWithConn(ctx, stmt, xTime, querier, time.Duration(req.DbTimeout)) if err != nil { stats.Add(numQueryErrors, 1) rows = &command.QueryRows{ @@ -811,7 +811,7 @@ func (db *DB) queryWithConn(ctx context.Context, req *command.Request, xTime boo return allRows, err } -func (db *DB) queryStmtWithConn(ctx context.Context, stmt *command.Statement, xTime bool, q queryer, timeout time.Duration) (retRows *command.QueryRows, retErr error) { +func (db *DB) queryStmtWithConn(ctx context.Context, stmt *command.Statement, xTime bool, q querier, timeout time.Duration) (retRows *command.QueryRows, retErr error) { defer func() { if retErr != nil { retErr = rewriteContextTimeout(retErr, ErrQueryTimeout) @@ -932,7 +932,7 @@ func (db *DB) Request(req *command.Request, xTime 
bool) ([]*command.ExecuteQuery defer cancel() } - var queryer queryer + var querier querier var execer execer var tx *sql.Tx if req.Transaction { @@ -942,10 +942,10 @@ func (db *DB) Request(req *command.Request, xTime bool) ([]*command.ExecuteQuery return nil, err } defer tx.Rollback() // Will be ignored if tx is committed - queryer = tx + querier = tx execer = tx } else { - queryer = conn + querier = conn execer = conn } @@ -978,7 +978,7 @@ func (db *DB) Request(req *command.Request, xTime bool) ([]*command.ExecuteQuery } if ro { - rows, opErr := db.queryStmtWithConn(ctx, stmt, xTime, queryer, time.Duration(req.DbTimeout)) + rows, opErr := db.queryStmtWithConn(ctx, stmt, xTime, querier, time.Duration(req.DbTimeout)) eqResponse = append(eqResponse, createEQQueryResponse(rows, opErr)) if abortOnError(opErr) { break diff --git a/db/db_checkpoint_test.go b/db/db_checkpoint_test.go index be9c46848..8e7ab73ae 100644 --- a/db/db_checkpoint_test.go +++ b/db/db_checkpoint_test.go @@ -28,7 +28,7 @@ func Test_WALDatabaseCheckpointOKNoWAL(t *testing.T) { } defer db.Close() if err := db.Checkpoint(CheckpointTruncate); err != nil { - t.Fatalf("failed to checkpoint database in WAL mode with non-existent WAL: %s", err.Error()) + t.Fatalf("failed to checkpoint database in WAL mode with nonexistent WAL: %s", err.Error()) } } diff --git a/db/db_common_test.go b/db/db_common_test.go index 20dd7c47b..1f2631981 100644 --- a/db/db_common_test.go +++ b/db/db_common_test.go @@ -53,7 +53,7 @@ func testSetSynchronousMode(t *testing.T, db *DB) { t.Fatalf("failed to get synchronous mode: %s", err.Error()) } if mm != i { - t.Fatalf("synchonous mode not set to %s", m) + t.Fatalf("synchronous mode not set to %s", m) } } } @@ -650,7 +650,7 @@ name TEXT func testSimpleFailingStatements_Execute(t *testing.T, db *DB) { r, err := db.ExecuteStringStmt(`INSERT INTO foo(name) VALUES("fiona")`) if err != nil { - t.Fatalf("error executing insertion into non-existent table: %s", err.Error()) + 
t.Fatalf("error executing insertion into nonexistent table: %s", err.Error()) } if exp, got := `[{"error":"no such table: foo"}]`, asJSON(r); exp != got { t.Fatalf("unexpected results for query\nexp: %s\ngot: %s", exp, got) @@ -697,7 +697,7 @@ func testSimpleFailingStatements_Execute(t *testing.T, db *DB) { func testSimpleFailingStatements_Query(t *testing.T, db *DB) { ro, err := db.QueryStringStmt(`SELECT * FROM bar`) if err != nil { - t.Fatalf("failed to attempt query of non-existent table: %s", err.Error()) + t.Fatalf("failed to attempt query of nonexistent table: %s", err.Error()) } if exp, got := `[{"error":"no such table: bar"}]`, asJSON(ro); exp != got { t.Fatalf("unexpected results for query\nexp: %s\ngot: %s", exp, got) @@ -1101,7 +1101,7 @@ func testSimpleRequest(t *testing.T, db *DB) { exp: `[{"last_insert_id":3,"rows_affected":1},{"columns":["COUNT(*)"],"types":["integer"],"values":[[3]]},{"columns":["last"],"types":["text"],"values":[["feynman"]]}]`, }, { - name: "insert and select non-existent table", + name: "insert and select nonexistent table", stmts: []string{ `INSERT INTO foo(first, last) VALUES("paul", "dirac")`, `SELECT COUNT(*) FROM foo`, diff --git a/db/db_test.go b/db/db_test.go index 1a294892d..32f2f2c9d 100644 --- a/db/db_test.go +++ b/db/db_test.go @@ -15,14 +15,14 @@ import ( "github.com/rqlite/rqlite/v8/random" ) -// Test_OpenNonExistentDatabase tests that opening a non-existent database +// Test_OpenNonExistentDatabase tests that opening a nonexistent database // works OK. It should. func Test_OpenNonExistentDatabase(t *testing.T) { path := mustTempPath() defer os.Remove(path) _, err := Open(path, false, false) if err != nil { - t.Fatalf("error opening non-existent database: %s", err.Error()) + t.Fatalf("error opening nonexistent database: %s", err.Error()) } // Confirm a file was created. 
if !fileExists(path) { @@ -35,7 +35,7 @@ func Test_WALRemovedOnClose(t *testing.T) { defer os.Remove(path) db, err := Open(path, false, true) if err != nil { - t.Fatalf("error opening non-existent database") + t.Fatalf("error opening nonexistent database") } defer db.Close() if !db.WALEnabled() { diff --git a/db/state.go b/db/state.go index ba3282fe9..375ca8bd5 100644 --- a/db/state.go +++ b/db/state.go @@ -38,7 +38,7 @@ func WALPath(dbPath string) string { } // IsValidSQLiteFile checks that the supplied path looks like a SQLite file. -// A non-existent file is considered invalid. +// A nonexistent file is considered invalid. func IsValidSQLiteFile(path string) bool { f, err := os.Open(path) if err != nil { @@ -62,7 +62,7 @@ func IsValidSQLiteData(b []byte) bool { // IsValidSQLiteWALFile checks that the supplied path looks like a SQLite // WAL file. See https://www.sqlite.org/fileformat2.html#walformat. A -// non-existent file is considered invalid. +// nonexistent file is considered invalid. func IsValidSQLiteWALFile(path string) bool { f, err := os.Open(path) if err != nil { diff --git a/disco/service_test.go b/disco/service_test.go index afe137ec5..1a10cc3ac 100644 --- a/disco/service_test.go +++ b/disco/service_test.go @@ -6,7 +6,7 @@ import ( "time" ) -func Test_NewServce(t *testing.T) { +func Test_NewService(t *testing.T) { s := NewService(&mockClient{}, &mockStore{}, Voter) if s == nil { t.Fatalf("service is nil") diff --git a/http/service.go b/http/service.go index 52b340960..c8ccb8a59 100644 --- a/http/service.go +++ b/http/service.go @@ -1469,7 +1469,7 @@ func (s *Service) CheckRequestPerm(r *http.Request, perm string) (b bool) { return s.credentialStore.AA(username, password, perm) } -// CheckRequestPermAll checksif the request is authenticated and authorized +// CheckRequestPermAll checks if the request is authenticated and authorized // with all the given Perms. 
func (s *Service) CheckRequestPermAll(r *http.Request, perms ...string) (b bool) { defer func() { diff --git a/http/service_tls_test.go b/http/service_tls_test.go index 91b7c2d2e..6fb33d4f1 100644 --- a/http/service_tls_test.go +++ b/http/service_tls_test.go @@ -96,7 +96,7 @@ func Test_TLSServiceSecure(t *testing.T) { url := fmt.Sprintf("https://%s", s.Addr().String()) - // Create a TLS Config which verfies server cert, and trusts the CA cert. + // Create a TLS Config which verifies server cert, and trusts the CA cert. tlsConfig := &tls.Config{InsecureSkipVerify: false} tlsConfig.RootCAs = x509.NewCertPool() ok := tlsConfig.RootCAs.AppendCertsFromPEM(cert) @@ -190,7 +190,7 @@ func Test_TLSServiceSecureMutual(t *testing.T) { url := fmt.Sprintf("https://%s", s.Addr().String()) - // Create a TLS Config which wil require verification of the server cert, and trusts the CA cert. + // Create a TLS Config which will require verification of the server cert, and trusts the CA cert. tlsConfig := &tls.Config{InsecureSkipVerify: false} tlsConfig.RootCAs = x509.NewCertPool() ok := tlsConfig.RootCAs.AppendCertsFromPEM(caCertPEM) diff --git a/rtls/config_test.go b/rtls/config_test.go index aadfbfa78..0d02a7d5b 100644 --- a/rtls/config_test.go +++ b/rtls/config_test.go @@ -70,7 +70,7 @@ func Test_CreateClientConfig(t *testing.T) { t.Fatalf("failed to create client config: %v", err) } if !config.InsecureSkipVerify { - t.Fatalf("expected InsecureSkipVerify to be true, got falsee") + t.Fatalf("expected InsecureSkipVerify to be true, got false") } // Check that the certificate is loaded correctly diff --git a/snapshot/sink.go b/snapshot/sink.go index 1a55a719e..bd276e92e 100644 --- a/snapshot/sink.go +++ b/snapshot/sink.go @@ -135,7 +135,7 @@ func (s *Sink) processSnapshotData() (retErr error) { } if len(snapshots) == 0 && !db.IsValidSQLiteFile(s.dataFD.Name()) { - // We have no snapshots yet, so the incomding data must be a valid SQLite file. 
+ // We have no snapshots yet, so the incoming data must be a valid SQLite file. return fmt.Errorf("data for first snapshot must be a valid SQLite file") } diff --git a/snapshot/store.go b/snapshot/store.go index 6fc390967..fe472a1f0 100644 --- a/snapshot/store.go +++ b/snapshot/store.go @@ -399,7 +399,7 @@ func syncDirParentMaybe(dir string) error { return syncDir(parentDir(dir)) } -// syncDirParentMaybe syncsthe given directory, but only on non-Windows platforms. +// syncDirMaybe syncs the given directory, but only on non-Windows platforms. func syncDirMaybe(dir string) error { if runtime.GOOS == "windows" { return nil diff --git a/snapshot/store_test.go b/snapshot/store_test.go index 356560e77..ebce7a5f6 100644 --- a/snapshot/store_test.go +++ b/snapshot/store_test.go @@ -69,9 +69,9 @@ func Test_StoreEmpty(t *testing.T) { t.Errorf("Expected full snapshot needed, but it is not") } - _, _, err = store.Open("non-existent") + _, _, err = store.Open("nonexistent") if err == nil { - t.Fatalf("Expected error opening non-existent snapshot, got nil") + t.Fatalf("Expected error opening nonexistent snapshot, got nil") } n, err := store.Reap() diff --git a/snapshot/upgrader.go b/snapshot/upgrader.go index daaf957d2..bd57a5df4 100644 --- a/snapshot/upgrader.go +++ b/snapshot/upgrader.go @@ -18,7 +18,7 @@ const ( v7StateFile = "state.bin" ) -// Upgrade writes a copy of the 7.x-format Snapshot dircectory at 'old' to a +// Upgrade writes a copy of the 7.x-format Snapshot directory at 'old' to a // new Snapshot directory at 'new'. If the upgrade is successful, the // 'old' directory is removed before the function returns. func Upgrade(old, new string, logger *log.Logger) (retErr error) { @@ -91,7 +91,7 @@ func Upgrade(old, new string, logger *log.Logger) (retErr error) { // Ensure all file handles are closed before any directory is renamed or removed. if err := func() error { - // Write SQLite database file into new snapshto dir.
+ // Write SQLite database file into new snapshot dir. newSqlitePath := filepath.Join(newTmpDir, oldMeta.ID+".db") newSqliteFd, err := os.Create(newSqlitePath) if err != nil { diff --git a/snapshot/upgrader_test.go b/snapshot/upgrader_test.go index 9836de0d0..b1ddb9434 100644 --- a/snapshot/upgrader_test.go +++ b/snapshot/upgrader_test.go @@ -13,7 +13,7 @@ import ( func Test_Upgrade_NothingToDo(t *testing.T) { logger := log.New(os.Stderr, "[snapshot-store-upgrader] ", 0) if err := Upgrade("/does/not/exist", "/does/not/exist/either", logger); err != nil { - t.Fatalf("failed to upgrade non-existent directories: %s", err) + t.Fatalf("failed to upgrade nonexistent directories: %s", err) } oldEmpty := t.TempDir() @@ -30,7 +30,7 @@ func Test_Upgrade_OK(t *testing.T) { oldTemp := filepath.Join(t.TempDir(), "snapshots") newTemp := filepath.Join(t.TempDir(), "rsnapshots") - // Copy directory because succeessful test runs will delete it. + // Copy directory because successful test runs will delete it. copyDir(v7Snapshot, oldTemp) // Upgrade it. diff --git a/store/command_processor.go b/store/command_processor.go index 31131492d..84c01407c 100644 --- a/store/command_processor.go +++ b/store/command_processor.go @@ -77,7 +77,7 @@ func (c *CommandProcessor) Process(data []byte, db *sql.SwappableDB) (*proto.Com } // create a scratch file in the same directory as s.db.Path() - fd, err := createTemp(filepath.Dir(db.Path()), "rqlilte-load-") + fd, err := createTemp(filepath.Dir(db.Path()), "rqlite-load-") if err != nil { return cmd, false, &fsmGenericResponse{error: fmt.Errorf("failed to create temporary database file: %s", err)} } @@ -124,7 +124,7 @@ func (c *CommandProcessor) Process(data []byte, db *sql.SwappableDB) (*proto.Com c.decMgmr.Delete(lcr.StreamId) defer os.Remove(path) - // Check if reassembled dayabase is valid. If not, do not perform the load. This could + // Check if reassembled database is valid. If not, do not perform the load. 
This could // happen a snapshot truncated earlier parts of the log which contained the earlier parts // of a database load. If that happened then the database has already been loaded, and // this load should be ignored. diff --git a/store/gzip/decompressor.go b/store/gzip/decompressor.go index 96c50100b..24bc51f01 100644 --- a/store/gzip/decompressor.go +++ b/store/gzip/decompressor.go @@ -17,7 +17,7 @@ type Decompressor struct { nTx int64 } -// NewDecompressor returns an instantied Decompressor that reads from r and +// NewDecompressor returns an instantiated Decompressor that reads from r and // decompresses the data using gzip. func NewDecompressor(r io.Reader) *Decompressor { return &Decompressor{ diff --git a/store/gzip/decompressor_test.go b/store/gzip/decompressor_test.go index e96f08510..41e0a7c66 100644 --- a/store/gzip/decompressor_test.go +++ b/store/gzip/decompressor_test.go @@ -48,7 +48,7 @@ func Test_Decompressor_EndToEnd(t *testing.T) { testData := []byte("This is a test string, xxxxx -- xxxxxx -- test should compress") srcBuf := bytes.NewBuffer(testData) - // Accept connections on the listern + // Accept connections on the listener go func() { for { conn, err := ln.Accept() diff --git a/store/provider_test.go b/store/provider_test.go index abb242f1e..0c250c823 100644 --- a/store/provider_test.go +++ b/store/provider_test.go @@ -10,7 +10,7 @@ import ( command "github.com/rqlite/rqlite/v8/command/proto" ) -func test_SingleNodeProvide(t *testing.T, vaccuum, compress bool) { +func test_SingleNodeProvide(t *testing.T, vacuum, compress bool) { s0, ln := mustNewStore(t) defer ln.Close() @@ -49,7 +49,7 @@ func test_SingleNodeProvide(t *testing.T, vaccuum, compress bool) { tmpFd := mustCreateTempFD() defer os.Remove(tmpFd.Name()) defer tmpFd.Close() - provider := NewProvider(s0, vaccuum, compress) + provider := NewProvider(s0, vacuum, compress) if err := provider.Provide(tmpFd); err != nil { t.Fatalf("failed to provide SQLite data: %s", err.Error()) } diff 
--git a/store/state.go b/store/state.go index 76af3aa7f..3c6b2e9e3 100644 --- a/store/state.go +++ b/store/state.go @@ -31,7 +31,7 @@ func IsStaleRead( return false } if time.Since(leaderlastContact).Nanoseconds() > freshness { - // The Leader has not been in contact witin the freshness window, so + // The Leader has not been in contact within the freshness window, so // the read is stale. return true } @@ -56,7 +56,7 @@ func IsStaleRead( // IsNewNode returns whether a node using raftDir would be a brand-new node. // It also means that the window for this node joining a different cluster has passed. func IsNewNode(raftDir string) bool { - // If there is any pre-existing Raft state, then this node + // If there is any preexisting Raft state, then this node // has already been created. return !pathExists(filepath.Join(raftDir, raftDBPath)) } diff --git a/store/state_test.go b/store/state_test.go index c7dd99ad0..c13d2ef0b 100644 --- a/store/state_test.go +++ b/store/state_test.go @@ -74,7 +74,7 @@ func Test_IsStaleRead(t *testing.T) { Exp: true, }, { - Name: "freshness set, is ok, strict is set, appended time does not execeed, applied index is behind", + Name: "freshness set, is ok, strict is set, appended time does not exceed, applied index is behind", LeaderLastContact: time.Now(), LastFSMUpdateTime: time.Now(), LastAppendedAtTime: time.Now(), diff --git a/store/store.go b/store/store.go index 52df8dd37..0b3aa7041 100644 --- a/store/store.go +++ b/store/store.go @@ -278,7 +278,7 @@ type Store struct { fsmIdx *atomic.Uint64 fsmUpdateTime *AtomicTime // This is node-local time. - // appendedAtTimeis the Leader's clock time when that Leader appended the log entry. + // appendedAtTime is the Leader's clock time when that Leader appended the log entry. // The Leader that actually appended the log entry is not necessarily the current Leader. 
appendedAtTime *AtomicTime @@ -456,7 +456,7 @@ func (s *Store) Open() (retErr error) { config := s.raftConfig() config.LocalID = raft.ServerID(s.raftID) - // Upgrade any pre-existing snapshots. + // Upgrade any preexisting snapshots. oldSnapshotDir := filepath.Join(s.raftDir, "snapshots") if err := snapshot.Upgrade(oldSnapshotDir, s.snapshotDir, s.logger); err != nil { return fmt.Errorf("failed to upgrade snapshots: %s", err) @@ -1458,7 +1458,7 @@ func (s *Store) ReadFrom(r io.Reader) (int64, error) { return n, fmt.Errorf("invalid SQLite data") } - // Raft won't snapshot unless there is at least one unsnappshotted log entry, + // Raft won't snapshot unless there is at least one unsnapshotted log entry, // so prep that now before we do anything destructive. if af, err := s.Noop("boot"); err != nil { return n, err diff --git a/store/store_multi_test.go b/store/store_multi_test.go index 1666fb299..6b377afde 100644 --- a/store/store_multi_test.go +++ b/store/store_multi_test.go @@ -447,7 +447,7 @@ func Test_MultiNodeJoinRemove(t *testing.T) { err = s0.WaitForRemoval(s1.ID(), time.Second) // if err is nil then fail the test if err == nil { - t.Fatalf("no error waiting for removal of non-existent node") + t.Fatalf("no error waiting for removal of nonexistent node") } if !errors.Is(err, ErrWaitForRemovalTimeout) { t.Fatalf("waiting for removal resulted in wrong error: %s", err.Error()) diff --git a/store/store_test.go b/store/store_test.go index ebff3f155..8cfa84dca 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -94,7 +94,7 @@ func Test_OpenStoreSingleNode(t *testing.T) { } // Test_SingleNodeSQLitePath ensures that basic functionality works when the SQLite -// database path is explicitly specificed. It also checks that the CommitIndex is +// database path is explicitly specified. It also checks that the CommitIndex is // set correctly. 
func Test_SingleNodeOnDiskSQLitePath(t *testing.T) { s, ln, path := mustNewStoreSQLitePath(t) @@ -1561,7 +1561,7 @@ COMMIT; t.Fatalf("unexpected results for query\nexp: %s\ngot: %s", exp, got) } - // Check pre-existing data is gone. + // Check preexisting data is gone. qr = queryRequestFromString("SELECT * FROM bar", false, true) qr.Level = proto.QueryRequest_QUERY_REQUEST_LEVEL_STRONG r, err = s.Query(qr) @@ -1728,7 +1728,7 @@ COMMIT; t.Fatalf("unexpected results for query\nexp: %s\ngot: %s", exp, got) } - // Check pre-existing data is gone. + // Check preexisting data is gone. qr = queryRequestFromString("SELECT * FROM bar", false, true) qr.Level = proto.QueryRequest_QUERY_REQUEST_LEVEL_STRONG r, err = s.Query(qr) @@ -1869,7 +1869,7 @@ func Test_SingleNode_WALTriggeredSnapshot(t *testing.T) { testPoll(t, f, 100*time.Millisecond, 2*time.Second) // Sanity-check the contents of the Store. There should be two - // files -- a SQLite database file, and a diretory named after + // files -- a SQLite database file, and a directory named after // the most recent snapshot. This basically checks that reaping // is working, as it can be tricky on Windows due to stricter // file deletion rules. 
@@ -2433,16 +2433,16 @@ func Test_SingleNodeWaitForRemove(t *testing.T) { err := s.WaitForRemoval(s.ID(), time.Second) // if err is nil then fail the test if err == nil { - t.Fatalf("no error waiting for removal of non-existent node") + t.Fatalf("no error waiting for removal of nonexistent node") } if !errors.Is(err, ErrWaitForRemovalTimeout) { t.Fatalf("waiting for removal resulted in wrong error: %s", err.Error()) } - // should be no error waiting for removal of non-existent node - err = s.WaitForRemoval("non-existent-node", time.Second) + // should be no error waiting for removal of nonexistent node + err = s.WaitForRemoval("nonexistent-node", time.Second) if err != nil { - t.Fatalf("error waiting for removal of non-existent node: %s", err.Error()) + t.Fatalf("error waiting for removal of nonexistent node: %s", err.Error()) } } @@ -2567,7 +2567,7 @@ func Test_RWROCount(t *testing.T) { expRW: 1, }, { - name: "Single INSERT, non-existent table", + name: "Single INSERT, nonexistent table", stmts: []string{"INSERT INTO qux(id, name) VALUES(1, 'fiona')"}, expRW: 1, }, @@ -2577,7 +2577,7 @@ func Test_RWROCount(t *testing.T) { expRO: 1, }, { - name: "Single SELECT from non-existent table", + name: "Single SELECT from nonexistent table", stmts: []string{"SELECT * FROM qux"}, expRW: 1, // Yeah, this is unfortunate, but it's how SQLite works. 
}, @@ -2681,7 +2681,7 @@ type mockLayer struct { func mustMockLayer(addr string) Layer { ln, err := net.Listen("tcp", addr) if err != nil { - panic("failed to create new listner") + panic("failed to create new listener") } return &mockLayer{ln} } diff --git a/system_test/cluster_test.go b/system_test/cluster_test.go index 58384266a..f833b6bcd 100644 --- a/system_test/cluster_test.go +++ b/system_test/cluster_test.go @@ -227,7 +227,7 @@ func Test_MultiNodeClusterRANDOM(t *testing.T) { trueOrTimeout(tFn, 10*time.Second) } -// Test_MultiNodeClusterBootstrap tests formation of a 3-node cluster via bootstraping, +// Test_MultiNodeClusterBootstrap tests formation of a 3-node cluster via bootstrapping, // and its operation. func Test_MultiNodeClusterBootstrap(t *testing.T) { node1 := mustNewNode("node1", false) @@ -393,7 +393,7 @@ func Test_MultiNodeClusterBootstrap(t *testing.T) { } // Test_MultiNodeClusterBootstrapLaterJoin tests formation of a 3-node cluster and -// then checking a 4th node can join later with the bootstap parameters. +// then checking a 4th node can join later with the bootstrap parameters. func Test_MultiNodeClusterBootstrapLaterJoin(t *testing.T) { node1 := mustNewNode("node1", false) node1.Store.BootstrapExpect = 3 @@ -491,7 +491,7 @@ func Test_MultiNodeClusterBootstrapLaterJoin(t *testing.T) { } // Test_MultiNodeClusterBootstrapLaterJoinTLS tests formation of a 3-node cluster which -// uses HTTP and TLS,then checking a 4th node can join later with the bootstap parameters. +// uses HTTP and TLS, then checking a 4th node can join later with the bootstrap parameters. func Test_MultiNodeClusterBootstrapLaterJoinTLS(t *testing.T) { node1 := mustNewNodeEncrypted("node1", false, true, true) node1.Store.BootstrapExpect = 3 @@ -1671,7 +1671,7 @@ func Test_MultiNodeClusterReapNodes(t *testing.T) { t.Fatalf("failed waiting for leader: %s", err.Error()) } - // Confirm non-voter node is in the the cluster config.
+ // Confirm non-voter node is in the cluster config. nodes, err := leader.Nodes(true) if err != nil { t.Fatalf("failed to get nodes: %s", err.Error()) @@ -1690,7 +1690,7 @@ func Test_MultiNodeClusterReapNodes(t *testing.T) { t.Fatalf("timed out waiting for non-voting node to be reaped") } - // Confirm voting node is in the the cluster config. + // Confirm voting node is in the cluster config. nodes, err = leader.Nodes(true) if err != nil { t.Fatalf("failed to get nodes: %s", err.Error()) @@ -1737,7 +1737,7 @@ func Test_MultiNodeClusterNoReap(t *testing.T) { t.Fatalf("failed waiting for leader: %s", err.Error()) } - // Confirm non-voter node is in the the cluster config. + // Confirm non-voter node is in the cluster config. nodes, err := node1.Nodes(true) if err != nil { t.Fatalf("failed to get nodes: %s", err.Error()) @@ -1807,7 +1807,7 @@ func Test_MultiNodeClusterNoReapZero(t *testing.T) { t.Fatalf("failed to find cluster leader: %s", err.Error()) } - // Confirm voting node is in the the cluster config. + // Confirm voting node is in the cluster config. nodes, err := leader.Nodes(true) if err != nil { t.Fatalf("failed to get nodes: %s", err.Error()) @@ -1853,7 +1853,7 @@ func Test_MultiNodeClusterNoReapReadOnlyZero(t *testing.T) { t.Fatalf("failed waiting for leader: %s", err.Error()) } - // Confirm non-voter node is in the the cluster config. + // Confirm non-voter node is in the cluster config. 
nodes, err := node1.Nodes(true) if err != nil { t.Fatalf("failed to get nodes: %s", err.Error()) diff --git a/system_test/e2e/auto_clustering.py b/system_test/e2e/auto_clustering.py index 160bac3e2..e6e03b6c8 100644 --- a/system_test/e2e/auto_clustering.py +++ b/system_test/e2e/auto_clustering.py @@ -307,7 +307,7 @@ def test_consul_readonly(self): self.autocluster_readonly(TestAutoClusteringKVStores.DiscoModeConsulKV) def test_etcd_readonly(self): - '''Test clustering via Ectd when a read-only node is started first''' + '''Test clustering via Etcd when a read-only node is started first''' self.autocluster_readonly(TestAutoClusteringKVStores.DiscoModeEtcdKV) def test_consul_config(self): diff --git a/system_test/e2e/helpers.py b/system_test/e2e/helpers.py index d75339c19..b242ad9fc 100644 --- a/system_test/e2e/helpers.py +++ b/system_test/e2e/helpers.py @@ -331,7 +331,7 @@ def wait_for_leader(self, timeout=TIMEOUT, log=True, ready=True): pass time.sleep(0.1) - # Perform a check on readyness while we're here. + # Perform a check on readiness while we're here. 
if ready and (self.ready() is not True): raise Exception('leader is available but node reports not ready') diff --git a/system_test/e2e/joining.py b/system_test/e2e/joining.py index 19ff9c8b1..4358022bf 100644 --- a/system_test/e2e/joining.py +++ b/system_test/e2e/joining.py @@ -201,7 +201,7 @@ def test_no_change_id_addr(self): self.assertEqual(j, d_("{'results': [{'values': [[2]], 'types': ['integer'], 'columns': ['COUNT(*)']}]}")) applied = n0.wait_for_all_applied() - # Restart follower, explicity rejoin, and ensure it picks up new records + # Restart follower, explicitly rejoin, and ensure it picks up new records self.n1.start(join=self.n0.RaftAddr()) self.n1.wait_for_leader() self.n1.wait_for_fsm_index(applied) @@ -233,7 +233,7 @@ def test_change_addresses(self): self.assertEqual(j, d_("{'results': [{'values': [[2]], 'types': ['integer'], 'columns': ['COUNT(*)']}]}")) applied = n0.wait_for_all_applied() - # Restart follower with new network attributes, explicity rejoin, and ensure it picks up new records + # Restart follower with new network attributes, explicitly rejoin, and ensure it picks up new records self.n1.scramble_network() self.n1.start(join=self.n0.RaftAddr()) self.n1.wait_for_leader() diff --git a/system_test/e2e/single_node.py b/system_test/e2e/single_node.py index b77a1df1a..1ced2e36e 100644 --- a/system_test/e2e/single_node.py +++ b/system_test/e2e/single_node.py @@ -26,7 +26,7 @@ def tearDown(self): self.cluster.deprovision() def test_pragmas(self): - '''Test that the critical configration is correct''' + '''Test that the critical configuration is correct''' n = self.cluster.wait_for_leader() ro_pragmas = n.pragmas()['ro'] rw_pragmas = n.pragmas()['rw'] @@ -169,7 +169,7 @@ def test_simple_parameterized_mixed_queries_via_request(self): self.assertEqual(j, d_("{'results': [{'types': {'age': 'integer', 'id': 'integer', 'name': 'text'}, 'rows': [{'age': 20, 'id': 1, 'name': 'fiona'}, {'age': 25, 'id': 2, 'name': 'sinead'}]}]}")) def 
test_snapshot(self): - ''' Test that a node peforms at least 1 snapshot''' + ''' Test that a node performs at least 1 snapshot''' n = self.cluster.wait_for_leader() j = n.execute('CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT)') self.assertEqual(j, d_("{'results': [{}]}")) diff --git a/system_test/helpers.go b/system_test/helpers.go index a37ac49e7..3cb096b77 100644 --- a/system_test/helpers.go +++ b/system_test/helpers.go @@ -106,7 +106,7 @@ func (n *Node) ExecuteMulti(stmts []string) (string, error) { return n.postExecute(string(j)) } -// ExecuteParameterized executes a single paramterized query against the node +// ExecuteParameterized executes a single parameterized query against the node func (n *Node) ExecuteParameterized(stmt []interface{}) (string, error) { m := make([][]interface{}, 1) m[0] = stmt @@ -166,7 +166,7 @@ func (n *Node) QueryMulti(stmts []string) (string, error) { return n.postQuery(string(j)) } -// QueryParameterized run a single paramterized query against the ndoe +// QueryParameterized runs a single parameterized query against the node func (n *Node) QueryParameterized(stmt []interface{}) (string, error) { m := make([][]interface{}, 1) m[0] = stmt @@ -192,7 +192,7 @@ func (n *Node) RequestMulti(stmts []string) (string, error) { return n.postRequest(string(j)) } -// RequestMultiParameterized runs a single paramterized request against the node +// RequestMultiParameterized runs a single parameterized request against the node func (n *Node) RequestMultiParameterized(stmt []interface{}) (string, error) { m := make([][]interface{}, 1) m[0] = stmt @@ -251,9 +251,9 @@ func (n *Node) Noop(id string) error { // EnableTLSClient enables TLS support for the node's cluster client.
func (n *Node) EnableTLSClient() { tlsConfig := mustCreateTLSConfig(n.NodeCertPath, n.NodeKeyPath, "") - clsterDialer := tcp.NewDialer(cluster.MuxClusterHeader, tlsConfig) - clsterClient := cluster.NewClient(clsterDialer, 30*time.Second) - n.Client = clsterClient + clusterDialer := tcp.NewDialer(cluster.MuxClusterHeader, tlsConfig) + clusterClient := cluster.NewClient(clusterDialer, 30*time.Second) + n.Client = clusterClient } // Join instructs this node to join the leader. @@ -364,7 +364,7 @@ func (n *Node) Ready() (bool, error) { return resp.StatusCode == 200, nil } -// Liveness returns the viveness status for the node, primarily +// Liveness returns the liveness status for the node, primarily // for use by Kubernetes. func (n *Node) Liveness() (bool, error) { v, _ := url.Parse("http://" + n.APIAddr + "/readyz?noleader") @@ -774,7 +774,7 @@ func mustNewLeaderNode(id string) *Node { func mustTempDir(s string) string { var err error - path, err := os.MkdirTemp("", fmt.Sprintf("rqlilte-system-test-%s-", s)) + path, err := os.MkdirTemp("", fmt.Sprintf("rqlite-system-test-%s-", s)) if err != nil { panic("failed to create temp dir") }