diff --git a/CMakeLists.txt b/CMakeLists.txt index 3f288dfd123..7ec4864198e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -103,10 +103,10 @@ else() set(FDB_VERSION ${PROJECT_VERSION}) endif() if (NOT FDB_RELEASE) - string(TIMESTAMP FDB_BUILD_TIMESTMAP %Y%m%d%H%M%S) + string(TIMESTAMP FDB_BUILD_TIMESTAMP %Y%m%d%H%M%S) # Adding support to pass custom fdb_build timestamp, # required to achieve uniform naming across different builds - set(FDB_BUILDTIME "${FDB_BUILD_TIMESTMAP}" CACHE STRING "A timestamp for packages") + set(FDB_BUILDTIME "${FDB_BUILD_TIMESTAMP}" CACHE STRING "A timestamp for packages") set(FDB_BUILDTIME_STRING ".${FDB_BUILDTIME}") set(PRERELEASE_TAG "prerelease") endif() @@ -144,7 +144,7 @@ include(CompileCoverageTool) include(FlowCommands) ################################################################################ -# Vexilographer +# Vexillographer ################################################################################ include(CompileVexillographer) diff --git a/README.md b/README.md index 31b5f3d1e60..ad8bb1ab42f 100755 --- a/README.md +++ b/README.md @@ -150,9 +150,9 @@ cpack -G DEB For RPM simply replace `DEB` with `RPM`. -### MacOS +### macOS -The build under MacOS will work the same way as on Linux. To get boost and ninja you can use [Homebrew](https://brew.sh/). +The build under macOS will work the same way as on Linux. To get boost and ninja you can use [Homebrew](https://brew.sh/). ```sh cmake -G Ninja diff --git a/SWIFT_GUIDE.md b/SWIFT_GUIDE.md index aad33ec8e6d..331e35b89e0 100644 --- a/SWIFT_GUIDE.md +++ b/SWIFT_GUIDE.md @@ -47,7 +47,7 @@ The integration works "both ways", i.e. Swift can call into Flow/C++ code, as we Swift generates clang modules which can be consumed in C++. For example, the module `fdbserver_swift` contains all swift code in `fdbserver/`. -> Note: you can check, and add new files to the `_swift` targets by locating the command, e.g. `add_library(fdbserver_swft` in [fdbserver/CMakeLists.txt](fdbserver/CMakeLists.txt). +> Note: you can check, and add new files to the `_swift` targets by locating the command, e.g. `add_library(fdbserver_swift` in [fdbserver/CMakeLists.txt](fdbserver/CMakeLists.txt). Then, you can then include the generated module in C++: diff --git a/SWIFT_IDE_SETUP.md b/SWIFT_IDE_SETUP.md index 835ea3a6037..7c4d225f788 100644 --- a/SWIFT_IDE_SETUP.md +++ b/SWIFT_IDE_SETUP.md @@ -11,7 +11,7 @@ Note that Swift 5.9 (or higher) is required for the build at the time of writing You can download Swift toolchains from [https://www.swift.org/download/](https://www.swift.org/download/), or by using the experimental https://github.com/swift-server/swiftly which simplifies this process. 
-## VSCode + Cross Compilation (MacOS to Linux) +## VSCode + Cross Compilation (macOS to Linux) ## Host toolchain setup diff --git a/bindings/bindingtester/run_binding_tester.sh b/bindings/bindingtester/run_binding_tester.sh index 06c3f0a710a..332aeb9b5ea 100644 --- a/bindings/bindingtester/run_binding_tester.sh +++ b/bindings/bindingtester/run_binding_tester.sh @@ -42,7 +42,7 @@ VERSION="1.6" if [ "${#}" -lt 2 ] then echo 'run_binding_tester.sh ' - echo ' cycles: number of cycles to run test (0 => unlimitted)' + echo ' cycles: number of cycles to run test (0 => unlimited)' echo '' echo ' Modifiable Environment Variables:' echo ' CONCURRENCY: number of concurrent requests' diff --git a/bindings/c/test/apitester/TesterTransactionExecutor.cpp b/bindings/c/test/apitester/TesterTransactionExecutor.cpp index 2df78c123cb..cdf32487bd9 100644 --- a/bindings/c/test/apitester/TesterTransactionExecutor.cpp +++ b/bindings/c/test/apitester/TesterTransactionExecutor.cpp @@ -230,7 +230,7 @@ class TransactionContextBase : public ITransactionContext { return errCode == error_code_no_cluster_file_found || errCode == error_code_connection_string_invalid; } - // Complete the transaction with an (unretriable) error + // Complete the transaction with an (unretryable) error void transactionFailed(fdb::Error err) { ASSERT(err); std::unique_lock lock(mutex); @@ -701,7 +701,7 @@ class TransactionExecutorBase : public ITransactionExecutor { originalClusterFile = clusterFile; this->clusterFile = tamperedClusterFile.getFileName(); - // begin with a valid cluster file, but with non existing address + // begin with a valid cluster file, but with nonexistent address tamperedClusterFile.write(fmt::format("{}:{}@192.168.{}.{}:{}", Random().get().randomStringLowerCase(3, 8), Random().get().randomStringLowerCase(1, 100), diff --git a/bindings/c/test/fdb_c_client_config_tests.py b/bindings/c/test/fdb_c_client_config_tests.py index 6de76ea74c8..4cab7a87495 100644 --- a/bindings/c/test/fdb_c_client_config_tests.py +++ b/bindings/c/test/fdb_c_client_config_tests.py @@ -452,7 +452,7 @@ def test_no_external_client_support_api_version_ignore(self): test.exec() def test_one_external_client_wrong_api_version(self): - # Multiple external clients, API version unsupported by one of othem + # Multiple external clients, API version unsupported by one of them test = ClientConfigTest(self) test.create_external_lib_dir( [CURRENT_VERSION, PREV_RELEASE_VERSION, PREV2_RELEASE_VERSION] diff --git a/bindings/c/test/mako/async.cpp b/bindings/c/test/mako/async.cpp index 8ae83bab09e..000da942bda 100644 --- a/bindings/c/test/mako/async.cpp +++ b/bindings/c/test/mako/async.cpp @@ -251,7 +251,7 @@ void ResumableStateForRunWorkload::onIterationEnd(FutureRC rc) { void ResumableStateForRunWorkload::updateErrorStats(fdb::Error err, int op) { if (err) { - if (err.is(1020 /*not_commited*/)) { + if (err.is(1020 /*not_committed*/)) { stats.incrConflictCount(); } else if (err.is(1031 /*timeout*/)) { stats.incrTimeoutCount(op); diff --git a/bindings/c/test/mako/ddsketch.hpp b/bindings/c/test/mako/ddsketch.hpp index 137e595240e..e2f8cc9b022 100644 --- a/bindings/c/test/mako/ddsketch.hpp +++ b/bindings/c/test/mako/ddsketch.hpp @@ -166,7 +166,7 @@ class DDSketchBase { uint64_t getPopulationSize() const { return populationSize; } - double getErrorGurantee() const { return errorGuarantee; } + double getErrorGuarantee() const { return errorGuarantee; } DDSketchBase& mergeWith(const DDSketchBase& anotherSketch) { // Must have the same guarantee diff --git 
a/bindings/c/test/mako/mako.cpp b/bindings/c/test/mako/mako.cpp index 52cd772b9b2..b4a9df8526f 100644 --- a/bindings/c/test/mako/mako.cpp +++ b/bindings/c/test/mako/mako.cpp @@ -294,7 +294,7 @@ int populate(Database db, const ThreadArgs& thread_args, int thread_tps, Workflo void updateErrorStatsRunMode(WorkflowStatistics& stats, fdb::Error err, int op) { if (err) { - if (err.is(1020 /*not_commited*/)) { + if (err.is(1020 /*not_committed*/)) { stats.incrConflictCount(); } else if (err.is(1031 /*timeout*/)) { stats.incrTimeoutCount(op); @@ -2207,7 +2207,7 @@ void printReport(Arguments const& args, fmt::fprintf(fp, "\"totalErrors\": %lu,", final_worker_stats.getTotalErrorCount()); fmt::fprintf(fp, "\"totalTimeouts\": %lu,", final_worker_stats.getTotalTimeoutCount()); fmt::fprintf(fp, "\"overallTPS\": %lu,", tps_i); - fmt::fprintf(fp, "\"workerProcesseCPU\": %.8f,", cpu_util_worker_processes); + fmt::fprintf(fp, "\"workerProcessesCPU\": %.8f,", cpu_util_worker_processes); fmt::fprintf(fp, "\"workerThreadCPU\": %.8f,", cpu_util_worker_threads); fmt::fprintf(fp, "\"localNetworkCPU\": %.8f,", cpu_util_local_fdb_networks); fmt::fprintf(fp, "\"externalNetworkCPU\": %.8f,", cpu_util_external_fdb_networks); diff --git a/bindings/c/test/mako/mako.rst b/bindings/c/test/mako/mako.rst index 8e3b143920d..a325dae770f 100644 --- a/bindings/c/test/mako/mako.rst +++ b/bindings/c/test/mako/mako.rst @@ -178,10 +178,10 @@ Format Transaction Specification Examples ---------------------------------- -- | 100 GETs (Non-commited) +- | 100 GETs (Non-committed) | ``g100`` -- | 10 GET RANGE with Range of 50 (Non-commited) +- | 10 GET RANGE with Range of 50 (Non-committed) | ``gr10:50`` - | 90 GETs and 10 Updates (Committed) diff --git a/bindings/flow/Tuple.cpp b/bindings/flow/Tuple.cpp index 3e4e4a4a98c..39c7205ed43 100644 --- a/bindings/flow/Tuple.cpp +++ b/bindings/flow/Tuple.cpp @@ -547,7 +547,7 @@ Tuple Tuple::getNested(size_t index) const { } } - // The item may shrink because of escaped nulls that are unespaced. + // The item may shrink because of escaped nulls that are unescaped. return Tuple(dest, dest_offsets); } diff --git a/bindings/flow/Tuple.h b/bindings/flow/Tuple.h index 097f4053b26..5e4cad0de57 100644 --- a/bindings/flow/Tuple.h +++ b/bindings/flow/Tuple.h @@ -115,7 +115,7 @@ struct Tuple { static const uint8_t TRUE_CODE; static const uint8_t UUID_CODE; // Java Tuple layer VERSIONSTAMP has 96 bits(12 bytes). - // It has additional 2 bytes user code than the internal VERSIONTAMP of size 10 bytes + // It has additional 2 bytes user code than the internal VERSIONSTAMP of size 10 bytes static const uint8_t VERSIONSTAMP_96_CODE; Tuple(const StringRef& data); diff --git a/bindings/go/src/_stacktester/directory.go b/bindings/go/src/_stacktester/directory.go index 430f6874c9f..be674d1a849 100644 --- a/bindings/go/src/_stacktester/directory.go +++ b/bindings/go/src/_stacktester/directory.go @@ -204,7 +204,7 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool, path := sm.maybePath() // This ***HAS*** to call Transact to ensure that any directory version // key set in the process of trying to remove this potentially - // non-existent directory, in the REMOVE but not REMOVE_IF_EXISTS case, + // nonexistent directory, in the REMOVE but not REMOVE_IF_EXISTS case, // doesn't end up committing the version key. (Other languages have // separate remove() and remove_if_exists() so don't have this tricky // issue). 
diff --git a/bindings/go/src/fdb/doc.go b/bindings/go/src/fdb/doc.go index c9fcdce1cad..23ea175d80e 100644 --- a/bindings/go/src/fdb/doc.go +++ b/bindings/go/src/fdb/doc.go @@ -112,7 +112,7 @@ implemented using only error values: If either read encounters an error, it will be returned to Transact, which will determine if the error is retryable or not (using (Transaction).OnError). If the -error is an FDB Error and retryable (such as a conflict with with another +error is an FDB Error and retryable (such as a conflict with another transaction), then the programmer-provided function will be run again. If the error is fatal (or not an FDB Error), then the error will be returned to the caller of Transact. diff --git a/bindings/go/src/fdb/tuple/tuple.go b/bindings/go/src/fdb/tuple/tuple.go index 2597e20c967..bc0517e94f6 100644 --- a/bindings/go/src/fdb/tuple/tuple.go +++ b/bindings/go/src/fdb/tuple/tuple.go @@ -121,7 +121,7 @@ func (uuid UUID) String() string { return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]) } -// Versionstamp is struct for a FoundationDB verionstamp. Versionstamps are +// Versionstamp is struct for a FoundationDB versionstamp. Versionstamps are // 12 bytes long composed of a 10 byte transaction version and a 2 byte user // version. The transaction version is filled in at commit time and the user // version is provided by the application to order results within a transaction. diff --git a/bindings/java/JavaWorkload.cpp b/bindings/java/JavaWorkload.cpp index 7b0b8df960b..1b1861d0ac5 100644 --- a/bindings/java/JavaWorkload.cpp +++ b/bindings/java/JavaWorkload.cpp @@ -534,7 +534,7 @@ struct JavaWorkload final : FDBWorkload { jvm->shutdownWorkload(workload, name); jvm->env->DeleteGlobalRef(workload); } catch (JNIError& e) { - log.trace(error, "JNIShutDownUnsucessful", { { "Error", e.toString() }, { "Location", e.location() } }); + log.trace(error, "JNIShutDownUnsuccessful", { { "Error", e.toString() }, { "Location", e.location() } }); } } } diff --git a/bindings/java/README.md b/bindings/java/README.md index d30c8b7fb12..9a7ff5a16e4 100644 --- a/bindings/java/README.md +++ b/bindings/java/README.md @@ -31,14 +31,14 @@ make packages #### Multi-Platform Jar-File If you want to create a jar file that can run on more than one supported -architecture (the official one supports MacOS, Linux, and Windows), you can do +architecture (the official one supports macOS, Linux, and Windows), you can do that by executing the following steps: 1. Create a directory called `lib` somewhere on your file system. 1. Create a subdirectory for each *additional* platform you want to support - (`windows` for windows, `osx` for MacOS, and `linux` for Linux). + (`windows` for windows, `osx` for macOS, and `linux` for Linux). 1. Under each of those create a subdirectory with the name of the architecture - (currently only `amd64` is supported - on MacOS this has to be called + (currently only `amd64` is supported - on macOS this has to be called `x86_64` - `amd64` on all others). 1. Set the cmake variable `FAT_JAR_BINARIES` to this `lib` directory. 
For example, if you created this directory structure under `/foo/bar`, the diff --git a/bindings/java/fdbJNI.cpp b/bindings/java/fdbJNI.cpp index 4ebea0cf0b3..0bf652d3aa5 100644 --- a/bindings/java/fdbJNI.cpp +++ b/bindings/java/fdbJNI.cpp @@ -94,11 +94,11 @@ void throwOutOfMem(JNIEnv* jenv) { } static jthrowable getThrowable(JNIEnv* jenv, fdb_error_t e, const char* msg = nullptr) { - jclass excepClass = jenv->FindClass("com/apple/foundationdb/FDBException"); + jclass exceptionClass = jenv->FindClass("com/apple/foundationdb/FDBException"); if (jenv->ExceptionOccurred()) return JNI_NULL; - jmethodID excepCtor = jenv->GetMethodID(excepClass, "", "(Ljava/lang/String;I)V"); + jmethodID exceptionCtor = jenv->GetMethodID(exceptionClass, "", "(Ljava/lang/String;I)V"); if (jenv->ExceptionOccurred()) return JNI_NULL; @@ -107,7 +107,7 @@ static jthrowable getThrowable(JNIEnv* jenv, fdb_error_t e, const char* msg = nu if (jenv->ExceptionOccurred()) return JNI_NULL; - jthrowable t = (jthrowable)jenv->NewObject(excepClass, excepCtor, m, e); + jthrowable t = (jthrowable)jenv->NewObject(exceptionClass, exceptionCtor, m, e); if (jenv->ExceptionOccurred()) return JNI_NULL; diff --git a/bindings/java/src/README.md b/bindings/java/src/README.md index 412c1837543..67cdbb9614a 100644 --- a/bindings/java/src/README.md +++ b/bindings/java/src/README.md @@ -30,7 +30,7 @@ test, do the following: 1. Tag all tests that require multiple clients with `@Tag("MultiClient")` 2. Ensure that your tests have the `MultiClientHelper` extension present, and Registered as an extension -3. Ensure that your test class is in the the JAVA_INTEGRATION_TESTS list in `test.cmake` +3. Ensure that your test class is in the JAVA_INTEGRATION_TESTS list in `test.cmake` ( see `BasicMultiClientIntegrationTest` for a good reference example) diff --git a/bindings/java/src/integration/com/apple/foundationdb/TransactionIntegrationTest.java b/bindings/java/src/integration/com/apple/foundationdb/TransactionIntegrationTest.java index 5c4512fc2e6..bfdc4334d42 100644 --- a/bindings/java/src/integration/com/apple/foundationdb/TransactionIntegrationTest.java +++ b/bindings/java/src/integration/com/apple/foundationdb/TransactionIntegrationTest.java @@ -28,7 +28,7 @@ import org.junit.jupiter.api.extension.ExtendWith; /** - * Miscellanenous tests for Java-bindings-specific implementation of + * Miscellaneous tests for Java-bindings-specific implementation of * transactions */ @ExtendWith(RequiresDatabase.class) diff --git a/bindings/java/src/junit/com/apple/foundationdb/RangeQueryTest.java b/bindings/java/src/junit/com/apple/foundationdb/RangeQueryTest.java index f15a9a99229..a62eda968dd 100644 --- a/bindings/java/src/junit/com/apple/foundationdb/RangeQueryTest.java +++ b/bindings/java/src/junit/com/apple/foundationdb/RangeQueryTest.java @@ -271,14 +271,14 @@ void testRangeScansWorkWithRowLimit(StreamingMode mode) throws Exception { for (KeyValue actualKv : kvs) { if (ByteArrayUtil.compareTo(entry.getKey(), 0, entry.getKey().length, actualKv.getKey(), 0, actualKv.getKey().length) == 0) { - String erroMsg = String.format("Incorrect value for key '%s'; Expected: <%s>, Actual: <%s>", + String errorMsg = String.format("Incorrect value for key '%s'; Expected: <%s>, Actual: <%s>", new String(entry.getKey()), new String(entry.getValue()), new String(actualKv.getValue())); Assertions.assertEquals( 0, ByteArrayUtil.compareTo(entry.getValue(), 0, entry.getValue().length, actualKv.getValue(), 0, actualKv.getValue().length), - erroMsg); + errorMsg); found = true; 
break; } @@ -321,14 +321,14 @@ void testRangeScansWorkWithoutRowLimitReversed(StreamingMode mode) throws Except for (KeyValue actualKv : kvs) { if (ByteArrayUtil.compareTo(entry.getKey(), 0, entry.getKey().length, actualKv.getKey(), 0, actualKv.getKey().length) == 0) { - String erroMsg = String.format("Incorrect value for key '%s'; Expected: <%s>, Actual: <%s>", + String errorMsg = String.format("Incorrect value for key '%s'; Expected: <%s>, Actual: <%s>", new String(entry.getKey()), new String(entry.getValue()), new String(actualKv.getValue())); Assertions.assertEquals( 0, ByteArrayUtil.compareTo(entry.getValue(), 0, entry.getValue().length, actualKv.getValue(), 0, actualKv.getValue().length), - erroMsg); + errorMsg); found = true; break; } @@ -368,14 +368,14 @@ void testRangeScansWorkWithRowLimitReversed(StreamingMode mode) throws Exception for (KeyValue actualKv : kvs) { if (ByteArrayUtil.compareTo(entry.getKey(), 0, entry.getKey().length, actualKv.getKey(), 0, actualKv.getKey().length) == 0) { - String erroMsg = String.format("Incorrect value for key '%s'; Expected: <%s>, Actual: <%s>", + String errorMsg = String.format("Incorrect value for key '%s'; Expected: <%s>, Actual: <%s>", new String(entry.getKey()), new String(entry.getValue()), new String(actualKv.getValue())); Assertions.assertEquals( 0, ByteArrayUtil.compareTo(entry.getValue(), 0, entry.getValue().length, actualKv.getValue(), 0, actualKv.getValue().length), - erroMsg); + errorMsg); found = true; break; } diff --git a/bindings/java/src/junit/com/apple/foundationdb/tuple/ArrayUtilTest.java b/bindings/java/src/junit/com/apple/foundationdb/tuple/ArrayUtilTest.java index adc7eba4d81..6bfa78579a4 100644 --- a/bindings/java/src/junit/com/apple/foundationdb/tuple/ArrayUtilTest.java +++ b/bindings/java/src/junit/com/apple/foundationdb/tuple/ArrayUtilTest.java @@ -385,7 +385,7 @@ void replaceWorks() throws Exception { byte[] expectedResults = arrays.get(i + 3); byte[] results = ByteArrayUtil.replace(src, pattern, replacement); String errorMsg = String.format( - "results <%s> did not match expected results <%s> when replaceing <%s> with <%s> in <%s>", + "results <%s> did not match expected results <%s> when replacing <%s> with <%s> in <%s>", ByteArrayUtil.printable(results), ByteArrayUtil.printable(expectedResults), ByteArrayUtil.printable(pattern), ByteArrayUtil.printable(replacement), ByteArrayUtil.printable(src)); diff --git a/bindings/java/src/main/com/apple/foundationdb/Database.java b/bindings/java/src/main/com/apple/foundationdb/Database.java index b6fd7c9d00e..a58ca2befc9 100644 --- a/bindings/java/src/main/com/apple/foundationdb/Database.java +++ b/bindings/java/src/main/com/apple/foundationdb/Database.java @@ -485,7 +485,7 @@ CompletableFuture readAsync( * transaction may be executed twice. For more information about how to reason * about these situations see * the FounationDB Developer Guide + * target="_blank">the FoundationDB Developer Guide * * @param retryable the block of logic to execute in a {@link Transaction} against * this database @@ -527,7 +527,7 @@ default T run(Function retryable) { * transaction may be executed twice. For more information about how to reason * about these situations see * the FounationDB Developer Guide
+ * target="_blank">the FoundationDB Developer Guide
*
* Any errors encountered executing {@code retryable}, or received from the * database, will be set on the returned {@code CompletableFuture}. diff --git a/bindings/java/src/main/com/apple/foundationdb/MappedRangeQuery.java b/bindings/java/src/main/com/apple/foundationdb/MappedRangeQuery.java index 9de3753ec32..90d118eeb91 100644 --- a/bindings/java/src/main/com/apple/foundationdb/MappedRangeQuery.java +++ b/bindings/java/src/main/com/apple/foundationdb/MappedRangeQuery.java @@ -207,7 +207,7 @@ public void accept(MappedRangeResultInfo data, Throwable error) { private synchronized void startNextFetch() { if (fetchOutstanding) - throw new IllegalStateException("Reentrant call not allowed"); // This can not be called reentrantly + throw new IllegalStateException("Reentrant call not allowed"); // This cannot be called reentrantly if (isCancelled) return; if (chunk != null && mainChunkIsTheLast()) return; diff --git a/bindings/java/src/main/com/apple/foundationdb/RangeQuery.java b/bindings/java/src/main/com/apple/foundationdb/RangeQuery.java index 77e62427357..e51f6f1c3a6 100644 --- a/bindings/java/src/main/com/apple/foundationdb/RangeQuery.java +++ b/bindings/java/src/main/com/apple/foundationdb/RangeQuery.java @@ -209,7 +209,7 @@ public void accept(RangeResultInfo data, Throwable error) { private synchronized void startNextFetch() { if(fetchOutstanding) - throw new IllegalStateException("Reentrant call not allowed"); // This can not be called reentrantly + throw new IllegalStateException("Reentrant call not allowed"); // This cannot be called reentrantly if(isCancelled) return; diff --git a/bindings/java/src/main/com/apple/foundationdb/Tenant.java b/bindings/java/src/main/com/apple/foundationdb/Tenant.java index af9cdb4e9b4..bf411b19551 100644 --- a/bindings/java/src/main/com/apple/foundationdb/Tenant.java +++ b/bindings/java/src/main/com/apple/foundationdb/Tenant.java @@ -169,7 +169,7 @@ CompletableFuture readAsync( * transaction may be executed twice. For more information about how to reason * about these situations see * the FounationDB Developer Guide + * target="_blank">the FoundationDB Developer Guide * * @param retryable the block of logic to execute in a {@link Transaction} against * this tenant @@ -211,7 +211,7 @@ default T run(Function retryable) { * transaction may be executed twice. For more information about how to reason * about these situations see * the FounationDB Developer Guide
+ * target="_blank">the FoundationDB Developer Guide
*
* Any errors encountered executing {@code retryable}, or received from the * database, will be set on the returned {@code CompletableFuture}. diff --git a/bindings/java/src/main/com/apple/foundationdb/async/AsyncUtil.java b/bindings/java/src/main/com/apple/foundationdb/async/AsyncUtil.java index b2c657c9655..a462072fd12 100644 --- a/bindings/java/src/main/com/apple/foundationdb/async/AsyncUtil.java +++ b/bindings/java/src/main/com/apple/foundationdb/async/AsyncUtil.java @@ -534,7 +534,7 @@ public static CompletableFuture composeHandleAsync(CompletableFuture= 630 and inspect.isgeneratorfunction(func): raise ValueError( - "Generators can not be wrapped with fdb.transactional" + "Generators cannot be wrapped with fdb.transactional" ) if isinstance(args[index], TransactionRead): @@ -313,7 +313,7 @@ def wrapper(*args, **kwargs): ret = func(*largs, **kwargs) if fdb.get_api_version() >= 630 and inspect.isgenerator(ret): raise ValueError( - "Generators can not be wrapped with fdb.transactional" + "Generators cannot be wrapped with fdb.transactional" ) tr.commit().wait() committed = True diff --git a/bindings/python/fdb/tenant_management.py b/bindings/python/fdb/tenant_management.py index 42fb5c9c79d..76a8c3a6aa8 100644 --- a/bindings/python/fdb/tenant_management.py +++ b/bindings/python/fdb/tenant_management.py @@ -33,11 +33,11 @@ # calls to this function will not perform the existence check. # # If the existence_check_marker is a non-empty list, return None. -def _check_tenant_existence(tr, key, existence_check_marker, force_maybe_commited): +def _check_tenant_existence(tr, key, existence_check_marker, force_maybe_committed): if not existence_check_marker: existing_tenant = tr[key].wait() existence_check_marker.append(None) - if force_maybe_commited: + if force_maybe_committed: raise _impl.FDBError(1021) # maybe_committed return existing_tenant != None diff --git a/bindings/python/fdb/tuple.py b/bindings/python/fdb/tuple.py index bd2d986d630..69c6bfb436b 100644 --- a/bindings/python/fdb/tuple.py +++ b/bindings/python/fdb/tuple.py @@ -535,7 +535,7 @@ def _elem_has_incomplete(item): def range(t): """Returns a slice of keys that includes all tuples of greater - length than the specified tuple that that start with the + length than the specified tuple that start with the specified elements. e.g. range(('a', 'b')) includes all tuples ('a', 'b', ...)""" diff --git a/cmake/CompileBoost.cmake b/cmake/CompileBoost.cmake index d0a53b4c076..c38e7d3136a 100644 --- a/cmake/CompileBoost.cmake +++ b/cmake/CompileBoost.cmake @@ -177,7 +177,7 @@ set(FORCE_BOOST_BUILD OFF CACHE BOOL "Forces cmake to build boost and ignores an # The precompiled boost silently broke in CI. While investigating, I considered extending # the old check with something like this, so that it would fail loudly if it found a bad -# pre-existing boost. It turns out the error messages we get from CMake explain what is +# preexisting boost. It turns out the error messages we get from CMake explain what is # wrong with Boost. Rather than reimplementing that, I just deleted this logic. This # approach is simpler, has better ergonomics and should be easier to maintain. 
If the build # is picking up your locally installed or partial version of boost, and you don't want diff --git a/cmake/ConfigureCompiler.cmake b/cmake/ConfigureCompiler.cmake index 131a9973a7b..42340c66d0f 100644 --- a/cmake/ConfigureCompiler.cmake +++ b/cmake/ConfigureCompiler.cmake @@ -614,6 +614,6 @@ if (WITH_SWIFT) include(CompilerChecks) check_swift_source_compiles("import CxxStdlib" CanImportCxxStdlibIntoSwift) if (NOT CanImportCxxStdlibIntoSwift) - message(FATAL_ERROR "Swift compiler: can not import C++ standard library into Swift; did you forget to set 'CMAKE_Swift_COMPILER_EXTERNAL_TOOLCHAIN'?") + message(FATAL_ERROR "Swift compiler: cannot import C++ standard library into Swift; did you forget to set 'CMAKE_Swift_COMPILER_EXTERNAL_TOOLCHAIN'?") endif() endif() diff --git a/cmake/FDBComponents.cmake b/cmake/FDBComponents.cmake index d305f48eb6c..c8c54e4c70f 100644 --- a/cmake/FDBComponents.cmake +++ b/cmake/FDBComponents.cmake @@ -166,7 +166,7 @@ endif() ################################################################################ set(WITH_ROCKSDB ON CACHE BOOL "Build with experimental RocksDB support") - # PORTABLE flag for RockdDB changed as of this PR (with v8.3.2 and after): https://github.com/facebook/rocksdb/pull/11419 + # PORTABLE flag for RocksDB changed as of this PR (with v8.3.2 and after): https://github.com/facebook/rocksdb/pull/11419 # https://github.com/facebook/rocksdb/blob/v8.6.7/CMakeLists.txt#L256 set(PORTABLE_ROCKSDB 1 CACHE STRING "Minimum CPU arch to support (i.e. skylake, haswell, etc., or 0 = current CPU, 1 = baseline CPU)") set(ROCKSDB_TOOLS OFF CACHE BOOL "Compile RocksDB tools") diff --git a/cmake/Findjemalloc.cmake b/cmake/Findjemalloc.cmake index 01fbe99f7b8..215fbcc8407 100644 --- a/cmake/Findjemalloc.cmake +++ b/cmake/Findjemalloc.cmake @@ -88,7 +88,7 @@ macro(_configure_jemalloc_pic_target) set_target_properties( jemalloc_pic::jemalloc_pic PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${jemalloc_INCLUDE_DIRS}" - IMPORTED_LOCATION "${jemalloc_pic_LIBRARTY}" + IMPORTED_LOCATION "${jemalloc_pic_LIBRARY}" VERSION "${jemalloc_VERSION}") endif() endmacro() diff --git a/cmake/InstallLayout.cmake b/cmake/InstallLayout.cmake index 99b7747e995..f342e12f5b8 100644 --- a/cmake/InstallLayout.cmake +++ b/cmake/InstallLayout.cmake @@ -215,7 +215,7 @@ set(CPACK_COMPONENT_CLIENTS-TGZ_DISPLAY_NAME "foundationdb-clients") set(CPACK_COMPONENT_CLIENTS-VERSIONED_DISPLAY_NAME "foundationdb${FDB_VERSION}${FDB_BUILDTIME_STRING}${PRERELEASE_TAG}-clients") -# MacOS needs a file extension for the LICENSE file +# macOS needs a file extension for the LICENSE file configure_file(${CMAKE_SOURCE_DIR}/LICENSE ${CMAKE_BINARY_DIR}/License.txt COPYONLY) ################################################################################ diff --git a/design/LoadBalancing/LoadBalancing.md b/design/LoadBalancing/LoadBalancing.md index f67686b570b..277a0ae4f5b 100644 --- a/design/LoadBalancing/LoadBalancing.md +++ b/design/LoadBalancing/LoadBalancing.md @@ -8,7 +8,7 @@ The interpocess communications (IPC) between the processes are supported by the In many cases, the same request can be proceed by multiple processes, e.g. all commit proxies can accept commit requests, and multiple storage server processes can provide values for a given key in double/triple redundancy mode. A load balancer (LB) can be used to distribute the requests over the possible interfaces, preventing one or a few processes getting overloaded. The interface candidates are also referred as *alternative*s. 
The LB is also able to react when one or more interfaces are (temporarily) unavailable by retrying, or re-routing the request to other candidates. The interface candidates are also known as *alternative*s. -Two LBs are provided in FoundationDB: `basicLoadBalance` and `loadBalance`, both defined in [`LoadBalance.actor.h`](https://github.com/apple/foundationdb/blob/main/fdbrpc/include/fdbrpc/LoadBalance.actor.h). The `basicLoadBalance` is a simple load balancer which each interface is equally chosen; while the `loadBalance` accepts a model object, which provides [datacenter](https://apple.github.io/foundationdb/configuration.html#configuring-regions) (DC) awaring balancing algorithms, allowing requests being sent to interfaces in the same DC. +Two LBs are provided in FoundationDB: `basicLoadBalance` and `loadBalance`, both defined in [`LoadBalance.actor.h`](https://github.com/apple/foundationdb/blob/main/fdbrpc/include/fdbrpc/LoadBalance.actor.h). The `basicLoadBalance` is a simple load balancer where each interface is equally chosen; while the `loadBalance` accepts a model object, which provides [datacenter](https://apple.github.io/foundationdb/configuration.html#configuring-regions) (DC) aware balancing algorithms, allowing requests being sent to interfaces in the same DC. In the following sections, the two LBs will be discussed in details. diff --git a/design/backup_v2_partitioned_logs.md b/design/backup_v2_partitioned_logs.md index bf54a1b1789..8b80a42feea 100644 --- a/design/backup_v2_partitioned_logs.md +++ b/design/backup_v2_partitioned_logs.md @@ -1,6 +1,6 @@ # The New FDB Backup System: Requirements & Design -Github tracking issue: https://github.com/apple/foundationdb/issues/1003 +GitHub tracking issue: https://github.com/apple/foundationdb/issues/1003 ## Purpose and Audience @@ -23,7 +23,7 @@ The design of old backup system is [here](https://github.com/apple/foundationdb/ ## Terminology -* **Blob storage**: blob storage is an object storage for unstructed data. Backup files are encoded in binary format and saved in blob storage, e.g., Amazon S3. +* **Blob storage**: blob storage is an object storage for unstructured data. Backup files are encoded in binary format and saved in blob storage, e.g., Amazon S3. * **Version**: FDB continuously generate increasing number as version and use version to decide mutation ordering. Version number typically advance one million per second. To restore a FDB cluster to a specified date and time, the restore system first convert the date and time to the corresponding version number and restore the cluster to the version number. * **Epoch**: A generation of FDB’s transaction system. After a component of the transaction system failed, FDB automatically initiates a recovery and restores the system in a new healthy generation, which is called an epoch. * **Backup worker**: is a new role added to the FDB cluster that is responsible for pulling mutations from transaction logs and saving them to blob storage. diff --git a/design/dynamic-knobs.md b/design/dynamic-knobs.md index c3a51d0b807..629344600bd 100644 --- a/design/dynamic-knobs.md +++ b/design/dynamic-knobs.md @@ -391,7 +391,7 @@ timestamp and a text description of the changes being made. To keep the size of the configuration database manageable, a compaction process runs periodically (defaulting to every five minutes) which compacts individual mutations into a simplified snapshot of key-value pairs. 
Compaction is controlled by the -``ConfigBroadcaster``, using information it peridiodically requests from +``ConfigBroadcaster``, using information it periodically requests from ``ConfigNode``s. Compaction will only compact up to the minimum known version across *all* ``ConfigNode``s. This means that if one ``ConfigNode`` is permanently partitioned from the ``ConfigBroadcaster`` or from clients, no diff --git a/design/global-tag-throttling.md b/design/global-tag-throttling.md index 1ea0db9a354..059541c5e24 100644 --- a/design/global-tag-throttling.md +++ b/design/global-tag-throttling.md @@ -75,7 +75,7 @@ For write operations that are sampled (with probability `COMMIT_SAMPLE_COST`), w The ratekeeper tracks per-storage, per-tag cost statistics in the `GlobalTagThrottlerImpl::throughput` object. -The ratekeeper must also track the rate of transactions performed with each tag. Each GRV proxy agreggates a per-tag counter of transactions started (without sampling). These are sent to the ratekeeper through `GetRateInfoRequest` messages. The global tag throttler then tracks per tag transaction rates in the `GlobalTagThrottlerImpl::tagStatistics` object. +The ratekeeper must also track the rate of transactions performed with each tag. Each GRV proxy aggregates a per-tag counter of transactions started (without sampling). These are sent to the ratekeeper through `GetRateInfoRequest` messages. The global tag throttler then tracks per tag transaction rates in the `GlobalTagThrottlerImpl::tagStatistics` object. ### Average Cost Calculation Quotas are expressed in terms of cost, but because throttling is enforced at the beginning of transactions, budgets need to be calculated in terms of transactions per second. To make this conversion, it is necessary to track the average cost of transactions (per-tag, and per-tag on a particular storage server). @@ -126,6 +126,6 @@ In each unit test, the `GlobalTagThrottlerTesting::monitor` function is used to ## Visibility ### Tracing -On the ratekeeper, every `SERVER_KNOBS->TAG_THROTTLE_PUSH_INTERVAL` seconds, the ratekeeper will call `GlobalTagThrottler::getClientRates`. At the end of the rate calculation for each tag, a trace event of type `GlobalTagThrottler_GotClientRate` is produced. This trace event reports the relevant inputs that went in to the rate calculation, and can be used for debugging. +On the ratekeeper, every `SERVER_KNOBS->TAG_THROTTLE_PUSH_INTERVAL` seconds, the ratekeeper will call `GlobalTagThrottler::getClientRates`. At the end of the rate calculation for each tag, a trace event of type `GlobalTagThrottler_GotClientRate` is produced. This trace event reports the relevant inputs that went into the rate calculation, and can be used for debugging. On storage servers, every `SERVER_KNOBS->TAG_MEASUREMENT_INTERVAL` seconds, there are `BusyReadTag` events for every tag that has sufficient read cost to be reported to the ratekeeper. Both cost and fractional busyness are reported. diff --git a/design/storage-quota.md b/design/storage-quota.md index e140713e42b..3804c218b9d 100644 --- a/design/storage-quota.md +++ b/design/storage-quota.md @@ -13,7 +13,7 @@ The storage used by tenants is estimated using the `getEstimatedRangeSizeBytes() `(size(key) + size(value)) / ((size(key) + 100) * 250)`. ### Tuple Layer -The storage quotas are stored under `TenantMedataSpecification` with suffix `storageQuota/`, which translates to `\xff/tenant/storageQuota/`. 
+The storage quotas are stored under `TenantMetadataSpecification` with suffix `storageQuota/`, which translates to `\xff/tenant/storageQuota/`. ### fdbcli The easiest way for an external client to interact with tag quotas is through `fdbcli`. To get the quota of a particular tenant group, run the following command: diff --git a/design/tlog-spilling.md.html b/design/tlog-spilling.md.html index 8da75923c5f..0d05a53add8 100644 --- a/design/tlog-spilling.md.html +++ b/design/tlog-spilling.md.html @@ -508,7 +508,7 @@ ### Spilling The most notable effect of the spilling changes is that the Disk Queue files -will now grow to potentially terrabytes in size. +will now grow to potentially terabytes in size. 1. Spilling will occur in larger batches, which will result in a more sawtooth-like `BytesInput - BytesDurable` value. I'm not aware that this will have any meaningful impact. @@ -571,7 +571,7 @@ If this turns out to be an issue, it's trivial to address. There's no reason to do only a binary search when drives support parallel requests. A - 32-way search could reasonably be done, and would would make a 4TB Disk + 32-way search could reasonably be done, and would make a 4TB Disk Queue file faster to recover than a 4GB one currently. 3. Disk queue files can now shrink. @@ -597,7 +597,7 @@ `UPDATE_STORAGE_BYTE_LIMIT` : How many bytes of mutations should be spilled at once in a spill-by-value TLog.
- This knob is pre-existing, and has only been "changed" to only apply to spill-by-value.
+ This knob is preexisting, and has only been "changed" to only apply to spill-by-value.
`TLOG_SPILL_REFERENCE_MAX_BATCHES_PER_PEEK` : How many batches of spilled data index batches should be read from disk to serve one peek request.
diff --git a/documentation/sphinx/source/administration.rst b/documentation/sphinx/source/administration.rst index 8aa06d31fd7..879912a5a52 100644 --- a/documentation/sphinx/source/administration.rst +++ b/documentation/sphinx/source/administration.rst @@ -217,7 +217,7 @@ To temporarily or permanently remove one or more machines from a FoundationDB cl .. note:: Addresses have the form ``IP``:``PORT``. This form is used even if TLS is enabled. -.. warning:: Localtiy based exclusions should only be used in FDB versions 7.1.42+, 7.3.26+ or newer. Older versions have limited support for locality based exclsuions and therefore the usage of locality based exclusions on those older version is not recommended. +.. warning:: Locality based exclusions should only be used in FDB versions 7.1.42+, 7.3.26+ or newer. Older versions have limited support for locality based exclusions and therefore the usage of locality based exclusions on those older version is not recommended. .. warning:: There is currently one known limitation with the locality based exclusions: If a process is serving as a log and the process is currently not reporting to the cluster, e.g. because of a partition, the process will not be detected by the exclude command. The exclusion is still initiated in the background but the exclude command itself will not block. diff --git a/documentation/sphinx/source/api-c.rst b/documentation/sphinx/source/api-c.rst index 053721931a3..f06790f4e89 100644 --- a/documentation/sphinx/source/api-c.rst +++ b/documentation/sphinx/source/api-c.rst @@ -568,7 +568,7 @@ Tenant Destroys an :type:`FDBTenant` object. It must be called exactly once for each successful call to :func:`fdb_database_create_tenant()`. This function only destroys a handle to the tenant -- the tenant and its data will be fine! -.. function:: fdb_error_t fdb_tenant_create_transaction(FDBTenant* tenant, FDBTronsaction **out_transaction) +.. function:: fdb_error_t fdb_tenant_create_transaction(FDBTenant* tenant, FDBTransaction **out_transaction) Creates a new transaction on the given tenant. This transaction will operate within the tenant's key-space and cannot access data outside the tenant. The caller assumes ownership of the :type:`FDBTransaction` object and must destroy it with :func:`fdb_transaction_destroy()`. diff --git a/documentation/sphinx/source/api-common.rst.inc b/documentation/sphinx/source/api-common.rst.inc index ba9088d9592..e9f6ebc0bf4 100644 --- a/documentation/sphinx/source/api-common.rst.inc +++ b/documentation/sphinx/source/api-common.rst.inc @@ -178,7 +178,7 @@ Gets the version number at which a successful commit modified the database. This must be called only after the successful (non-error) completion of a call to |commit-func| on this Transaction, or the behavior is undefined. Read-only transactions do not modify the database when committed and will have a committed version of -1. Keep in mind that a transaction which reads keys and then sets them to their current values may be optimized to a read-only transaction. .. |transaction-get-approximate-size-blurb| replace:: - Gets the the approximate transaction size so far, which is the summation of the estimated size of mutations, read conflict ranges, and write conflict ranges. + Gets the approximate transaction size so far, which is the summation of the estimated size of mutations, read conflict ranges, and write conflict ranges. .. 
|transaction-get-versionstamp-blurb| replace:: Returns a future which will contain the versionstamp which was used by any versionstamp operations in this transaction. This function must be called before a call to |commit-func| on this Transaction. The future will be ready only after the successful completion of a call to |commit-func| on this Transaction. Read-only transactions do not modify the database when committed and will result in the future completing with an error. Keep in mind that a transaction which reads keys and then sets them to their current values may be optimized to a read-only transaction. @@ -253,7 +253,7 @@ It is an error to set these options after the first call to |open-func| anywhere in your application. -.. |tls-options-burb| replace:: +.. |tls-options-blurb| replace:: The following options are only used when connecting to a :doc:`TLS-enabled cluster `. diff --git a/documentation/sphinx/source/api-python.rst b/documentation/sphinx/source/api-python.rst index 09af83c6536..1dfec2605c2 100644 --- a/documentation/sphinx/source/api-python.rst +++ b/documentation/sphinx/source/api-python.rst @@ -168,7 +168,7 @@ After importing the ``fdb`` module and selecting an API version, you probably wa |option-external-client-directory| - .. note:: |tls-options-burb| + .. note:: |tls-options-blurb| .. method :: fdb.options.set_tls_plugin(plugin_path_or_name) @@ -1366,7 +1366,7 @@ the most part, this also implies that ``T == fdb.tuple.unpack(fdb.tuple.pack(T)) will combine the ``tr_version`` and ``user_version`` to produce a byte string that lexicographically sorts appropriately with other ``Versionstamp`` instances. If this instance is incomplete, then the ``tr_version`` component gets filled in with dummy bytes that will cause it - to sort after every complete ``Verionstamp``'s serialized bytes. + to sort after every complete ``Versionstamp``'s serialized bytes. .. method:: Versionstamp.__eq__(other) .. method:: Versionstamp.__ne__(other) @@ -1411,7 +1411,7 @@ Subspaces .. method:: Subspace.pack_with_versionstamp(tuple) Returns the key encoding the specified tuple in the subspace so that it may be used as the key in the - :meth:`fdb.Transaction.set_versionstampe_key` method. The passed tuple must contain exactly one incomplete + :meth:`fdb.Transaction.set_versionstamped_key` method. The passed tuple must contain exactly one incomplete :class:`fdb.tuple.Versionstamp` instance or the method will raise an error. The behavior here is the same as if one used the :meth:`fdb.tuple.pack_with_versionstamp` method to appropriately pack together this subspace and the passed tuple. diff --git a/documentation/sphinx/source/api-ruby.rst b/documentation/sphinx/source/api-ruby.rst index 25a939ebca7..25a533532a8 100644 --- a/documentation/sphinx/source/api-ruby.rst +++ b/documentation/sphinx/source/api-ruby.rst @@ -147,7 +147,7 @@ After requiring the ``FDB`` gem and selecting an API version, you probably want |option-external-client-directory| - .. note:: |tls-options-burb| + .. note:: |tls-options-blurb| .. method :: FDB.options.set_tls_plugin(plugin_path_or_name) -> nil @@ -452,7 +452,7 @@ Reading data Returns all keys ``k`` such that ``begin <= k < end`` and their associated values as an enumerable of :class:`KeyValue` objects. Note the exclusion of ``end`` from the range. - Like a |future-object|, the returned enumerable issues asynchronous read operations to fetch data in the range, and may block while enumerating its values if the read has not completed. 
Data will be fetched in one more more efficient batches (depending on the value of the ``:streaming_mode`` parameter). + Like a |future-object|, the returned enumerable issues asynchronous read operations to fetch data in the range, and may block while enumerating its values if the read has not completed. Data will be fetched in one or more efficient batches (depending on the value of the ``:streaming_mode`` parameter). Each of ``begin`` and ``end`` may be a key (:class:`String` or :class:`Key`) or a :class:`KeySelector`. Note that in the case of a :class:`KeySelector`, the exclusion of ``end`` from the range still applies. diff --git a/documentation/sphinx/source/client-testing.rst b/documentation/sphinx/source/client-testing.rst index f7dd3c01b97..f0833905b98 100644 --- a/documentation/sphinx/source/client-testing.rst +++ b/documentation/sphinx/source/client-testing.rst @@ -456,7 +456,7 @@ The transaction context provides methods for implementation of the transaction l commit and finish the transaction. If the commit is successful, the execution proceeds to the continuation of ``execTransaction()``. In case of a retriable error the transaction is - automatically retried. A fatal error results in a failure of the workoad. + automatically retried. A fatal error results in a failure of the workload. .. function:: done() @@ -624,7 +624,7 @@ The ``[[test]]`` section can contain the following options: - ``databasePerTransaction``: Create a separate database instance for each transaction (default: false). It is a special mode useful for testing bugs related to creation and destruction of database instances. - ``fdbCallbacksOnExternalThreads``: Enables the option ``FDB_NET_OPTION_CALLBACKS_ON_EXTERNAL_THREADS`` - causting the callbacks of futures to be executed directly on the threads of the external FDB clients + causing the callbacks of futures to be executed directly on the threads of the external FDB clients rather than on the thread of the local FDB client. The workload section ``[[test.workload]]`` must contain the attribute name matching the registered name diff --git a/documentation/sphinx/source/configuration.rst b/documentation/sphinx/source/configuration.rst index 13b10bcb37a..3d6002c7f42 100644 --- a/documentation/sphinx/source/configuration.rst +++ b/documentation/sphinx/source/configuration.rst @@ -604,7 +604,7 @@ If a region failover occurs, clients will generally only see a latency spike of Specifying datacenters ---------------------- -To use region configurations all processes in the cluster need to specify in which datacenter they are located. This can be done on the command line with either ``--locality-dcid`` or ``--datacenter-id``. This datacenter identifier is case sensitive. +To use region configurations all processes in the cluster need to specify in which datacenter they are located. This can be done on the command line with either ``--locality-dcid`` or ``--datacenter-id``. This datacenter identifier is case-sensitive. Clients should also specify their datacenter with the database option ``datacenter-id``. If a client does not specify their datacenter, they will use latency estimates to balance traffic between the two regions. This will result in about 5% of requests being served by the remote regions, so reads will suffer from high tail latencies. 
diff --git a/documentation/sphinx/source/data-modeling.rst b/documentation/sphinx/source/data-modeling.rst index 5972c5110a9..37a4efad7be 100644 --- a/documentation/sphinx/source/data-modeling.rst +++ b/documentation/sphinx/source/data-modeling.rst @@ -99,7 +99,7 @@ Unicode strings --------------- * For unicode strings ordered lexicographically by unicode code point, use UTF-8 encoding. (This approach is used by the tuple layer.) -* For unicode strings ordered by a particular collation (for example, a case insensitive ordering for a particular language), use an appropriate string collation transformation and then apply UTF-8 encoding. Internationalization or "locale" libraries in most environments and programming languages provide a string collation transformation, for example `C `_, `C++ `_, `Python `_, `Ruby `_, `Java `_, the `ICU `_ library, etc. Usually the output of this function is a unicode string, which needs to be further encoded in a code-point ordered encoding such as UTF-8 to get a byte string. +* For unicode strings ordered by a particular collation (for example, a case-insensitive ordering for a particular language), use an appropriate string collation transformation and then apply UTF-8 encoding. Internationalization or "locale" libraries in most environments and programming languages provide a string collation transformation, for example `C `_, `C++ `_, `Python `_, `Ruby `_, `Java `_, the `ICU `_ library, etc. Usually the output of this function is a unicode string, which needs to be further encoded in a code-point ordered encoding such as UTF-8 to get a byte string. Floating point numbers ---------------------- @@ -537,7 +537,7 @@ FoundationDB's transactional guarantees ensure that, even when multiple clients Key and value sizes =================== -How you map your application data to keys and values can have a dramatic impact on performance. Below are some guidelines to consider as you design a data model. (For more general discussion of performance considerations, see :ref:`developer-guide-peformance-considerations`.) +How you map your application data to keys and values can have a dramatic impact on performance. Below are some guidelines to consider as you design a data model. (For more general discussion of performance considerations, see :ref:`developer-guide-performance-considerations`.) * Structure keys so that range reads can efficiently retrieve the most frequently accessed data. diff --git a/documentation/sphinx/source/developer-guide.rst b/documentation/sphinx/source/developer-guide.rst index efe37b50d2d..aad9e045d27 100644 --- a/documentation/sphinx/source/developer-guide.rst +++ b/documentation/sphinx/source/developer-guide.rst @@ -766,7 +766,7 @@ If you only need to detect the *fact* of a change, and your response doesn't dep fdb.Future.wait_for_any(*watch_keys(db)) yield -.. _developer-guide-peformance-considerations: +.. _developer-guide-performance-considerations: Performance considerations @@ -970,4 +970,4 @@ The trickiest errors are non-retryable errors. ``Transaction.on_error`` will ret If you see one of those errors, the best way of action is to fail the client. -At a first glance this looks very similar to an ``commit_unknown_result``. However, these errors lack the one guarantee ``commit_unknown_result`` still gives to the user: if the commit has already been sent to the database, the transaction could get committed at a later point in time. This means that if you retry the transaction, your new transaction might race with the old transaction. 
While this technically doesn't violate any consistency guarantees, abandoning a transaction means that there are no causality guaranatees. +At a first glance this looks very similar to an ``commit_unknown_result``. However, these errors lack the one guarantee ``commit_unknown_result`` still gives to the user: if the commit has already been sent to the database, the transaction could get committed at a later point in time. This means that if you retry the transaction, your new transaction might race with the old transaction. While this technically doesn't violate any consistency guarantees, abandoning a transaction means that there are no causality guarantees. diff --git a/documentation/sphinx/source/dtrace-probes.rst b/documentation/sphinx/source/dtrace-probes.rst index 7786176925c..ba32487d836 100644 --- a/documentation/sphinx/source/dtrace-probes.rst +++ b/documentation/sphinx/source/dtrace-probes.rst @@ -6,7 +6,7 @@ FoundationDB contains many dtrace probes that can be inspected during runtime with tools like bcc and SystemTap. All of them are in the ``foundationdb`` provider namespace. -``FDB_TRACE_PROBE`` is simply an alias to the varias ``DTRACE_PROBE`` +``FDB_TRACE_PROBE`` is simply an alias to the various ``DTRACE_PROBE`` macros. Probes diff --git a/documentation/sphinx/source/known-limitations.rst b/documentation/sphinx/source/known-limitations.rst index 8c4406b745b..1f87575e0d2 100644 --- a/documentation/sphinx/source/known-limitations.rst +++ b/documentation/sphinx/source/known-limitations.rst @@ -13,7 +13,7 @@ For related information, also see: * :doc:`platforms` that affect the operation of FoundationDB. * :ref:`system-requirements` for OS/hardware requirements. * :doc:`anti-features` for limitations of the scope of the FoundationDB core. - * :ref:`developer-guide-peformance-considerations` for how different workloads can limit performance. + * :ref:`developer-guide-performance-considerations` for how different workloads can limit performance. Design limitations ================== diff --git a/documentation/sphinx/source/read-write-path.rst b/documentation/sphinx/source/read-write-path.rst index 8257f8905f9..d1e4be213e6 100644 --- a/documentation/sphinx/source/read-write-path.rst +++ b/documentation/sphinx/source/read-write-path.rst @@ -334,7 +334,7 @@ which is a typical database technique to increase transaction throughput. * **Step 8 (Make mutation messages durable in the queuing system)**: Proxy sends serialized mutation messages to the queuing system. - The queuing system will append the mutation to an append-only file, fsync it, and send the respnose back. + The queuing system will append the mutation to an append-only file, fsync it, and send the response back. Each message has a tag, which decides which process in the queuing system the message should be sent to. The queuing system returns to the proxy the minimum known committed version, which is the smallest commit version among all proxies. The minimum known commit version is used when the system recovers from fault. diff --git a/documentation/sphinx/source/release-notes/release-notes-022.rst b/documentation/sphinx/source/release-notes/release-notes-022.rst index deb432424c3..d70dff40f3e 100644 --- a/documentation/sphinx/source/release-notes/release-notes-022.rst +++ b/documentation/sphinx/source/release-notes/release-notes-022.rst @@ -37,7 +37,7 @@ Language APIs * Java * Compiles class files with 1.6 source and target flags. - * Single-jar packaging for all platforms. 
(In rare cases, setting the ``FDB_LIBRARY_PATH_FDB_JAVA`` environment variable will be requried if you previously relied on loading the library from a system path.) + * Single-jar packaging for all platforms. (In rare cases, setting the ``FDB_LIBRARY_PATH_FDB_JAVA`` environment variable will be required if you previously relied on loading the library from a system path.) * Ruby @@ -53,7 +53,7 @@ Fixes * Status showed the wrong explanation when performance was limited by system write-to-read latency limit. * Fixed a rare issue where a "stuck" process trying to participate in the database could run out of RAM. * Increased robustness of FoundationDB server when loaded with large data sets. -* Eliminated certain cases where the data distribution algorithim could do unnecessary splitting and merging work. +* Eliminated certain cases where the data distribution algorithm could do unnecessary splitting and merging work. * Several fixes for rare issues encountered by our fault simulation framework. * Certain uncommon usage of on_ready() in Python could cause segmentation faults. diff --git a/documentation/sphinx/source/release-notes/release-notes-100.rst b/documentation/sphinx/source/release-notes/release-notes-100.rst index e82bf840693..e36b5deca52 100644 --- a/documentation/sphinx/source/release-notes/release-notes-100.rst +++ b/documentation/sphinx/source/release-notes/release-notes-100.rst @@ -41,7 +41,7 @@ All * Improvements to installation packages, including package paths and directory modes. * Eliminated cases of excessive resource usage in the locality API. * Watches are disabled when read-your-writes functionality is disabled. -* Fatal error paths now call ``_exit()`` instead instead of ``exit()``. +* Fatal error paths now call ``_exit()`` instead of ``exit()``. Fixes ----- diff --git a/documentation/sphinx/source/release-notes/release-notes-300.rst b/documentation/sphinx/source/release-notes/release-notes-300.rst index 289ad5c518b..a63e307d7ef 100644 --- a/documentation/sphinx/source/release-notes/release-notes-300.rst +++ b/documentation/sphinx/source/release-notes/release-notes-300.rst @@ -154,7 +154,7 @@ Java ---- * ``ReadTransactionContext`` added next to ``TransactionContext``, allowing ``read()`` and ``readAsync()`` composable read-only operations on transactions. -* The ``Future`` interface adds ``getInterruptibly()`` and ``blockInterruptibly()``, which propagate ``InterruptedExcetption`` to the calling code. +* The ``Future`` interface adds ``getInterruptibly()`` and ``blockInterruptibly()``, which propagate ``InterruptedException`` to the calling code. * Exception-handling logic is reworked in ``map()``, ``flatMap()``, and ``rescue()`` to propagate ``OutOfMemoryError`` and ``RejectedExecutionException`` instead of the spurious ``SettableAlreadySet`` exception. * Performance is improved for applications that use many blocking-style ``get()`` calls. diff --git a/documentation/sphinx/source/release-notes/release-notes-430.rst b/documentation/sphinx/source/release-notes/release-notes-430.rst index 0d83be2450b..63028cf0d6a 100644 --- a/documentation/sphinx/source/release-notes/release-notes-430.rst +++ b/documentation/sphinx/source/release-notes/release-notes-430.rst @@ -8,7 +8,7 @@ Release Notes Features -------- -* Improved DR thoughput by having mutations copied into the DR database before applying them. +* Improved DR throughput by having mutations copied into the DR database before applying them. * Renamed db_agent to dr_agent. 
* Added more detailed DR and backup active task detail into layer status. diff --git a/documentation/sphinx/source/release-notes/release-notes-440.rst b/documentation/sphinx/source/release-notes/release-notes-440.rst index 6f6c7cd993a..531d9084dfe 100644 --- a/documentation/sphinx/source/release-notes/release-notes-440.rst +++ b/documentation/sphinx/source/release-notes/release-notes-440.rst @@ -26,7 +26,7 @@ Features * Added support for streaming writes. This allows a client to load an ordered list of mutations into the database in parallel, and once they are all loaded, the mutations will be applied to the database in order. * DR uses streaming writes to significantly improve throughput. -* Restore was rewritten so that many clients can partipate in restoring data, significantly improving restore speed. The command line restore tool interface has been updated to support this new capability. +* Restore was rewritten so that many clients can participate in restoring data, significantly improving restore speed. The command line restore tool interface has been updated to support this new capability. * Cluster files now support comments (using the '#' character). * A wide variety of new client-side statistics are logged in client trace files every 5 seconds. * Status reports the generation of the system. The generation is incremented every time there is a failure (and recovery) in the transaction subsystem. diff --git a/documentation/sphinx/source/release-notes/release-notes-500.rst b/documentation/sphinx/source/release-notes/release-notes-500.rst index 13b6b8ead13..1fe1a29a8a5 100644 --- a/documentation/sphinx/source/release-notes/release-notes-500.rst +++ b/documentation/sphinx/source/release-notes/release-notes-500.rst @@ -49,7 +49,7 @@ Fixes * Logs continued to make their data persistent to disk after being removed. * Removed logs did not delete their data before shutting down. -* In rare scenarios, a disk error which occured during log recruitment could cause the recruitment to hang indefinately. +* In rare scenarios, a disk error which occurred during log recruitment could cause the recruitment to hang indefinitely. 5.0.3 ===== diff --git a/documentation/sphinx/source/release-notes/release-notes-510.rst b/documentation/sphinx/source/release-notes/release-notes-510.rst index 8e3fc540632..5f90fa3f5ef 100644 --- a/documentation/sphinx/source/release-notes/release-notes-510.rst +++ b/documentation/sphinx/source/release-notes/release-notes-510.rst @@ -27,7 +27,7 @@ Fixes * The consistency check calculated the size of the database inefficiently. * Could not create new directories with the Python and Ruby implementations of the directory layer. * fdbcli could erroneously report that it was incompatible with some processes in the cluster. -* The commit commmand in fdbcli did not wait for the result of the commit before continuing to the next command. +* The commit command in fdbcli did not wait for the result of the commit before continuing to the next command. Other Changes diff --git a/documentation/sphinx/source/release-notes/release-notes-520.rst b/documentation/sphinx/source/release-notes/release-notes-520.rst index dadb1e04f20..78540e8a7bf 100644 --- a/documentation/sphinx/source/release-notes/release-notes-520.rst +++ b/documentation/sphinx/source/release-notes/release-notes-520.rst @@ -42,7 +42,7 @@ Fixes * A memory leak was fixed in connection closing. `(PR #574) `_ * A memory leak was fixed in the coordinator's handling of disconnected clients. 
`(PR #579) `_ -* Aligned memory allocation on MacOS was sometimes failing to allocate memory, causing a crash. `(PR #547) `_ +* Aligned memory allocation on macOS was sometimes failing to allocate memory, causing a crash. `(PR #547) `_ 5.2.5 ===== diff --git a/documentation/sphinx/source/release-notes/release-notes-600.rst b/documentation/sphinx/source/release-notes/release-notes-600.rst index f3a921b15be..032916d32db 100644 --- a/documentation/sphinx/source/release-notes/release-notes-600.rst +++ b/documentation/sphinx/source/release-notes/release-notes-600.rst @@ -51,7 +51,7 @@ Features * Added support for asynchronous replication to a remote DC with processes in a single cluster. This improves on the asynchronous replication offered by fdbdr because servers can fetch data from the remote DC if all replicas have been lost in one DC. * Added support for synchronous replication of the transaction log to a remote DC. This remote DC does not need to contain any storage servers, meaning you need much fewer servers in this remote DC. * The TLS plugin is now statically linked into the client and server binaries and no longer requires a separate library. `(Issue #436) `_ -* TLS peer verification now supports verifiying on Subject Alternative Name. `(Issue #514) `_ +* TLS peer verification now supports verifying on Subject Alternative Name. `(Issue #514) `_ * TLS peer verification now supports suffix matching by field. `(Issue #515) `_ * TLS certificates are automatically reloaded after being updated. [6.0.5] `(Issue #505) `_ * Added the ``fileconfigure`` command to fdbcli, which configures a database from a JSON document. [6.0.10] `(PR #713) `_ @@ -105,7 +105,7 @@ Fixes * Sometimes data distribution calculated the size of a shard incorrectly. [6.0.15] `(PR #892) `_ * Changing the storage engine configuration would not effect which storage engine was used by the transaction logs. [6.0.15] `(PR #892) `_ * On exit, fdbmonitor will only kill its child processes instead of its process group when run without the daemonize option. [6.0.15] `(PR #826) `_ -* HTTP client used by backup-to-blobstore now correctly treats response header field names as case insensitive. [6.0.15] `(PR #904) `_ +* HTTP client used by backup-to-blobstore now correctly treats response header field names as case-insensitive. [6.0.15] `(PR #904) `_ * Blobstore REST client was not following the S3 API in several ways (bucket name, date, and response formats). [6.0.15] `(PR #914) `_ * Data distribution could queue shard movements for restoring replication at a low priority. [6.0.15] `(PR #907) `_ diff --git a/documentation/sphinx/source/release-notes/release-notes-620.rst b/documentation/sphinx/source/release-notes/release-notes-620.rst index 3e388a81299..fb7e3c3f856 100644 --- a/documentation/sphinx/source/release-notes/release-notes-620.rst +++ b/documentation/sphinx/source/release-notes/release-notes-620.rst @@ -121,7 +121,7 @@ Release Notes ====== * When configuring a cluster to usable_regions=2, data distribution would not react to machine failures while copying data to the remote region. `(PR #2774) `_. -* When a cluster is configured with usable_regions=2, data distribution could push a cluster into saturation by relocating too many shards simulatenously. `(PR #2776) `_. +* When a cluster is configured with usable_regions=2, data distribution could push a cluster into saturation by relocating too many shards simultaneously. `(PR #2776) `_. 
* Do not allow the cluster controller to mark any process as failed within 30 seconds of startup. `(PR #2780) `_. * Backup could not establish TLS connections (broken in 6.2.16). `(PR #2775) `_. * Certificates were not refreshed automatically (broken in 6.2.16). `(PR #2781) `_. diff --git a/documentation/sphinx/source/release-notes/release-notes-700.rst b/documentation/sphinx/source/release-notes/release-notes-700.rst index 823c0308b51..de56447519c 100644 --- a/documentation/sphinx/source/release-notes/release-notes-700.rst +++ b/documentation/sphinx/source/release-notes/release-notes-700.rst @@ -46,7 +46,7 @@ Performance Reliability ----------- * Backup agents now pause themselves upon a successful snapshot recovery to avoid unintentional data corruption. Operators should manually abort backup agents and clear the backup agent keyspace to avoid using the old cluster's backup configuration. `(PR #4027) `_ -* Log class processes are prioritized above transaction class proceses for becoming tlogs. `(PR #4509) `_ +* Log class processes are prioritized above transaction class processes for becoming tlogs. `(PR #4509) `_ * Improved worker recruitment logic to avoid unnecessary recoveries when processes are added or removed from a cluster. `(PR #4695) `_ `(PR #4631) `_ `(PR #4509) `_ Fixes diff --git a/documentation/sphinx/source/release-notes/release-notes-710.rst b/documentation/sphinx/source/release-notes/release-notes-710.rst index eb1beb2c1ae..72d795c5a91 100644 --- a/documentation/sphinx/source/release-notes/release-notes-710.rst +++ b/documentation/sphinx/source/release-notes/release-notes-710.rst @@ -73,7 +73,7 @@ Release Notes ====== * Released with AVX disabled. * Added redistribute fdbcli command to manually split shards. `(PR #10909) `_, `(PR #10936) `_, `(PR #10942) `_, `(PR #10905) `_, and `(PR #10958) `_ -* Fixed a MacOS linking issue for go bindings. `(PR #10924) `_ +* Fixed a macOS linking issue for go bindings. `(PR #10924) `_ * Added knobs to control backup retry delays for blob stores. `(PR #10947) `_ * Fixed two use-after-free bugs for backup agents. `(PR #10951) `_ * Added automatic range split for hot storage queue. `(PR #10932) `_ @@ -191,8 +191,8 @@ Release Notes * Fixed a data distribution bug where exclusions can become stuck because DD cannot build new teams. `(PR #9035) `_ * Added FoundationDB version to ProcessMetrics. `(PR #9037) `_ * Removed RocksDB read iterator destruction from the commit path. `(PR #8971) `_ -* Added determinstic degraded server selection in gray failure detection. `(PR #9001) `_ -* Fixed an interger overflow bug that causes fetching backup files to fail. `(PR #8996) `_ +* Added deterministic degraded server selection in gray failure detection. `(PR #9001) `_ +* Fixed an integer overflow bug that causes fetching backup files to fail. `(PR #8996) `_ * Fixed a log router race condition that blocks remote tlogs forever. `(PR #8966) `_ * Fixed a backup worker assertion failure. `(PR #8887) `_ * Upgraded RocksDB to 7.7.3 version. `(PR #8880) `_ diff --git a/documentation/sphinx/source/release-notes/release-notes-730.rst b/documentation/sphinx/source/release-notes/release-notes-730.rst index 6e5ad0d7b1d..2ce87bf32db 100644 --- a/documentation/sphinx/source/release-notes/release-notes-730.rst +++ b/documentation/sphinx/source/release-notes/release-notes-730.rst @@ -27,7 +27,7 @@ Release Notes 7.3.26 ====== -* Updated RocskDB version to 8.6.7. `(PR #11043) `_ +* Updated RocksDB version to 8.6.7. `(PR #11043) `_ * Changed RocksDB rate limiter to all IO. 
`(PR #11016) `_ * Added ``fdb_c_apiversion.g.h`` to OSX package. `(PR #11042) `_ * Added write traffic metrics to ddMetricsGetRange. `(PR #10998) `_ @@ -70,7 +70,7 @@ Release Notes 7.3.16 ====== * Released with AVX disabled. -* Added location_metadata fdbcli to query shard locations and assignements. `(PR #10428) `_ +* Added location_metadata fdbcli to query shard locations and assignments. `(PR #10428) `_ * Added degraded/disconnected peer recovery in gray failure. `(PR #10541) `_ * Added replica and metadata audit support. `(PR #10631) `_ * Added a SecurityMode for data distributor where data movements are not allowed but auditStorage is enabled. `(PR #10660) `_ diff --git a/documentation/sphinx/source/special-keys.rst b/documentation/sphinx/source/special-keys.rst index 036a950d7c7..702477d8298 100644 --- a/documentation/sphinx/source/special-keys.rst +++ b/documentation/sphinx/source/special-keys.rst @@ -83,7 +83,7 @@ Caveats ~~~~~~~ #. ``\xff\xff/transaction/read_conflict_range/`` The conflict range for a read is sometimes not known until that read completes (e.g. range reads with limits, key selectors). When you read from these special keys, the returned future first blocks until all pending reads are complete so it can give an accurate response. -#. ``\xff\xff/transaction/write_conflict_range/`` The conflict range range for a ``set_versionstamped_key`` atomic op is not known until commit time. You'll get an approximate range (the actual range will be a subset of the approximate range) until the precise range is known. +#. ``\xff\xff/transaction/write_conflict_range/`` The conflict range for a ``set_versionstamped_key`` atomic op is not known until commit time. You'll get an approximate range (the actual range will be a subset of the approximate range) until the precise range is known. #. ``\xff\xff/transaction/conflicting_keys/`` Since using this feature costs server (i.e., commit proxy and resolver) resources, it's disabled by default. You must opt in by setting the ``report_conflicting_keys`` transaction option. 
Metrics module diff --git a/documentation/sphinx/source/time-series.rst b/documentation/sphinx/source/time-series.rst index dfec299553f..12c16245d6f 100644 --- a/documentation/sphinx/source/time-series.rst +++ b/documentation/sphinx/source/time-series.rst @@ -69,7 +69,7 @@ Or maybe you want to track individual pages so you have a record of page access subspace_pages = FDB::Subspace.new(['pages-counter']) # then use atomic add again with a tuple sorted by time and counter - tr.add(subpace_pages.pack([page_ID, year, month, day, hour]), [1].pack('q<')) + tr.add(subspace_pages.pack([page_ID, year, month, day, hour]), [1].pack('q<')) # then retrieving any information about this page just requires specifying the time range you want to return the associated counter values tr.get(subspace_pages.pack([year, month, day, hour])) @@ -85,7 +85,7 @@ Putting it all together, your application just updates the primary copy of the d tr.set(FDB::Tuple.pack([year, month, day, seconds]), FDB::Tuple.pack([website_ID, page_ID, browser_ID])) tr.set(subspace_website_index.pack([website_ID, year, month, day, seconds]), value) tr.add(subspace_count.pack([website_ID, year, month]), [1].pack('q<')) - tr.add(subpace_pages.pack([page_ID, year, month, day, hour]), [1].pack('q<')) + tr.add(subspace_pages.pack([page_ID, year, month, day, hour]), [1].pack('q<')) end Ordering and Transactions diff --git a/fdbbackup/FileConverter.actor.cpp b/fdbbackup/FileConverter.actor.cpp index a60aa726127..7110dc74bd2 100644 --- a/fdbbackup/FileConverter.actor.cpp +++ b/fdbbackup/FileConverter.actor.cpp @@ -274,7 +274,7 @@ struct MutationFilesReadProgress : public ReferenceCounted getNextMutation() { return getMutationImpl(this); } ACTOR static Future getMutationImpl(MutationFilesReadProgress* self) { diff --git a/fdbbackup/backup.actor.cpp b/fdbbackup/backup.actor.cpp index 6526ad62266..9378dab888a 100644 --- a/fdbbackup/backup.actor.cpp +++ b/fdbbackup/backup.actor.cpp @@ -1697,7 +1697,7 @@ ACTOR Future getLayerStatus(Reference tr return json; } -// Check for unparsable or expired statuses and delete them. +// Check for unparseable or expired statuses and delete them. // First checks the first doc in the key range, and if it is valid, alive and not "me" then // returns. Otherwise, checks the rest of the range as well. ACTOR Future cleanupStatus(Reference tr, diff --git a/fdbbackup/include/fdbbackup/BackupTLSConfig.h b/fdbbackup/include/fdbbackup/BackupTLSConfig.h index 503f819fcf7..fad9d0a19c9 100644 --- a/fdbbackup/include/fdbbackup/BackupTLSConfig.h +++ b/fdbbackup/include/fdbbackup/BackupTLSConfig.h @@ -33,7 +33,7 @@ struct BackupTLSConfig { // Returns if TLS setup is successful bool setupTLS(); - // Sets up blob crentials. Add the file specified by FDB_BLOB_CREDENTIALS as well. + // Sets up blob credentials. Add the file specified by FDB_BLOB_CREDENTIALS as well. // Note this must be called after g_network is set up. 
void setupBlobCredentials(); }; diff --git a/fdbcli/DebugCommands.actor.cpp b/fdbcli/DebugCommands.actor.cpp index 1d287e9a436..ceeeee6cfa2 100644 --- a/fdbcli/DebugCommands.actor.cpp +++ b/fdbcli/DebugCommands.actor.cpp @@ -376,7 +376,7 @@ ACTOR Future doCheckAll(Database cx, KeyRange inputRange, bool checkAll) { std::vector keyValueReplies; for (int j = 0; j < replies.size(); j++) { auto reply = replies[j].get(); - ASSERT(reply.present() && !reply.get().error.present()); // has thrown eariler of error + ASSERT(reply.present() && !reply.get().error.present()); // has thrown earlier of error keyValueReplies.push_back(reply.get()); } // keyServers and keyValueReplies must follow the same order diff --git a/fdbcli/ExcludeCommand.actor.cpp b/fdbcli/ExcludeCommand.actor.cpp index 67d2a54a7f0..bc6a1588727 100644 --- a/fdbcli/ExcludeCommand.actor.cpp +++ b/fdbcli/ExcludeCommand.actor.cpp @@ -455,7 +455,7 @@ CommandFactory excludeFactory( CommandHelp( "exclude [FORCE] [failed] [no_wait] [] [locality_dcid:]\n" " [locality_zoneid:] [locality_machineid:]\n" - " [locality_processid:] [locality_:]", + " [locality_processid:] [locality_:]", "exclude servers from the database by IP address or locality", "If no addresses or localities are specified, lists the set of excluded addresses and localities.\n" "\n" diff --git a/fdbcli/tests/fdbcli_tests.py b/fdbcli/tests/fdbcli_tests.py index e09517d3beb..bc28e201b25 100755 --- a/fdbcli/tests/fdbcli_tests.py +++ b/fdbcli/tests/fdbcli_tests.py @@ -286,7 +286,7 @@ def lockAndUnlock(logger): env=fdbcli_env, ) process.stdout.readline() - # The randome passphrease we need to confirm to proceed the unlocking + # The random passphrase we need to confirm to proceed the unlocking line2 = process.stdout.readline() logger.debug("Random passphrase: {}".format(line2)) output3, err = process.communicate(input=line2) @@ -623,9 +623,9 @@ def transaction(logger): assert lines[2] == "`key' is `value'" assert lines[3].startswith("Committed (") and lines[3].endswith(")") # validate commit version is larger than the read version - commit_verion = int(lines[3][len("Committed (") : -1]) - logger.debug("Commit version: {}".format(commit_verion)) - assert commit_verion >= read_version + commit_version = int(lines[3][len("Committed (") : -1]) + logger.debug("Commit version: {}".format(commit_version)) + assert commit_version >= read_version # check the transaction is committed output2 = run_fdbcli_command("get", "key") assert output2 == "`key' is `value'" @@ -1222,7 +1222,7 @@ def tenant_configure(logger): ERROR: Tenant configuration is invalid (2140) """.strip() output = run_fdbcli_command_and_get_error( - "tenant configure tenant assigned_cluster=nonexist" + "tenant configure tenant assigned_cluster=nonexistent" ) assert output == expected_output diff --git a/fdbclient/AsyncFileS3BlobStore.actor.cpp b/fdbclient/AsyncFileS3BlobStore.actor.cpp index 2429318cfa6..1c48797b02d 100644 --- a/fdbclient/AsyncFileS3BlobStore.actor.cpp +++ b/fdbclient/AsyncFileS3BlobStore.actor.cpp @@ -35,7 +35,7 @@ Future AsyncFileS3BlobStoreRead::read(void* data, int length, int64_t offse } ACTOR Future sendStuff(int id, Reference t, int bytes) { - printf("Starting fake sender %d which will send send %d bytes.\n", id, bytes); + printf("Starting fake sender %d which will send %d bytes.\n", id, bytes); state double ts = timer(); state int total = 0; while (total < bytes) { diff --git a/fdbclient/AuditUtils.actor.cpp b/fdbclient/AuditUtils.actor.cpp index 4c60da82e09..4c066e5ff36 100644 --- 
a/fdbclient/AuditUtils.actor.cpp +++ b/fdbclient/AuditUtils.actor.cpp @@ -199,7 +199,7 @@ ACTOR Future clearAuditMetadataForType(Database cx, .detail("MaxAuditIdToClear", maxAuditIdToClear); try { - loop { // Cleanup until succeed or facing unretriable error + loop { // Cleanup until succeed or facing unretryable error try { state std::vector auditStates = wait(getAuditStates(cx, auditType, /*newFirst=*/false)); @@ -373,7 +373,7 @@ ACTOR Future persistNewAuditState(Database cx, } } } catch (Error& e) { - TraceEvent(SevWarn, "AuditUtilPersistedNewAuditStateUnretriableError", auditId) + TraceEvent(SevWarn, "AuditUtilPersistedNewAuditStateUnretryableError", auditId) .errorUnsuppressed(e) .detail("AuditKey", auditKey(auditState.getType(), auditId)); ASSERT_WE_THINK(e.code() == error_code_actor_cancelled || e.code() == error_code_movekeys_conflict); diff --git a/fdbclient/BackupAgentBase.actor.cpp b/fdbclient/BackupAgentBase.actor.cpp index 742a53cc59c..307721f3c37 100644 --- a/fdbclient/BackupAgentBase.actor.cpp +++ b/fdbclient/BackupAgentBase.actor.cpp @@ -754,7 +754,7 @@ ACTOR Future sendCommitTransactionRequest(CommitTransactionRequest req, Future commitAndUnlock = commitLock->releaseWhen(success(commit.getReply(req)), *mutationSize); if (tenantMapChanging) { // If tenant map is changing, we need to wait until it's committed before processing next mutations. - // Next muations need the updated tenant map for filtering. + // Next mutations need the updated tenant map for filtering. wait(commitAndUnlock); } else { addActor.send(commitAndUnlock); @@ -851,7 +851,7 @@ ACTOR Future kvMutationLogToTransactions(Database cx, newBeginVersion = group.groupKey + 1; // At this point if the tenant map changed we would have already sent any normalKey mutations - // accumulated thus far, so all thats left to do is to send all the mutations in the the offending + // accumulated thus far, so all thats left to do is to send all the mutations in the offending // transaction that changed the tenant map. This is necessary so that we don't batch these tenant map // mutations with future normalKey mutations (which will result in the same problem discussed above). 
if (tenantMapChanging || mutationSize >= CLIENT_KNOBS->BACKUP_LOG_WRITE_BATCH_MAX_SIZE) { diff --git a/fdbclient/BackupContainerFileSystem.actor.cpp b/fdbclient/BackupContainerFileSystem.actor.cpp index 86fb726d8b4..70c10197605 100644 --- a/fdbclient/BackupContainerFileSystem.actor.cpp +++ b/fdbclient/BackupContainerFileSystem.actor.cpp @@ -851,7 +851,7 @@ class BackupContainerFileSystemImpl { return false; } - // for each range in tags, check all tags from 1 are continouous + // for each range in tags, check all tags from 1 are continuous for (const auto& [beginEnd, count] : tags) { for (int i = 1; i < count; i++) { if (!isContinuous(files, tagIndices[i], beginEnd.first, std::min(beginEnd.second - 1, end), nullptr)) { diff --git a/fdbclient/BlobCipher.cpp b/fdbclient/BlobCipher.cpp index bb269307d2d..8d26448429d 100644 --- a/fdbclient/BlobCipher.cpp +++ b/fdbclient/BlobCipher.cpp @@ -101,7 +101,7 @@ void validateEncryptHeaderAlgoHeaderVersion(const EncryptCipherMode cipherMode, .detail("AuthMode", authMode) .detail("AuthAlgo", authAlgo) .detail("AlgoHeaderVersion", version) - .detail("MaxSsupportedVersion", maxSupportedVersion); + .detail("MaxSupportedVersion", maxSupportedVersion); throw not_implemented(); } } @@ -310,18 +310,18 @@ void BlobCipherEncryptHeaderRef::validateEncryptionHeaderDetails(const BlobCiphe // Validate baseCipher KCVs if (persistedTextKCV != kcvs.textKCV) { - TraceEvent(SevError, "EncryptionHeadeTextKCVMismatch") + TraceEvent(SevError, "EncryptionHeaderTextKCVMismatch") .detail("Persisted", persistedTextKCV) .detail("Expected", kcvs.textKCV); throw encrypt_key_check_value_mismatch(); } if (persistedHeaderKCV.present()) { if (!kcvs.headerKCV.present()) { - TraceEvent(SevError, "EncryptionHeadeMissingHeaderKCV"); + TraceEvent(SevError, "EncryptionHeaderMissingHeaderKCV"); throw encrypt_key_check_value_mismatch(); } if (persistedHeaderKCV.get() != kcvs.headerKCV.get()) { - TraceEvent(SevError, "EncryptionHeadeTextKCVMismatch") + TraceEvent(SevError, "EncryptionHeaderTextKCVMismatch") .detail("Persisted", persistedTextKCV) .detail("Expected", kcvs.textKCV); throw encrypt_key_check_value_mismatch(); @@ -1417,7 +1417,7 @@ void DecryptBlobCipherAes256Ctr::validateEncryptHeaderFlagsV1(const uint32_t hea } } -void DecryptBlobCipherAes256Ctr::vaidateEncryptHeaderCipherKCVs(const BlobCipherEncryptHeaderRef& headerRef, +void DecryptBlobCipherAes256Ctr::validateEncryptHeaderCipherKCVs(const BlobCipherEncryptHeaderRef& headerRef, const BlobCipherEncryptHeaderFlagsV1& flags) { const EncryptHeaderCipherKCVs kcvs = headerRef.getKCVs(); Sha256KCV::checkEqual(textCipherKey, kcvs.textKCV); @@ -1440,7 +1440,7 @@ void DecryptBlobCipherAes256Ctr::validateEncryptHeader(const uint8_t* ciphertext BlobCipherEncryptHeaderFlagsV1 flags = std::get(headerRef.flags); validateEncryptHeaderFlagsV1(headerRef.flagsVersion(), flags); - vaidateEncryptHeaderCipherKCVs(headerRef, flags); + validateEncryptHeaderCipherKCVs(headerRef, flags); validateAuthTokensV1(ciphertext, ciphertextLen, flags, headerRef); *authTokenMode = (EncryptAuthTokenMode)flags.authTokenMode; @@ -2059,7 +2059,7 @@ void testKeyCacheEssentials(DomainKeyMap& domainKeyMap, ASSERT_NE(std::memcmp(cipherKey->rawCipher(), baseCipher->key.get(), len), 0); } } - TraceEvent("TestLooksupDone").log(); + TraceEvent("TestLookUpsDone").log(); // Ensure attempting to insert existing cipherKey (identical) more than once is treated as a NOP try { diff --git a/fdbclient/BlobGranuleReader.actor.cpp b/fdbclient/BlobGranuleReader.actor.cpp index 
e05ff855cd9..34b6d680ee1 100644 --- a/fdbclient/BlobGranuleReader.actor.cpp +++ b/fdbclient/BlobGranuleReader.actor.cpp @@ -161,8 +161,8 @@ TEST_CASE("/fdbserver/blobgranule/isRangeCoveredByBlob") { // check empty chunks. not covered { - Standalone> empyChunks; - ASSERT(isRangeFullyCovered(KeyRangeRef(), empyChunks) == false); + Standalone> emptyChunks; + ASSERT(isRangeFullyCovered(KeyRangeRef(), emptyChunks) == false); } // check '' to \xff diff --git a/fdbclient/DatabaseBackupAgent.actor.cpp b/fdbclient/DatabaseBackupAgent.actor.cpp index 04f47e1ace7..3b5e72f95b7 100644 --- a/fdbclient/DatabaseBackupAgent.actor.cpp +++ b/fdbclient/DatabaseBackupAgent.actor.cpp @@ -2596,7 +2596,7 @@ class DatabaseBackupAgentImpl { wait(waitForAll(backupIntoResults)); for (auto result : backupIntoResults) { if (result.get().size() > 0) { - // One of the ranges we will be backing up into has pre-existing data. + // One of the ranges we will be backing up into has preexisting data. throw restore_destination_not_empty(); } } @@ -3097,7 +3097,7 @@ class DatabaseBackupAgentImpl { state Future> fBackupUid = tr->get(backupAgent->states.get(BinaryWriter::toValue(logUid, Unversioned())) .pack(DatabaseBackupAgent::keyFolderId)); - state Future> fBackupVerison = + state Future> fBackupVersion = tr->get(BinaryWriter::toValue(logUid, Unversioned()).withPrefix(applyMutationsBeginRange.begin)); state Future> fTagName = tr->get(backupAgent->states.get(BinaryWriter::toValue(logUid, Unversioned())) @@ -3195,7 +3195,7 @@ class DatabaseBackupAgentImpl { // calculate time differential Optional backupUid = wait(fBackupUid); if (backupUid.present()) { - Optional v = wait(fBackupVerison); + Optional v = wait(fBackupVersion); if (v.present()) { state Version destApplyBegin = BinaryReader::fromStringRef(v.get(), Unversioned()); Version sourceVersion = wait(srcReadVersion); diff --git a/fdbclient/FileBackupAgent.actor.cpp b/fdbclient/FileBackupAgent.actor.cpp index 664ae1096b5..f6dd5028e46 100644 --- a/fdbclient/FileBackupAgent.actor.cpp +++ b/fdbclient/FileBackupAgent.actor.cpp @@ -806,7 +806,7 @@ struct EncryptedRangeFileWriter : public IRangeFileWriter { return Void(); } - ACTOR static Future handleTenantBondary(EncryptedRangeFileWriter* self, + ACTOR static Future handleTenantBoundary(EncryptedRangeFileWriter* self, Key k, Value v, bool writeValue, @@ -843,7 +843,7 @@ struct EncryptedRangeFileWriter : public IRangeFileWriter { wait(getEncryptionDomainDetails(self->lastKey, self->encryptMode, self->tenantCache, checkTenantCache)); if (curKeyDomainId != prevKeyDomainId) { CODE_PROBE(true, "crossed tenant boundaries"); - wait(handleTenantBondary(self, k, v, writeValue, curKeyDomainId, checkTenantCache)); + wait(handleTenantBoundary(self, k, v, writeValue, curKeyDomainId, checkTenantCache)); return true; } return false; @@ -5247,11 +5247,11 @@ class FileBackupAgentImpl { tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); tr->setOption(FDBTransactionOptions::LOCK_AWARE); try { - // Note: we always lock DB here in case DB is modified at the bacupRanges boundary. + // Note: we always lock DB here in case DB is modified at the backupRanges boundary. 
for (restoreIndex = 0; restoreIndex < backupRanges.size(); restoreIndex++) { auto range = backupRanges[restoreIndex]; Standalone restoreTag(backupTag.toString() + "_" + std::to_string(restoreIndex)); - // Register the request request in DB, which will be picked up by restore worker leader + // Register the request in DB, which will be picked up by restore worker leader struct RestoreRequest restoreRequest(restoreIndex, restoreTag, bcUrl, diff --git a/fdbclient/MultiVersionTransaction.actor.cpp b/fdbclient/MultiVersionTransaction.actor.cpp index 3240e9dd5ae..c8fa821ab93 100644 --- a/fdbclient/MultiVersionTransaction.actor.cpp +++ b/fdbclient/MultiVersionTransaction.actor.cpp @@ -3046,7 +3046,7 @@ void MultiVersionApi::setNetworkOptionInternal(FDBNetworkOptions::Option option, traceFileIdentifier = value.get().toString(); { MutexHolder holder(lock); - // Forward the option unmodified only to the the local client and let it validate it. + // Forward the option unmodified only to the local client and let it validate it. // While for external clients the trace file identifiers are determined in setupNetwork localClient->api->setNetworkOption(option, value); } diff --git a/fdbclient/RESTClient.actor.cpp b/fdbclient/RESTClient.actor.cpp index 8b2630e7289..45fad7cb38c 100644 --- a/fdbclient/RESTClient.actor.cpp +++ b/fdbclient/RESTClient.actor.cpp @@ -72,10 +72,10 @@ RESTClient::Stats RESTClient::Stats::operator-(const Stats& rhs) { return r; } -RESTClient::RESTClient() : conectionPool(makeReference(knobs.connection_pool_size)) {} +RESTClient::RESTClient() : connectionPool(makeReference(knobs.connection_pool_size)) {} RESTClient::RESTClient(std::unordered_map& knobSettings) - : conectionPool(makeReference(knobs.connection_pool_size)) { + : connectionPool(makeReference(knobs.connection_pool_size)) { knobs.set(knobSettings); } @@ -142,7 +142,7 @@ ACTOR Future> doRequest_impl(Reference frconn = - client->conectionPool->connect(connectPoolKey, url.connType.secure, client->knobs.max_connection_life); + client->connectionPool->connect(connectPoolKey, url.connType.secure, client->knobs.max_connection_life); // Finish connecting, do request state RESTConnectionPool::ReusableConnection rconn = @@ -157,7 +157,7 @@ ACTOR Future> doRequest_impl(Referencedata.headers["Connection"] != "close") { - client->conectionPool->returnConnection(connectPoolKey, rconn, client->knobs.connection_pool_size); + client->connectionPool->returnConnection(connectPoolKey, rconn, client->knobs.connection_pool_size); } rconn.conn.clear(); } catch (Error& e) { @@ -257,7 +257,7 @@ ACTOR Future> doRequest_impl(Reference> RESTClient::doDelete(const std::string url, // 200 - action has been enacted. // 202 - action will likely succeed, but, has not yet been enacted. - // 204 - action has been enated, no further information is to supplied. + // 204 - action has been enacted, no further information is to supplied. 
{ HTTP::HTTP_STATUS_CODE_OK, HTTP::HTTP_STATUS_CODE_NO_CONTENT, HTTP::HTTP_STATUS_CODE_ACCEPTED }); } diff --git a/fdbclient/RESTUtils.actor.cpp b/fdbclient/RESTUtils.actor.cpp index d99b2274f13..56fdd22c372 100644 --- a/fdbclient/RESTUtils.actor.cpp +++ b/fdbclient/RESTUtils.actor.cpp @@ -38,7 +38,7 @@ const std::unordered_map RESTConnectionType::su RESTConnectionType RESTConnectionType::getConnectionType(const std::string& protocol) { auto itr = RESTConnectionType::supportedConnTypes.find(protocol); if (itr == RESTConnectionType::supportedConnTypes.end()) { - TraceEvent("RESTConnectionTypeUnsupportedPrototocol").detail("Protocol", protocol); + TraceEvent("RESTConnectionTypeUnsupportedProtocol").detail("Protocol", protocol); CODE_PROBE(true, "REST URI unsupported protocol"); throw rest_unsupported_protocol(); } @@ -53,7 +53,7 @@ bool RESTConnectionType::isProtocolSupported(const std::string& protocol) { bool RESTConnectionType::isSecure(const std::string& protocol) { auto itr = RESTConnectionType::supportedConnTypes.find(protocol); if (itr == RESTConnectionType::supportedConnTypes.end()) { - TraceEvent("RESTConnectionTypeUnsupportedPrototocol").detail("Protocol", protocol); + TraceEvent("RESTConnectionTypeUnsupportedProtocol").detail("Protocol", protocol); throw rest_unsupported_protocol(); } return itr->second.secure == RESTConnectionType::SECURE_CONNECTION; diff --git a/fdbclient/S3BlobStore.actor.cpp b/fdbclient/S3BlobStore.actor.cpp index 290106fd818..dbf8856b0bd 100644 --- a/fdbclient/S3BlobStore.actor.cpp +++ b/fdbclient/S3BlobStore.actor.cpp @@ -614,7 +614,7 @@ ACTOR Future> tryReadJSONFile(std::string path) { ASSERT(r == size); content = buf.toString(); - // Any exceptions from hehre forward are parse failures + // Any exceptions from here forward are parse failures errorEventType = "BlobCredentialFileParseFailed"; json_spirit::mValue json; json_spirit::read_string(content, json); @@ -1063,7 +1063,7 @@ ACTOR Future> doRequest_impl(Referenceknobs.max_delay_connection_failed : bstore->knobs.max_delay_retryable_error; @@ -1102,7 +1102,7 @@ ACTOR Future> doRequest_impl(Reference queryParameters; std::string canonicalURI = awsCanonicalURI(resource, queryParameters, true); diff --git a/fdbclient/ServerKnobs.cpp b/fdbclient/ServerKnobs.cpp index 988561c3ea9..00f4e6e3682 100644 --- a/fdbclient/ServerKnobs.cpp +++ b/fdbclient/ServerKnobs.cpp @@ -664,8 +664,8 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi init( REPORT_TRANSACTION_COST_ESTIMATION_DELAY, 0.1 ); init( PROXY_REJECT_BATCH_QUEUED_TOO_LONG, true ); - bool buggfyUseResolverPrivateMutations = randomize && BUGGIFY && !ENABLE_VERSION_VECTOR_TLOG_UNICAST; - init( PROXY_USE_RESOLVER_PRIVATE_MUTATIONS, false ); if( buggfyUseResolverPrivateMutations ) PROXY_USE_RESOLVER_PRIVATE_MUTATIONS = deterministicRandom()->coinflip(); + bool buggifyUseResolverPrivateMutations = randomize && BUGGIFY && !ENABLE_VERSION_VECTOR_TLOG_UNICAST; + init( PROXY_USE_RESOLVER_PRIVATE_MUTATIONS, false ); if( buggifyUseResolverPrivateMutations ) PROXY_USE_RESOLVER_PRIVATE_MUTATIONS = deterministicRandom()->coinflip(); init( BURSTINESS_METRICS_ENABLED , false ); init( BURSTINESS_METRICS_LOG_INTERVAL, 0.1 ); @@ -680,7 +680,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi init( GLOBAL_CONFIG_REFRESH_TIMEOUT, 10.0 ); if ( randomize && BUGGIFY ) GLOBAL_CONFIG_REFRESH_TIMEOUT = 1.0; // Master Server - // masterCommitter() in the master server will allow lower priority tasks (e.g. 
DataDistibution) + // masterCommitter() in the master server will allow lower priority tasks (e.g. DataDistribution) // by delay()ing for this amount of time between accepted batches of TransactionRequests. bool fastBalancing = randomize && BUGGIFY; init( COMMIT_SLEEP_TIME, 0.0001 ); if( randomize && BUGGIFY ) COMMIT_SLEEP_TIME = 0; @@ -1190,7 +1190,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi // KMS connector type init( KMS_CONNECTOR_TYPE, "RESTKmsConnector" ); - // Blob granlues + // Blob granules init( BG_URL, isSimulated ? "file://simfdb/fdbblob/" : "" ); // TODO: store in system key space or something, eventually bool buggifyMediumGranules = simulationMediumShards || (randomize && BUGGIFY); // BlobGranuleVerify* simulation tests use "knobs", BlobGranuleCorrectness* use "tenant", default in real clusters is "knobs" diff --git a/fdbclient/SpecialKeySpace.actor.cpp b/fdbclient/SpecialKeySpace.actor.cpp index 513710c2539..50535851271 100644 --- a/fdbclient/SpecialKeySpace.actor.cpp +++ b/fdbclient/SpecialKeySpace.actor.cpp @@ -758,7 +758,7 @@ ACTOR Future ddMetricsGetRangeActor(ReadYourWritesTransaction* ryw, state Error err(e); if (e.code() == error_code_dd_not_found) { TraceEvent(SevWarnAlways, "DataDistributorNotPresent") - .detail("Operation", "DDMetricsReqestThroughSpecialKeys"); + .detail("Operation", "DDMetricsRequestThroughSpecialKeys"); wait(delayJittered(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY)); continue; } @@ -2008,13 +2008,13 @@ ACTOR static Future> advanceVersionCommitActor(ReadYourWri // Max version we can set for minRequiredCommitVersionKey, // making sure the cluster can still be alive for 1000 years after the recovery - static const Version maxAllowedVerion = + static const Version maxAllowedVersion = std::numeric_limits::max() - 1 - CLIENT_KNOBS->VERSIONS_PER_SECOND * 3600 * 24 * 365 * 1000; ryw->getTransaction().setOption(FDBTransactionOptions::LOCK_AWARE); ryw->getTransaction().setOption(FDBTransactionOptions::RAW_ACCESS); - TraceEvent(SevDebug, "AdvanceVersion").detail("MaxAllowedVersion", maxAllowedVerion); - if (v > maxAllowedVerion) { + TraceEvent(SevDebug, "AdvanceVersion").detail("MaxAllowedVersion", maxAllowedVersion); + if (v > maxAllowedVersion) { return ManagementAPIError::toJsonString( false, "advanceversion", diff --git a/fdbclient/TenantManagement.actor.cpp b/fdbclient/TenantManagement.actor.cpp index 8c38e5a3dab..5994272a6aa 100644 --- a/fdbclient/TenantManagement.actor.cpp +++ b/fdbclient/TenantManagement.actor.cpp @@ -77,7 +77,7 @@ bool tenantMapChanging(MutationRef const& mutation, KeyRangeRef const& tenantMap return false; } -// validates whether the the ID created by adding delta to baseID is a valid ID in the same tenant prefix +// validates whether the ID created by adding delta to baseID is a valid ID in the same tenant prefix int64_t computeNextTenantId(int64_t baseId, int64_t delta) { if ((baseId & 0xFFFFFFFFFFFF) + delta > 0xFFFFFFFFFFFF) { TraceEvent(g_network->isSimulated() ? 
SevWarnAlways : SevError, "NoMoreTenantIds") @@ -90,7 +90,7 @@ int64_t computeNextTenantId(int64_t baseId, int64_t delta) { return baseId + delta; } -// returns the maximum allowable tenant id in which the 2 byte prefix is not overriden +// returns the maximum allowable tenant id in which the 2 byte prefix is not overridden int64_t getMaxAllowableTenantId(int64_t curTenantId) { // The maximum tenant id allowed is 1 for the first 48 bits (6 bytes) with the first 16 bits (2 bytes) being the // tenant prefix diff --git a/fdbclient/azure_backup/BackupContainerAzureBlobStore.actor.cpp b/fdbclient/azure_backup/BackupContainerAzureBlobStore.actor.cpp index 02d9a43174a..f9eccbb5fca 100644 --- a/fdbclient/azure_backup/BackupContainerAzureBlobStore.actor.cpp +++ b/fdbclient/azure_backup/BackupContainerAzureBlobStore.actor.cpp @@ -374,7 +374,7 @@ Future> BackupContainerAzureBlobStore::writeFile(const st } Future BackupContainerAzureBlobStore::writeEntireFile(const std::string& fileName, - const std::string& fileConents) { + const std::string& fileContents) { return writeEntireFileFallback(fileName, fileContents); } diff --git a/fdbclient/include/fdbclient/Audit.h b/fdbclient/include/fdbclient/Audit.h index a0d58647298..e26039fe286 100644 --- a/fdbclient/include/fdbclient/Audit.h +++ b/fdbclient/include/fdbclient/Audit.h @@ -77,7 +77,7 @@ struct AuditStorageState { UID id; UID ddId; // ddId indicates this audit is managed by which dd // ddId is used to check if dd has changed - // When a new dd starts in the middle of an onging audit, + // When a new dd starts in the middle of an ongoing audit, // The ongoing audit's ddId gets updated // When SS updates the progress, it checks ddId // If the ddId is updated, SS Audit actors of the old dd will stop themselves diff --git a/fdbclient/include/fdbclient/BackupContainer.h b/fdbclient/include/fdbclient/BackupContainer.h index 772b05b3ba5..730d87dcdba 100644 --- a/fdbclient/include/fdbclient/BackupContainer.h +++ b/fdbclient/include/fdbclient/BackupContainer.h @@ -292,7 +292,7 @@ class IBackupContainer { // Get exactly the files necessary to restore the key space filtered by the specified key ranges to targetVersion. // If targetVersion is 'latestVersion', use the minimum restorable version in a snapshot. - // If logsOnly is set, only use log files in [beginVersion, targetVervions) in restore set. + // If logsOnly is set, only use log files in [beginVersion, targetVersions) in restore set. // Returns non-present if restoring to the given version is not possible. virtual Future> getRestoreSet(Version targetVersion, VectorRef keyRangesFilter = {}, diff --git a/fdbclient/include/fdbclient/BlobCipher.h b/fdbclient/include/fdbclient/BlobCipher.h index 48f5b0c1a3f..6d6bd7ae335 100644 --- a/fdbclient/include/fdbclient/BlobCipher.h +++ b/fdbclient/include/fdbclient/BlobCipher.h @@ -113,7 +113,7 @@ std::string toString(BlobCipherMetrics::UsageType type); // For encryption EncryptBuf is allocated using client supplied Arena and provided to AES library to capture // the ciphertext. Similarly, on decryption EncryptBuf is allocated using client supplied Arena and provided // to the AES library to capture decipher text and passed back to the clients. Given the object passed around -// is reference-counted, it gets freed once refrenceCount goes to 0. +// is reference-counted, it gets freed once referenceCount goes to 0. 
class EncryptBuf : public ReferenceCounted, NonCopyable { public: @@ -255,7 +255,7 @@ struct BlobCipherEncryptHeaderFlagsV1 { // 'authentication token' (crypto-secure) to protect against malicious tampering and/or bit rot/flip scenarios. // // Encryption header support two modes of generation 'authentication tokens': -// 1) SingleAuthTokenMode: the scheme generates single crypto-secrure auth token to protect {cipherText + +// 1) SingleAuthTokenMode: the scheme generates single crypto-secure auth token to protect {cipherText + // header} payload. Scheme is geared towards optimizing cost due to crypto-secure auth-token generation, // however, on decryption client needs to be read 'header' + 'encrypted-buffer' to validate the 'auth-token'. // The scheme is ideal for usecases where payload represented by the encryptionHeader is not large and it is @@ -563,7 +563,7 @@ typedef struct BlobCipherEncryptHeader { // 'authentication token' (crypto-secure) to protect against malicious tampering and/or bit rot/flip scenarios. // Encryption header support two modes of generation 'authentication tokens': - // 1) SingleAuthTokenMode: the scheme generates single crypto-secrure auth token to protect {cipherText + + // 1) SingleAuthTokenMode: the scheme generates single crypto-secure auth token to protect {cipherText + // header} payload. Scheme is geared towards optimizing cost due to crypto-secure auth-token generation, // however, on decryption client needs to be read 'header' + 'encrypted-buffer' to validate the 'auth-token'. // The scheme is ideal for usecases where payload represented by the encryptionHeader is not large and it is @@ -899,7 +899,7 @@ class BlobCipherKeyCache : NonCopyable, public ReferenceCounted { public: @@ -1036,7 +1036,7 @@ class DecryptBlobCipherAes256Ctr final : NonCopyable, public ReferenceCounted diff --git a/fdbclient/include/fdbclient/MonitorLeader.h b/fdbclient/include/fdbclient/MonitorLeader.h index 0aff5800083..2cf5913123f 100644 --- a/fdbclient/include/fdbclient/MonitorLeader.h +++ b/fdbclient/include/fdbclient/MonitorLeader.h @@ -63,7 +63,7 @@ struct MonitorLeaderInfo { Optional> getLeader(const std::vector>& nominees); -// This is one place where the leader election algorithm is run. The coodinator contacts all coodinators to collect +// This is one place where the leader election algorithm is run. The coordinator contacts all coordinators to collect // nominees, the nominee with the most nomination is the leader. This function also monitors the change of the leader. // If a leader is elected for long enough and communication with a quorum of coordinators is possible, eventually // outKnownLeader will be that leader's interface. @@ -71,7 +71,7 @@ template Future monitorLeader(Reference const& connFile, Reference>> const& outKnownLeader); -// This is one place where the leader election algorithm is run. The coodinator contacts all coodinators to collect +// This is one place where the leader election algorithm is run. The coordinator contacts all coordinators to collect // nominees, the nominee with the most nomination is the leader, and collects client data from the leader. This function // also monitors the change of the leader. 
Future monitorLeaderAndGetClientInfo(Key const& clusterKey, diff --git a/fdbclient/include/fdbclient/MultiVersionTransaction.h b/fdbclient/include/fdbclient/MultiVersionTransaction.h index 40d368def26..6bd2bd18e02 100644 --- a/fdbclient/include/fdbclient/MultiVersionTransaction.h +++ b/fdbclient/include/fdbclient/MultiVersionTransaction.h @@ -161,7 +161,7 @@ struct FdbCApi : public ThreadSafeReferenceCounted { FDBFuture* (*databaseCreateSnapshot)(FDBDatabase* database, uint8_t const* uid, int uidLength, - uint8_t const* snapshotCommmand, + uint8_t const* snapshotCommand, int snapshotCommandLength); FDBFuture* (*databaseCreateSharedState)(FDBDatabase* database); void (*databaseSetSharedState)(FDBDatabase* database, DatabaseSharedState* p); diff --git a/fdbclient/include/fdbclient/RESTClient.h b/fdbclient/include/fdbclient/RESTClient.h index 7b37430e3af..6113dbb902e 100644 --- a/fdbclient/include/fdbclient/RESTClient.h +++ b/fdbclient/include/fdbclient/RESTClient.h @@ -52,7 +52,7 @@ class RESTClient : public ReferenceCounted { }; RESTClientKnobs knobs; - Reference conectionPool; + Reference connectionPool; // Connection stats maintained per "host:service" std::unordered_map> statsMap; diff --git a/fdbclient/include/fdbclient/ServerKnobs.h b/fdbclient/include/fdbclient/ServerKnobs.h index c0af1893abf..e43ec3d8e26 100644 --- a/fdbclient/include/fdbclient/ServerKnobs.h +++ b/fdbclient/include/fdbclient/ServerKnobs.h @@ -128,7 +128,7 @@ class SWIFT_CXX_IMMORTAL_SINGLETON_TYPE ServerKnobs : public KnobsImpl { public: - // Public visibility constructior ONLY to assist FlowSingleton instance creation. + // Public visibility constructor ONLY to assist FlowSingleton instance creation. // API Note: Constructor is expected to be instantiated only in simulation mode. explicit SimKmsVaultCtx(bool ignored) { diff --git a/fdbclient/include/fdbclient/SimpleIni.h b/fdbclient/include/fdbclient/SimpleIni.h index b0f75fe7591..177106c0814 100644 --- a/fdbclient/include/fdbclient/SimpleIni.h +++ b/fdbclient/include/fdbclient/SimpleIni.h @@ -114,7 +114,7 @@ where the multi-line text ends. - The newline after ENDTAG in the start tag, and the newline before ENDTAG in the end tag is not included in the data value. - - The ending tag must be on it's own line with no whitespace before + - The ending tag must be on its own line with no whitespace before or after it. - The multi-line value is modified at load so that each line in the value is delimited by a single '\\n' character on all platforms. At save time @@ -272,7 +272,7 @@ enum SI_Error { /** Simple INI file reader. This can be instantiated with the choice of unicode or native characterset, - and case sensitive or insensitive comparisons of section and key names. + and case-sensitive or insensitive comparisons of section and key names. 
The supported combinations are pre-defined with the following typedefs: diff --git a/fdbclient/include/fdbclient/TenantManagement.actor.h b/fdbclient/include/fdbclient/TenantManagement.actor.h index 0006422dbcd..0fa55c6ff48 100644 --- a/fdbclient/include/fdbclient/TenantManagement.actor.h +++ b/fdbclient/include/fdbclient/TenantManagement.actor.h @@ -426,7 +426,7 @@ Future deleteTenantTransaction(Transaction tr, } } } else { - CODE_PROBE(true, "Delete non-existent tenant"); + CODE_PROBE(true, "Delete nonexistent tenant"); } if (clusterType == ClusterType::METACLUSTER_DATA) { diff --git a/fdbclient/include/fdbclient/VersionVector.h b/fdbclient/include/fdbclient/VersionVector.h index 3d3efd903f9..23b6a79804d 100644 --- a/fdbclient/include/fdbclient/VersionVector.h +++ b/fdbclient/include/fdbclient/VersionVector.h @@ -441,7 +441,7 @@ struct VersionVector { } } - // Figrue out the type that was used to serialize commit version deltas and call the above + // Figure out the type that was used to serialize commit version deltas and call the above // method to do the deserialization. // T: Type that was used to serialize tag ids (uint8_t/uint16_t) template diff --git a/fdbrpc/DDSketchTest.actor.cpp b/fdbrpc/DDSketchTest.actor.cpp index f2ecb48bd29..0560c971dc7 100644 --- a/fdbrpc/DDSketchTest.actor.cpp +++ b/fdbrpc/DDSketchTest.actor.cpp @@ -43,7 +43,7 @@ TEST_CASE("/fdbrpc/ddsketch/correctness") { DDSketch dd; for (int i = 0; i < 4000; i++) { - // This generates a uniform real disitribution between the range of + // This generates a uniform real distribution between the range of // [0.0004, 0.01] double sample = (static_cast(deterministicRandom()->randomSkewedUInt32(40, 1000)) / 100000); dd.addSample(sample); diff --git a/fdbrpc/HTTP.actor.cpp b/fdbrpc/HTTP.actor.cpp index 0d48f0615c5..e280b2219c3 100644 --- a/fdbrpc/HTTP.actor.cpp +++ b/fdbrpc/HTTP.actor.cpp @@ -397,7 +397,7 @@ ACTOR Future readHTTPData(HTTPData* r, // Now truncate the buffer to just the dechunked contiguous content. r->content.erase(r->contentLen); } else { - // Some unrecogize response content scheme is being used. + // Some unrecognize response content scheme is being used. 
throw http_bad_response(); } diff --git a/fdbrpc/ReplicationUtils.cpp b/fdbrpc/ReplicationUtils.cpp index bd557086147..163eb0e9c69 100644 --- a/fdbrpc/ReplicationUtils.cpp +++ b/fdbrpc/ReplicationUtils.cpp @@ -101,7 +101,7 @@ bool findBestPolicySetSimple(int targetUniqueValueCount, std::vector& bestSet, int desired) { auto& mutableEntries = logServerSet->getMutableEntries(); - // First make sure the current localitySet is able to fulfuill the policy + // First make sure the current localitySet is able to fulfill the policy AttribKey indexKey = logServerSet->keyIndex("zoneid"); int uniqueValueCount = logServerSet->getKeyValueArray()[indexKey._id].size(); @@ -250,7 +250,7 @@ bool findBestPolicySet(std::vector& bestResults, auto oldBestFound = findBestPolicySetExpensive(oldBest, localitySet, policy, nMinItems, nSelectTests, nPolicyTests); if (!oldBestFound) { - TraceEvent(SevError, "FBPSMissmatch").detail("Policy", policy->info()); + TraceEvent(SevError, "FBPSMismatch").detail("Policy", policy->info()); } else { ASSERT(mostUsedZoneCount(localitySet, bestResults) <= mostUsedZoneCount(localitySet, oldBest)); } diff --git a/fdbrpc/include/fdbrpc/AsyncFileCached.actor.h b/fdbrpc/include/fdbrpc/AsyncFileCached.actor.h index 8aebb92225d..9b7cd962ecc 100644 --- a/fdbrpc/include/fdbrpc/AsyncFileCached.actor.h +++ b/fdbrpc/include/fdbrpc/AsyncFileCached.actor.h @@ -180,7 +180,7 @@ class AsyncFileCached final : public IAsyncFile, public ReferenceCounted write_impl(AsyncFileCached* self, void const* data, int length, int64_t offset) { - // If there is a truncate in progress before the the write position then we must + // If there is a truncate in progress before the write position then we must // wait for it to complete. if (length + offset > self->currentTruncateSize) wait(self->currentTruncate); @@ -600,7 +600,7 @@ struct AFCPage : public EvictablePage, public FastAllocated { } Future truncate() { - // Allow truncatation during zero copy reads but orphan the previous buffer + // Allow truncation during zero copy reads but orphan the previous buffer if (zeroCopyRefCount != 0) orphan(); truncated = true; diff --git a/fdbrpc/include/fdbrpc/AsyncFileNonDurable.actor.h b/fdbrpc/include/fdbrpc/AsyncFileNonDurable.actor.h index 4274ce76dee..d4e0dcb420e 100644 --- a/fdbrpc/include/fdbrpc/AsyncFileNonDurable.actor.h +++ b/fdbrpc/include/fdbrpc/AsyncFileNonDurable.actor.h @@ -439,7 +439,7 @@ class AsyncFileNonDurable final : public IAsyncFile, public ReferenceCountedlru.leastRecentlyUsedPage(); while (self->writing.find(page) != self->writing.end() || page == -1) { // avoid concurrent ops - wait(delay(FLOW_KNOBS->ASYNC_FILE_WRITE_CHEKCER_CHECKING_DELAY)); + wait(delay(FLOW_KNOBS->ASYNC_FILE_WRITE_CHECKER_CHECKING_DELAY)); continue; } int64_t offset = page * checksumHistoryPageSize; @@ -238,7 +238,7 @@ class AsyncFileWriteChecker : public IAsyncFile, public ReferenceCounted runChecksumLogger(AsyncFileWriteChecker* self) { - state double delayDuration = FLOW_KNOBS->ASYNC_FILE_WRITE_CHEKCER_LOGGING_INTERVAL; + state double delayDuration = FLOW_KNOBS->ASYNC_FILE_WRITE_CHECKER_LOGGING_INTERVAL; loop { wait(delay(delayDuration)); // TODO: add more stats, such as total checked, current entries, budget @@ -255,7 +255,7 @@ class AsyncFileWriteChecker : public IAsyncFile, public ReferenceCounted pages; - // Check or set each full block in the the range + // Check or set each full block in the range int page = offset / checksumHistoryPageSize; // First page number int slack = offset % checksumHistoryPageSize; // Bytes 
after most recent page boundary uint8_t* start = buf; // Position in buffer to start checking from diff --git a/fdbrpc/include/fdbrpc/ReplicationUtils.h b/fdbrpc/include/fdbrpc/ReplicationUtils.h index 911c912ff88..9c5f12a691e 100644 --- a/fdbrpc/include/fdbrpc/ReplicationUtils.h +++ b/fdbrpc/include/fdbrpc/ReplicationUtils.h @@ -40,7 +40,7 @@ extern double ratePolicy(Reference& localitySet, // given a localitySet, replication policy and number of selected tests, apply the // policy and return the rating // rating can be -1 there are no unique results failing while applying the replication -// policy, otherwise largest mode from the items per unique set of locaility entry +// policy, otherwise largest mode from the items per unique set of locality entry // are returned. extern bool findBestPolicySet(std::vector& bestResults, diff --git a/fdbrpc/include/fdbrpc/Stats.h b/fdbrpc/include/fdbrpc/Stats.h index 748565a080a..982b158426c 100644 --- a/fdbrpc/include/fdbrpc/Stats.h +++ b/fdbrpc/include/fdbrpc/Stats.h @@ -140,7 +140,7 @@ struct Counter final : public ICounter, NonCopyable { // // A uniformly periodic counter will have roughness of 0 // A uniformly periodic counter that increases in clumps of N will have roughness of N-1 - // A counter with exponentially distributed incrementations will have roughness of 1 + // A counter with exponentially distributed increments will have roughness of 1 double getRoughness() const override; bool hasRate() const override { return true; } diff --git a/fdbrpc/include/fdbrpc/simulator.h b/fdbrpc/include/fdbrpc/simulator.h index 57fa1ecddf0..857111b126a 100644 --- a/fdbrpc/include/fdbrpc/simulator.h +++ b/fdbrpc/include/fdbrpc/simulator.h @@ -406,7 +406,7 @@ class ISimulator : public INetwork { std::set> corruptedBlocks; - // Valdiate at-rest encryption guarantees. If enabled, tests should inject a known 'marker' in Key and/or Values + // Validate at-rest encryption guarantees. If enabled, tests should inject a known 'marker' in Key and/or Values // inserted into FDB by the workload. On shutdown, all test generated files (under simfdb/) are scanned to find if // 'plaintext marker' is present. Optional dataAtRestPlaintextMarker; diff --git a/fdbrpc/libcoroutine/Coro.c b/fdbrpc/libcoroutine/Coro.c index 8e7244a2aa8..537232abff9 100644 --- a/fdbrpc/libcoroutine/Coro.c +++ b/fdbrpc/libcoroutine/Coro.c @@ -299,7 +299,7 @@ void Coro_switchTo_(Coro* self, Coro* next) { #if defined(USE_SETJMP) && defined(__x86_64__) void Coro_setup(Coro* self, void* arg) { - /* since ucontext seems to be broken on amg64 */ + /* since ucontext seems to be broken on amd64 */ setjmp(self->env); /* This is probably not nice in that it deals directly with @@ -400,7 +400,7 @@ void Coro_setup(Coro* self, void* arg) { void Coro_setup(Coro* self, void* arg) { /* - setjmp/longjmp is flakey under Symbian. + setjmp/longjmp is flaky under Symbian. If the setjmp is done inside the call then a crash occurs. 
Inlining it here solves the problem */ diff --git a/fdbrpc/sim2.actor.cpp b/fdbrpc/sim2.actor.cpp index a0fe2c976f0..8d03f82f687 100644 --- a/fdbrpc/sim2.actor.cpp +++ b/fdbrpc/sim2.actor.cpp @@ -576,7 +576,7 @@ struct Sim2Conn final : IConnection, ReferenceCounted { } void rollRandomClose() { - // make sure connections between parenta and their childs are not closed + // make sure connections between parents and their children are not closed if (!stableConnection && now() - g_simulator->lastConnectionFailure > g_simulator->connectionFailuresDisableDuration && deterministicRandom()->random01() < .00001) { @@ -965,7 +965,7 @@ class SimpleFile : public IAsyncFile, public ReferenceCounted { opId.shortString().c_str()); INJECT_FAULT(io_timeout, "SimpleFile::sync"); // SimpleFile::sync inject io_timeout - INJECT_FAULT(io_error, "SimpleFile::sync"); // SimpleFile::sync inject io_errot + INJECT_FAULT(io_error, "SimpleFile::sync"); // SimpleFile::sync inject io_error return Void(); } diff --git a/fdbserver/BackupWorker.actor.cpp b/fdbserver/BackupWorker.actor.cpp index f99ffe84c68..b5d301edfe5 100644 --- a/fdbserver/BackupWorker.actor.cpp +++ b/fdbserver/BackupWorker.actor.cpp @@ -626,7 +626,7 @@ ACTOR Future monitorBackupProgress(BackupData* self) { // Check every version is larger than backup's startVersion for (auto& [uid, info] : self->backups) { if (self->recruitedEpoch == self->oldestBackupEpoch) { - // update update progress so far if previous epochs are done + // update progress so far if previous epochs are done Version v = std::numeric_limits::max(); for (const auto& [tag, version] : tagVersions) { v = std::min(v, version); @@ -767,7 +767,7 @@ ACTOR Future saveMutationsToFile(BackupData* self, TraceEvent("BackupWorkerTrueUp", self->myId).detail("LastSavedVersion", it->second.lastSavedVersion); } // The true-up version can be larger than first message version, so keep - // the begin versions for later muation filtering. + // the begin versions for later mutation filtering. 
beginVersions.push_back(it->second.lastSavedVersion); logFileFutures.push_back(it->second.container.get().get()->writeTaggedLogFile( diff --git a/fdbserver/BlobGranuleValidation.actor.cpp b/fdbserver/BlobGranuleValidation.actor.cpp index 57492224684..0a1d931753f 100644 --- a/fdbserver/BlobGranuleValidation.actor.cpp +++ b/fdbserver/BlobGranuleValidation.actor.cpp @@ -440,7 +440,7 @@ ACTOR Future validateForceFlushing(Database cx, try { tr.setOption(FDBTransactionOptions::RAW_ACCESS); if (compact) { - // read at current read version version in case re-snapshot had to redo at a higher version + // read at current read version in case re-snapshot had to redo at a higher version wait(store(readVersion, tr.getReadVersion())); } else { readVersion = flushVersion; diff --git a/fdbserver/BlobManager.actor.cpp b/fdbserver/BlobManager.actor.cpp index 5289d998f84..d194ad99212 100644 --- a/fdbserver/BlobManager.actor.cpp +++ b/fdbserver/BlobManager.actor.cpp @@ -430,7 +430,7 @@ struct BlobManagerData : NonCopyable, ReferenceCounted { int64_t manifestDumperSeqNo = 1; bool enableManifestEncryption = false; AsyncTrigger backupTrigger; - AsyncTrigger manifestCompletitionTrigger; + AsyncTrigger manifestCompletionTrigger; Promise iAmReplaced; @@ -1803,7 +1803,7 @@ ACTOR Future reevaluateInitialSplit(Reference bmData, } // redo key alignment on full set of split points - // FIXME: only need to align propsedSplitKey in the middle + // FIXME: only need to align proposedSplitKey in the middle state BlobGranuleSplitPoints finalSplit = wait(alignKeys(bmData, granuleRange, newRanges)); if (BM_DEBUG) { @@ -3383,7 +3383,7 @@ ACTOR Future checkBlobWorkerList(Reference bmData, Promis // but it might also contain blob workers that died while the new manager was being recruited state std::vector blobWorkers = wait(getBlobWorkers(bmData->db, true)); - // We could get the affinity list transactionally with the blob workers, however it is simpilier from an API + // We could get the affinity list transactionally with the blob workers, however it is simpler from an API // perspective to get the affinities after the blob worker list, which ensures we will have the affinity for // every worker returned. std::vector> blobWorkerAffinities = wait(getBlobWorkerAffinity(bmData->db, true)); @@ -4697,7 +4697,7 @@ ACTOR Future partiallyDeleteGranule(Reference self, state std::vector> deletions; // deletion work per file state std::vector deletedFileKeys; // keys for deleted files - state std::vector filesToDelete; // TODO: remove evenutally, just for debugging + state std::vector filesToDelete; // TODO: remove eventually, just for debugging // TODO: binary search these snapshot files for latestSnapshotVersion for (int idx = files.snapshotFiles.size() - 1; idx >= 0; --idx) { @@ -5688,8 +5688,8 @@ ACTOR Future tryFlushRange(Reference bmData, KeyRange ran throw; // terminate for unretryable error } - // check if the range is blobified and then decide retry or skip. - // it may take long time to flush the whole key range and some ranges may have been unblobified or purged. + // check if the range is blobbified and then decide retry or skip. + // it may take long time to flush the whole key range and some ranges may have been unblobbified or purged. 
// so we try to check that first when seeing non-fatal errors bool knownRange = false; for (auto& r : bmData->knownBlobRanges.intersectingRanges(range)) { @@ -5809,7 +5809,7 @@ ACTOR Future backupManifest(Reference bmData) { bmData->stats.lastManifestSeqNo = bmData->manifestDumperSeqNo; bmData->stats.manifestSizeInBytes += bytes; bmData->stats.lastManifestDumpTs = now(); - bmData->manifestCompletitionTrigger.trigger(); + bmData->manifestCompletionTrigger.trigger(); return Void(); } @@ -5865,7 +5865,7 @@ ACTOR Future truncateMutationsLoop(Reference bmData) { TraceEvent("BlobManifestDumped").detail("Seq", bmData->manifestDumperSeqNo); break; } - wait(bmData->manifestCompletitionTrigger.onTrigger()); + wait(bmData->manifestCompletionTrigger.onTrigger()); } // Truncate mutations up to lastFlushVersion - wait(truncateMutations(bmData, lastFlushVersion)); diff --git a/fdbserver/BlobManifest.actor.cpp b/fdbserver/BlobManifest.actor.cpp index 8dfb9f79e12..eb07d7dc5cc 100644 --- a/fdbserver/BlobManifest.actor.cpp +++ b/fdbserver/BlobManifest.actor.cpp @@ -442,7 +442,7 @@ class BlobManifestDumper : public ReferenceCounted { ranges.push_back(KeyRangeRef(metadataVersionKey, metadataVersionKeyEnd)); state Version readVersion = wait(dumpRanges(self, splitter, ranges)); - // blobGranuleHistoryKeys - Map granule to its parents and parent bundaries. for time-travel read + // blobGranuleHistoryKeys - Map granule to its parents and parent boundaries. for time-travel read wait(dumpRange(self, splitter, blobGranuleHistoryKeys, [=](KeyValueRef row) { return shouldDumpBlobGranuleHistoryKey(row, readVersion); })); @@ -540,7 +540,7 @@ class BlobManifestDumper : public ReferenceCounted { return true; } - // Start a transcation to read range and append to splitter. Number of rows are limited by maxRowsPerTransaction. + // Start a transaction to read range and append to splitter. Number of rows are limited by maxRowsPerTransaction. // It returns the last key that has been read. 
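Aside (not part of the patch): the manifestCompletionTrigger rename above touches both ends of a trigger()/onTrigger() hand-off — backupManifest() fires the trigger after a dump and truncateMutationsLoop() waits on it. FDB's AsyncTrigger is Flow-specific, so the sketch below uses std::condition_variable purely to illustrate the same wake-up pattern; it is not FDB code.

// Plain-C++ analogue of the trigger()/onTrigger() hand-off shown in the hunks above.
#include <condition_variable>
#include <cstdint>
#include <mutex>

class ManualTrigger {
	std::mutex m;
	std::condition_variable cv;
	uint64_t generation = 0;

public:
	void trigger() { // producer side: e.g. after a manifest dump completes
		std::lock_guard<std::mutex> g(m);
		++generation;
		cv.notify_all();
	}
	void waitForTrigger() { // consumer side: e.g. the truncate-mutations loop
		std::unique_lock<std::mutex> lk(m);
		uint64_t seen = generation;
		cv.wait(lk, [&] { return generation != seen; });
	}
};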
ACTOR static Future dumpRange(Reference self, Reference splitter, @@ -610,7 +610,7 @@ class BlobManifestDumper : public ReferenceCounted { loop { state std::vector allFiles = wait(BlobManifestFile::listAll(writer)); - TraceEvent("BlobManfiestCleanup").detail("FileCount", allFiles.size()); + TraceEvent("BlobManifestCleanup").detail("FileCount", allFiles.size()); int count = BlobManifest::count(allFiles); if (count <= SERVER_KNOBS->BLOB_RESTORE_MANIFEST_RETENTION_MAX) { return Void(); @@ -651,7 +651,7 @@ class BlobManifestLoader : public ReferenceCounted { wait(validate(self)); } catch (Error& e) { dprint("WARNING: unexpected manifest loader error {}\n", e.what()); - TraceEvent("BlobManfiestError").error(e).log(); + TraceEvent("BlobManifestError").error(e).log(); throw; } return Void(); diff --git a/fdbserver/BlobMigrator.actor.cpp b/fdbserver/BlobMigrator.actor.cpp index 29f0a6aa521..44f6c78f209 100644 --- a/fdbserver/BlobMigrator.actor.cpp +++ b/fdbserver/BlobMigrator.actor.cpp @@ -479,7 +479,7 @@ class BlobMigrator : public NonCopyable, public ReferenceCounted, } } - // Apply muation logs for system backup ranges after manifest version + // Apply mutation logs for system backup ranges after manifest version Version manifestVersion = wait(getManifestVersion(self->db_)); for (auto& range : getSystemBackupRanges()) { self->mlogRestoreRanges_.push_back(self->mlogRestoreRanges_.arena(), range); @@ -707,7 +707,7 @@ class BlobMigrator : public NonCopyable, public ReferenceCounted, ACTOR static Future processStorageQueuingMetricsRequest(StorageQueuingMetricsRequest req) { // dprint("Unsupported StorageQueuingMetricsRequest\n"); - // FIXME get rid of this delay. it's a temp solution to avoid starvaion scheduling of DD + // FIXME get rid of this delay. it's a temp solution to avoid starvation scheduling of DD // processes wait(delay(1)); req.reply.sendError(unsupported_operation()); diff --git a/fdbserver/BlobWorker.actor.cpp b/fdbserver/BlobWorker.actor.cpp index df8cb688ead..178b1734079 100644 --- a/fdbserver/BlobWorker.actor.cpp +++ b/fdbserver/BlobWorker.actor.cpp @@ -407,7 +407,7 @@ ACTOR Future getGranuleCipherKeysImpl(Reference blobGranuleUpdateFiles(Reference bwData, // ASSERT(!deltas.mutations.empty()); if (!deltas.mutations.empty()) { if (deltas.mutations.size() == 1 && deltas.mutations.back().param1 == lastEpochEndPrivateKey) { - // Note rollbackVerision is durable, [rollbackVersion+1 - deltas.version] needs to + // Note rollbackVersion is durable, [rollbackVersion+1 - deltas.version] needs to // be tossed For correctness right now, there can be no waits and yields either in // rollback handling or in handleBlobGranuleFileRequest once waitForVersion has // succeeded, otherwise this will race and clobber results @@ -5419,7 +5419,7 @@ ACTOR Future blobWorkerCore(BlobWorkerInterface bwInterf, ReferencecurrentManagerStatusStream.get().sendError(connection_failed()); // hold a copy of the previous stream if it exists, so any waiting send calls don't get - // proken_promise before onChange + // broken_promise before onChange ReplyPromiseStream copy; if (self->statusStreamInitialized) { copy = self->currentManagerStatusStream.get(); diff --git a/fdbserver/ClusterController.actor.cpp b/fdbserver/ClusterController.actor.cpp index 718f0368993..d6cd3d05b87 100644 --- a/fdbserver/ClusterController.actor.cpp +++ b/fdbserver/ClusterController.actor.cpp @@ -177,7 +177,7 @@ bool ClusterControllerData::transactionSystemContainsDegradedServers() { }; // Check if transaction system contains 
degraded/disconnected servers. For satellite and remote regions, we only - // check for disconnection since the latency between prmary and satellite is across WAN and may not be very + // check for disconnection since the latency between primary and satellite is across WAN and may not be very // stable. return transactionWorkerInList(degradationInfo.degradedServers, /*skipSatellite=*/true, /*skipRemote=*/true) || transactionWorkerInList(degradationInfo.disconnectedServers, @@ -2486,7 +2486,7 @@ ACTOR Future startEncryptKeyProxy(ClusterControllerData* self, EncryptionA // Recruit EncryptKeyProxy in the same datacenter as the ClusterController. // This should always be possible, given EncryptKeyProxy is stateless, we can recruit EncryptKeyProxy - // on the same process as the CluserController. + // on the same process as the ClusterController. state std::map>, int> id_used; self->updateKnownIds(&id_used); state WorkerFitnessInfo ekpWorker = self->getWorkerForRoleInDatacenter(self->clusterControllerDcId, @@ -3559,7 +3559,7 @@ TEST_CASE("/fdbserver/clustercontroller/getDegradationInfo") { data.workerHealth.clear(); } - // Test that if both A complains B and B compalins A, only one of the server will be chosen as degraded + // Test that if both A complains B and B complains A, only one of the server will be chosen as degraded // server. { data.workerHealth[worker].degradedPeers[badPeer1] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1, diff --git a/fdbserver/ClusterRecovery.actor.cpp b/fdbserver/ClusterRecovery.actor.cpp index 2b8f4fed77d..9e13b15384b 100644 --- a/fdbserver/ClusterRecovery.actor.cpp +++ b/fdbserver/ClusterRecovery.actor.cpp @@ -373,7 +373,7 @@ ACTOR Future newSeedServers(Reference self, !newServer.isError(error_code_request_maybe_delivered)) throw newServer.getError(); - CODE_PROBE(true, "initial storage recuitment loop failed to get new server"); + CODE_PROBE(true, "initial storage recruitment loop failed to get new server"); wait(delay(SERVER_KNOBS->STORAGE_RECRUITMENT_DELAY)); } else { if (!dcId_tags.count(recruits.storageServers[idx].locality.dcId())) { diff --git a/fdbserver/CommitProxyServer.actor.cpp b/fdbserver/CommitProxyServer.actor.cpp index 95286f58fb2..dc44c663342 100644 --- a/fdbserver/CommitProxyServer.actor.cpp +++ b/fdbserver/CommitProxyServer.actor.cpp @@ -578,7 +578,7 @@ ACTOR Future addBackupMutations(ProxyCommitData* self, *partBuffer = bigEndian32(part); } - // Define the mutation type and and location + // Define the mutation type and location backupMutation.param1 = wr.toValue(); ASSERT(backupMutation.param1.startsWith( logRangeMutation->first)); // We are writing into the configured destination @@ -1836,7 +1836,7 @@ Future writeMutation(CommitBatchContext* self, CODE_PROBE(true, "using already encrypted mutation"); encryptedMutation = encryptedMutationOpt->get(); ASSERT(encryptedMutation.isEncrypted()); - // During simulation check whether the encrypted mutation matches the decrpyted mutation + // During simulation check whether the encrypted mutation matches the decrypted mutation if (g_network && g_network->isSimulated()) { return writeMutationEncryptedMutation(self, domainId, mutation, encryptedMutationOpt, arena); } @@ -2514,7 +2514,7 @@ ACTOR Future reply(CommitBatchContext* self) { // Send replies to clients // TODO: should be timer_monotonic(), but gets compared to request time, which uses g_network->timer(). 
double endTime = g_network->timer(); - // Reset all to zero, used to track the correct index of each commitTransacitonRef on each resolver + // Reset all to zero, used to track the correct index of each commitTransactionRef on each resolver std::fill(self->nextTr.begin(), self->nextTr.end(), 0); std::unordered_map idCountsForKey; diff --git a/fdbserver/ConsistencyScan.actor.cpp b/fdbserver/ConsistencyScan.actor.cpp index 1d4122261c1..455bdeed35e 100644 --- a/fdbserver/ConsistencyScan.actor.cpp +++ b/fdbserver/ConsistencyScan.actor.cpp @@ -1808,7 +1808,7 @@ ACTOR Future checkDataConsistency(Database cx, *success = false; return Void(); } - // All shards should be available in quiscence + // All shards should be available in quiescence if (performQuiescentChecks && !storageServerInterfaces[i].isTss()) { testFailure( "Storage server unavailable", performQuiescentChecks, success, failureIsError); diff --git a/fdbserver/DDShardTracker.actor.cpp b/fdbserver/DDShardTracker.actor.cpp index 1e0d0e91e2c..992e3d8f552 100644 --- a/fdbserver/DDShardTracker.actor.cpp +++ b/fdbserver/DDShardTracker.actor.cpp @@ -1120,7 +1120,7 @@ ACTOR Future shardEvaluator(DataDistributionTracker* self, // There are the bounds inside of which we are happy with the shard size. // getShardSizeBounds() will always have shardBounds.min.bytes == 0 for shards that start at allKeys.begin, - // so will will never attempt to merge that shard with the one previous. + // so will never attempt to merge that shard with the one previous. ShardSizeBounds shardBounds = getShardSizeBounds(keys, self->maxShardSize->get().get()); StorageMetrics const& stats = shardSize->get().get().metrics; auto bandwidthStatus = getBandwidthStatus(stats); @@ -1167,7 +1167,7 @@ ACTOR Future shardEvaluator(DataDistributionTracker* self, // .detail("CurrentMetrics", stats.toString()) // .detail("ShardBoundsMaxBytes", shardBounds.max.bytes) // .detail("ShardBoundsMinBytes", shardBounds.min.bytes) - // .detail("WriteBandwitdhStatus", bandwidthStatus) + // .detail("WriteBandwidthStatus", bandwidthStatus) // .detail("SplitBecauseHighWriteBandWidth", writeSplit ? 
"Yes" : "No"); if (!self->anyZeroHealthyTeams->get() && wantsToMerge->hasBeenTrueForLongEnough()) { @@ -2019,7 +2019,7 @@ void PhysicalShardCollection::initPhysicalShardCollection(KeyRange keys, } } else { // If any physicalShard restored when DD init is the anonymousShard, - // Then DD enters Transition state where DD graduatelly moves Shard (or KeyRange) + // Then DD enters Transition state where DD gradually moves Shard (or KeyRange) // out of the anonymousShard setTransitionCheck(); } diff --git a/fdbserver/DDTeamCollection.actor.cpp b/fdbserver/DDTeamCollection.actor.cpp index fb1ab34721f..5a0dbb5f6f2 100644 --- a/fdbserver/DDTeamCollection.actor.cpp +++ b/fdbserver/DDTeamCollection.actor.cpp @@ -200,7 +200,7 @@ class DDTeamCollectionImpl { // A team storage queue size is defined as the longest storage queue size among all SSes of the team static int64_t calculateTeamStorageQueueThreshold(const std::vector>& teams) { if (teams.size() == 0) { - return std::numeric_limits::max(); // disable this funcationality + return std::numeric_limits::max(); // disable this functionality } std::vector queueLengthList; for (const auto& team : teams) { @@ -1952,7 +1952,7 @@ class DDTeamCollectionImpl { // Remove the machine team bool foundRemovedMachineTeam = self->removeMachineTeam(mt); // When we remove the last server team on a machine team in removeTeam(), we also remove the machine - // team This is needed for removeTeam() functoin. So here the removeMachineTeam() should not find the + // team This is needed for removeTeam() function. So here the removeMachineTeam() should not find the // machine team ASSERT(foundRemovedMachineTeam); numMachineTeamRemoved++; @@ -3686,7 +3686,7 @@ void DDTeamCollection::updateTeamEligibility() { bool lowCPU = team->hasLowerCpu(teamPivots.pivotCPU); healthyCount++; - DisabledTraceEvent(SevDebug, "EligiblityTeamDebug") + DisabledTraceEvent(SevDebug, "EligibilityTeamDebug") .detail("TeamId", team->getTeamID()) .detail("CPU", team->getAverageCPU()) .detail("AvailableSpace", team->getMinAvailableSpace()) @@ -5677,14 +5677,14 @@ Reference DDTeamCollection::checkAndCreateMachineTeam(Referen void DDTeamCollection::removeMachine(Reference removedMachineInfo) { // Find machines that share teams with the removed machine - std::set> machinesWithAjoiningTeams; + std::set> machinesWithAdjoiningTeams; for (auto& machineTeam : removedMachineInfo->machineTeams) { - machinesWithAjoiningTeams.insert(machineTeam->getMachineIDs().begin(), machineTeam->getMachineIDs().end()); + machinesWithAdjoiningTeams.insert(machineTeam->getMachineIDs().begin(), machineTeam->getMachineIDs().end()); } - machinesWithAjoiningTeams.erase(removedMachineInfo->machineID); + machinesWithAdjoiningTeams.erase(removedMachineInfo->machineID); // For each machine in a machine team with the removed machine, // erase shared machine teams from the list of teams. 
- for (auto it = machinesWithAjoiningTeams.begin(); it != machinesWithAjoiningTeams.end(); ++it) { + for (auto it = machinesWithAdjoiningTeams.begin(); it != machinesWithAdjoiningTeams.end(); ++it) { auto& machineTeams = machine_info[*it]->machineTeams; for (int t = 0; t < machineTeams.size(); t++) { auto& machineTeam = machineTeams[t]; @@ -5711,7 +5711,7 @@ void DDTeamCollection::removeMachine(Reference removedMachineInfo machine_info.erase(removedMachineInfo->machineID); TraceEvent("MachineLocalityMapUpdate").detail("MachineUIDRemoved", removedMachineInfo->machineID.toString()); - // We do not update macineLocalityMap when a machine is removed because we will do so when we use it in + // We do not update machineLocalityMap when a machine is removed because we will do so when we use it in // addBestMachineTeams() // rebuildMachineLocalityMap(); } @@ -5762,17 +5762,17 @@ void DDTeamCollection::removeServer(UID removedServer) { // Step: Remove server team that relate to removedServer // Find all servers with which the removedServer shares teams - std::set serversWithAjoiningTeams; + std::set serversWithAdjoiningTeams; auto const& sharedTeams = removedServerInfo->getTeams(); for (int i = 0; i < sharedTeams.size(); ++i) { auto& teamIds = sharedTeams[i]->getServerIDs(); - serversWithAjoiningTeams.insert(teamIds.begin(), teamIds.end()); + serversWithAdjoiningTeams.insert(teamIds.begin(), teamIds.end()); } - serversWithAjoiningTeams.erase(removedServer); + serversWithAdjoiningTeams.erase(removedServer); // For each server in a team with the removedServer, erase shared teams from the list of teams in that other // server - for (auto it = serversWithAjoiningTeams.begin(); it != serversWithAjoiningTeams.end(); ++it) { + for (auto it = serversWithAdjoiningTeams.begin(); it != serversWithAdjoiningTeams.end(); ++it) { server_info[*it]->removeTeamsContainingServer(removedServer); } @@ -5833,7 +5833,7 @@ void DDTeamCollection::removeServer(UID removedServer) { removeMachine(removedMachineInfo); } - // If the machine uses removedServer's locality and the machine still has servers, the the machine's + // If the machine uses removedServer's locality and the machine still has servers, the machine's // representative server will be updated when it is used in addBestMachineTeams() // Note that since we do not rebuildMachineLocalityMap() here, the machineLocalityMap can be stale. // This is ok as long as we do not arbitrarily validate if machine team satisfies replication policy. diff --git a/fdbserver/DataDistribution.actor.cpp b/fdbserver/DataDistribution.actor.cpp index 0f093f97c45..ede9c8ee7fa 100644 --- a/fdbserver/DataDistribution.actor.cpp +++ b/fdbserver/DataDistribution.actor.cpp @@ -550,7 +550,7 @@ struct DataDistributor : NonCopyable, ReferenceCounted { // waitDataDistributorEnabled and then set to 0 before // waitUntilDataDistributorExitSecurityMode. For this case, // after waitUntilDataDistributorExitSecurityMode, DDMode is 0. - // The init loop does not break and the loop will stuct at + // The init loop does not break and the loop will stuck at // waitDataDistributorEnabled in the next iteration. 
TraceEvent("DataDistributorExitSecurityMode").log(); @@ -605,7 +605,7 @@ struct DataDistributor : NonCopyable, ReferenceCounted { .detail("BytesWrittenAverageRate", 0) .detail("PriorityRecoverMove", 0) .detail("PriorityRebalanceUnderutilizedTeam", 0) - .detail("PriorityRebalannceOverutilizedTeam", 0) + .detail("PriorityRebalanceOverutilizedTeam", 0) .detail("PriorityTeamHealthy", 0) .detail("PriorityTeamContainsUndesiredServer", 0) .detail("PriorityTeamRedundant", 0) @@ -2456,7 +2456,7 @@ ACTOR Future dispatchAuditStorageServerShard(Reference se state int i = 0; for (; i < interfs.size(); ++i) { state StorageServerInterface targetServer = interfs[i]; - // Currently, Tss server may not follow the auit consistency rule + // Currently, Tss server may not follow the audit consistency rule // Thus, skip if the server is tss if (targetServer.isTss()) { continue; diff --git a/fdbserver/DiskQueue.actor.cpp b/fdbserver/DiskQueue.actor.cpp index 9d6dd78544f..8d0ff92c412 100644 --- a/fdbserver/DiskQueue.actor.cpp +++ b/fdbserver/DiskQueue.actor.cpp @@ -515,7 +515,7 @@ class RawDiskQueue_TwoFiles : public Tracked { files[1].popped += popped - pop0; } - // Set the starting point of the ring buffer, i.e., the first useful page to be read (and poped) + // Set the starting point of the ring buffer, i.e., the first useful page to be read (and popped) ACTOR static Future setPoppedPage(RawDiskQueue_TwoFiles* self, int file, int64_t page, int64_t debugSeq) { self->files[file].popped = page * sizeof(Page); if (file) @@ -527,7 +527,7 @@ class RawDiskQueue_TwoFiles : public Tracked { // If we are starting in file 1, we truncate file 0 in case it has been corrupted. // In particular, we are trying to avoid a dropped or corrupted write to the first page of file 0 causing it to // be sequenced before file 1, when in fact it contains many pages that follow file 1. These ok pages may be - // incorrectly read if the machine dies after overwritting the first page of file 0 and is then recovered + // incorrectly read if the machine dies after overwriting the first page of file 0 and is then recovered if (file == 1) wait(self->truncateFile(self, 0, 0)); @@ -672,7 +672,7 @@ class RawDiskQueue_TwoFiles : public Tracked { // Begin pushing at the beginning of files[1] // Truncate both files, since perhaps only the first pages are corrupted. This avoids cases where - // overwritting the first page and then terminating makes subsequent pages valid upon recovery. + // overwriting the first page and then terminating makes subsequent pages valid upon recovery. std::vector> truncates; for (int i = 0; i < 2; ++i) if (self->files[i].size > 0) @@ -944,7 +944,7 @@ class DiskQueue final : public IDiskQueue, public Tracked { if (!anyPopped) return 0; - // To mark pages are poped, we push an empty page to specify that following pages were poped. + // To mark pages are popped, we push an empty page to specify that following pages were popped. // maxPayLoad is the max. payload size, i.e., (page_size - page_header_size). return Page::maxPayload; } else @@ -956,7 +956,7 @@ class DiskQueue final : public IDiskQueue, public Tracked { if (!pushedPageCount()) { if (!anyPopped) return Void(); - addEmptyPage(); // To remove poped pages, we push an empty page to specify that pages behind it were poped. + addEmptyPage(); // To remove popped pages, we push an empty page to specify that pages behind it were popped. 
} anyPopped = false; backPage().popped = poppedSeq; @@ -996,7 +996,7 @@ class DiskQueue final : public IDiskQueue, public Tracked { Future> readNext(int bytes) override { return readNext(this, bytes); } // FIXME: getNextReadLocation should ASSERT( initialized ), but the memory storage engine needs - // to be changed to understand the new intiailizeRecovery protocol. + // to be changed to understand the new initializeRecovery protocol. location getNextReadLocation() const override { return nextReadLocation; } location getNextPushLocation() const override { ASSERT(initialized); diff --git a/fdbserver/EncryptKeyProxy.actor.cpp b/fdbserver/EncryptKeyProxy.actor.cpp index f314dc0b643..5cc447ff524 100644 --- a/fdbserver/EncryptKeyProxy.actor.cpp +++ b/fdbserver/EncryptKeyProxy.actor.cpp @@ -1059,8 +1059,8 @@ ACTOR Future encryptKeyProxyServer(EncryptKeyProxyInterface ekpInterface, // Approach avoids external RPCs due to EncryptionKey refreshes for the inline write encryption codepath such as: // CPs, Redwood Storage Server node flush etc. The process doing the encryption refresh the cached cipher keys based // on FLOW_KNOB->ENCRYPTION_CIPHER_KEY_CACHE_TTL_SEC interval which is intentionally kept longer than - // FLOW_KNOB->ENCRRYPTION_KEY_REFRESH_INTERVAL_SEC, allowing the interactions with external Encryption Key Manager - // mostly not co-inciding with FDB process encryption key refresh attempts. + // FLOW_KNOB->ENCRYPTION_KEY_REFRESH_INTERVAL_SEC, allowing the interactions with external Encryption Key Manager + // mostly not coinciding with FDB process encryption key refresh attempts. self->encryptionKeyRefresher = recurringAsync([&]() { return refreshEncryptionKeys(self, kmsConnectorInf); }, FLOW_KNOBS->ENCRYPT_KEY_REFRESH_INTERVAL, /* interval */ diff --git a/fdbserver/KeyValueStoreMemory.actor.cpp b/fdbserver/KeyValueStoreMemory.actor.cpp index 2e2634ac0f4..173af0296c7 100644 --- a/fdbserver/KeyValueStoreMemory.actor.cpp +++ b/fdbserver/KeyValueStoreMemory.actor.cpp @@ -841,7 +841,7 @@ class KeyValueStoreMemory final : public IKeyValueStore, NonCopyable { wait(self->recovering); state Key nextKey = self->recoveredSnapshotKey; - state bool nextKeyAfter = false; // setting this to true is equilvent to setting nextKey = keyAfter(nextKey) + state bool nextKeyAfter = false; // setting this to true is equivalent to setting nextKey = keyAfter(nextKey) state uint64_t snapshotTotalWrittenBytes = 0; state int lastDiff = 0; state int snapItems = 0; diff --git a/fdbserver/KeyValueStoreRocksDB.actor.cpp b/fdbserver/KeyValueStoreRocksDB.actor.cpp index ceaa6b594cd..68e95d770d4 100644 --- a/fdbserver/KeyValueStoreRocksDB.actor.cpp +++ b/fdbserver/KeyValueStoreRocksDB.actor.cpp @@ -406,7 +406,7 @@ rocksdb::ExportImportFilesMetaData getMetaData(const CheckpointMetaData& checkpo liveFileMetaData.num_entries = fileMetaData.num_entries; liveFileMetaData.num_deletions = fileMetaData.num_deletions; liveFileMetaData.oldest_blob_file_number = fileMetaData.oldest_blob_file_number; - liveFileMetaData.oldest_ancester_time = fileMetaData.oldest_ancester_time; + liveFileMetaData.oldest_ancestor_time = fileMetaData.oldest_ancestor_time; liveFileMetaData.file_creation_time = fileMetaData.file_creation_time; liveFileMetaData.epoch_number = fileMetaData.epoch_number; liveFileMetaData.name = fileMetaData.name; @@ -441,7 +441,7 @@ void populateMetaData(CheckpointMetaData* checkpoint, const rocksdb::ExportImpor liveFileMetaData.num_entries = fileMetaData.num_entries; liveFileMetaData.num_deletions = 
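Aside (not part of the patch): the EncryptKeyProxy comment above rests on a single knob relationship — the cached cipher-key TTL (ENCRYPTION_CIPHER_KEY_CACHE_TTL_SEC) is intentionally kept longer than the refresh interval so a refresh lands before a cached key expires. A toy compile-time check of that invariant, with made-up stand-in values rather than the real knob defaults:

// Illustrative only; the numeric values are invented stand-ins, not FDB knob defaults.
constexpr double kCipherKeyCacheTtlSec = 600.0;  // stand-in for ENCRYPTION_CIPHER_KEY_CACHE_TTL_SEC
constexpr double kKeyRefreshIntervalSec = 60.0;  // stand-in for ENCRYPT_KEY_REFRESH_INTERVAL
static_assert(kCipherKeyCacheTtlSec > kKeyRefreshIntervalSec,
              "refresh must run at least once before a cached cipher key expires");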
fileMetaData.num_deletions; liveFileMetaData.oldest_blob_file_number = fileMetaData.oldest_blob_file_number; - liveFileMetaData.oldest_ancester_time = fileMetaData.oldest_ancester_time; + liveFileMetaData.oldest_ancestor_time = fileMetaData.oldest_ancestor_time; liveFileMetaData.file_creation_time = fileMetaData.file_creation_time; liveFileMetaData.epoch_number = fileMetaData.epoch_number; liveFileMetaData.name = fileMetaData.name; @@ -1558,9 +1558,9 @@ struct RocksDBKeyValueStore : IKeyValueStore { rocksdb::PinnableSlice value; rocksdb::ReadOptions readOptions = sharedState->getReadOptions(); if (shouldThrottle(a.type, a.key) && SERVER_KNOBS->ROCKSDB_SET_READ_TIMEOUT) { - uint64_t deadlineMircos = + uint64_t deadlineMicros = db->GetEnv()->NowMicros() + (readValueTimeout - (readBeginTime - a.startTime)) * 1000000; - std::chrono::seconds deadlineSeconds(deadlineMircos / 1000000); + std::chrono::seconds deadlineSeconds(deadlineMicros / 1000000); readOptions.deadline = std::chrono::duration_cast(deadlineSeconds); } @@ -1647,9 +1647,9 @@ struct RocksDBKeyValueStore : IKeyValueStore { rocksdb::PinnableSlice value; rocksdb::ReadOptions readOptions = sharedState->getReadOptions(); if (shouldThrottle(a.type, a.key) && SERVER_KNOBS->ROCKSDB_SET_READ_TIMEOUT) { - uint64_t deadlineMircos = + uint64_t deadlineMicros = db->GetEnv()->NowMicros() + (readValuePrefixTimeout - (readBeginTime - a.startTime)) * 1000000; - std::chrono::seconds deadlineSeconds(deadlineMircos / 1000000); + std::chrono::seconds deadlineSeconds(deadlineMicros / 1000000); readOptions.deadline = std::chrono::duration_cast(deadlineSeconds); } @@ -1831,7 +1831,7 @@ struct RocksDBKeyValueStore : IKeyValueStore { numReadWaiters(SERVER_KNOBS->ROCKSDB_READ_QUEUE_HARD_MAX - SERVER_KNOBS->ROCKSDB_READ_QUEUE_SOFT_MAX), numFetchWaiters(SERVER_KNOBS->ROCKSDB_FETCH_QUEUE_HARD_MAX - SERVER_KNOBS->ROCKSDB_FETCH_QUEUE_SOFT_MAX), errorListener(std::make_shared(id)), errorFuture(errorListener->getFuture()) { - // In simluation, run the reader/writer threads as Coro threads (i.e. in the network thread. The storage engine + // In simulation, run the reader/writer threads as Coro threads (i.e. in the network thread. The storage engine // is still multi-threaded as background compaction threads are still present. Reads/writes to disk will also // block the network thread in a way that would be unacceptable in production but is a necessary evil here. When // performing the reads in background threads in simulation, the event loop thinks there is no work to do and @@ -2115,7 +2115,7 @@ struct RocksDBKeyValueStore : IKeyValueStore { } } - // Checks and waits for few seconds if rocskdb is overloaded. + // Checks and waits for few seconds if rocksdb is overloaded. ACTOR Future checkRocksdbState(RocksDBKeyValueStore* self) { state uint64_t estPendCompactBytes; state uint64_t numImmutableMemtables; @@ -2353,7 +2353,7 @@ struct RocksDBKeyValueStore : IKeyValueStore { // keysSet will store the written keys in the current transaction. // previousCommitKeysSet will store the written keys that are currently in the rocksdb commit path. // When one commit is in the rocksdb commit path, the other processing commit in the kvsstorerocksdb - // read iterators will not see the the writes set in previousCommitKeysSet. To avoid that, we will + // read iterators will not see the writes set in previousCommitKeysSet. To avoid that, we will // maintain the previousCommitKeysSet until the rocksdb commit is processed and returned. 
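Aside (not part of the patch): both readValue paths corrected above build their RocksDB read deadline the same way — the remaining timeout budget is added to Env::NowMicros() and then truncated to whole seconds before being stored in ReadOptions::deadline. A minimal standalone version of that arithmetic, with hypothetical parameter names, might look like this:

// Sketch of the deadline arithmetic in the readValue/readValuePrefix actions above.
// Parameter names are hypothetical; only the arithmetic mirrors the hunks.
#include <chrono>
#include <cstdint>

std::chrono::microseconds computeReadDeadline(uint64_t nowMicros,      // e.g. db->GetEnv()->NowMicros()
                                              double timeoutSeconds,   // e.g. readValueTimeout
                                              double alreadyElapsed) { // seconds already spent before the read
	uint64_t deadlineMicros = nowMicros + static_cast<uint64_t>((timeoutSeconds - alreadyElapsed) * 1000000);
	// As in the hunks, the deadline is truncated to whole seconds before conversion.
	std::chrono::seconds deadlineSeconds(deadlineMicros / 1000000);
	return std::chrono::duration_cast<std::chrono::microseconds>(deadlineSeconds);
}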
std::set keysSet; std::set previousCommitKeysSet; diff --git a/fdbserver/KeyValueStoreSQLite.actor.cpp b/fdbserver/KeyValueStoreSQLite.actor.cpp index b7d8b998bb3..125aead2202 100644 --- a/fdbserver/KeyValueStoreSQLite.actor.cpp +++ b/fdbserver/KeyValueStoreSQLite.actor.cpp @@ -1810,7 +1810,7 @@ class KeyValueStoreSQLite final : public IKeyValueStore { // If a wal file fails during the commit process before finishing a checkpoint, then it is possible that our // wal file will be non-empty when we reload it. We execute a checkpoint here to remedy that situation. - // This call must come before before creating a cursor because it will fail if there are any outstanding + // This call must come before creating a cursor because it will fail if there are any outstanding // transactions. fullCheckpoint(); @@ -2331,7 +2331,7 @@ ACTOR Future KVFileCheck(std::string filename, bool integrity) { state IKeyValueStore* store = keyValueStoreSQLite(filename, UID(0, 0), type, !integrity, integrity); ASSERT(store != nullptr); - // Wait for integry check to finish + // Wait for integrity check to finish wait(success(store->readValue(StringRef()))); if (store->getError().isError()) diff --git a/fdbserver/KeyValueStoreShardedRocksDB.actor.cpp b/fdbserver/KeyValueStoreShardedRocksDB.actor.cpp index fd12758110b..029467b5820 100644 --- a/fdbserver/KeyValueStoreShardedRocksDB.actor.cpp +++ b/fdbserver/KeyValueStoreShardedRocksDB.actor.cpp @@ -56,7 +56,7 @@ static_assert((ROCKSDB_MAJOR == FDB_ROCKSDB_MAJOR && ROCKSDB_MINOR == FDB_ROCKSD const std::string rocksDataFolderSuffix = "-data"; const std::string METADATA_SHARD_ID = "kvs-metadata"; -const std::string DEFAULT_CF_NAME = "default"; // `specialKeys` is stored in this culoumn family. +const std::string DEFAULT_CF_NAME = "default"; // `specialKeys` is stored in this column family. 
const std::string manifestFilePrefix = "MANIFEST-"; const KeyRef shardMappingPrefix("\xff\xff/ShardMapping/"_sr); const KeyRef compactionTimestampPrefix("\xff\xff/CompactionTimestamp/"_sr); @@ -317,7 +317,7 @@ rocksdb::ExportImportFilesMetaData getMetaData(const CheckpointMetaData& checkpo liveFileMetaData.num_entries = fileMetaData.num_entries; liveFileMetaData.num_deletions = fileMetaData.num_deletions; liveFileMetaData.oldest_blob_file_number = fileMetaData.oldest_blob_file_number; - liveFileMetaData.oldest_ancester_time = fileMetaData.oldest_ancester_time; + liveFileMetaData.oldest_ancestor_time = fileMetaData.oldest_ancestor_time; liveFileMetaData.file_creation_time = fileMetaData.file_creation_time; liveFileMetaData.smallest = fileMetaData.smallest; liveFileMetaData.largest = fileMetaData.largest; @@ -356,7 +356,7 @@ void populateMetaData(CheckpointMetaData* checkpoint, const rocksdb::ExportImpor liveFileMetaData.num_entries = fileMetaData.num_entries; liveFileMetaData.num_deletions = fileMetaData.num_deletions; liveFileMetaData.oldest_blob_file_number = fileMetaData.oldest_blob_file_number; - liveFileMetaData.oldest_ancester_time = fileMetaData.oldest_ancester_time; + liveFileMetaData.oldest_ancestor_time = fileMetaData.oldest_ancestor_time; liveFileMetaData.file_creation_time = fileMetaData.file_creation_time; liveFileMetaData.smallest = fileMetaData.smallest; liveFileMetaData.largest = fileMetaData.largest; @@ -1103,7 +1103,7 @@ class ShardManager { } e.detail("NumLevels", numLevels); } - TraceEvent(SevInfo, "KVSPhysialShardMetrics") + TraceEvent(SevInfo, "KVSPhysicalShardMetrics") .detail("NumActiveShards", shardManager->numActiveShards()) .detail("TotalPhysicalShards", shardManager->numPhysicalShards()) .detail("NumSstFiles", numSstFiles); @@ -1153,7 +1153,7 @@ class ShardManager { return status; } - TraceEvent("ShardedRocksDBOpen").detail("Duraton", now() - start).detail("NumCFs", descriptors.size()); + TraceEvent("ShardedRocksDBOpen").detail("Duration", now() - start).detail("NumCFs", descriptors.size()); if (foundMetadata) { TraceEvent(SevInfo, "ShardedRocksInitLoadPhysicalShards", this->logId) @@ -3113,7 +3113,7 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore { (*columnFamilyMap)[ps->cf->GetID()] = ps->cf; TraceEvent(SevInfo, "RocksDBRestoreCFSuccess", logId) .detail("Path", a.path) - .detail("ColumnFaminly", ps->cf->GetName()) + .detail("ColumnFamily", ps->cf->GetName()) .detail("Checkpoints", describe(a.checkpoints)); // Remove the extra data. @@ -3314,9 +3314,9 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore { auto db = a.shard->db; if (shouldThrottle(a.type, a.key) && SERVER_KNOBS->ROCKSDB_SET_READ_TIMEOUT) { - uint64_t deadlineMircos = + uint64_t deadlineMicros = db->GetEnv()->NowMicros() + (readValueTimeout - (timer_monotonic() - a.startTime)) * 1000000; - std::chrono::seconds deadlineSeconds(deadlineMircos / 1000000); + std::chrono::seconds deadlineSeconds(deadlineMicros / 1000000); options.deadline = std::chrono::duration_cast(deadlineSeconds); } double dbGetBeginTime = a.getHistograms ? 
timer_monotonic() : 0; @@ -3401,9 +3401,9 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore { auto options = getReadOptions(); auto db = a.shard->db; if (shouldThrottle(a.type, a.key) && SERVER_KNOBS->ROCKSDB_SET_READ_TIMEOUT) { - uint64_t deadlineMircos = + uint64_t deadlineMicros = db->GetEnv()->NowMicros() + (readValuePrefixTimeout - (timer_monotonic() - a.startTime)) * 1000000; - std::chrono::seconds deadlineSeconds(deadlineMircos / 1000000); + std::chrono::seconds deadlineSeconds(deadlineMicros / 1000000); options.deadline = std::chrono::duration_cast(deadlineSeconds); } @@ -3557,7 +3557,7 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore { } }; - // Persist shard mappinng key range should not be in shardMap. + // Persist shard mapping key range should not be in shardMap. explicit ShardedRocksDBKeyValueStore(const std::string& path, UID id) : rState(std::make_shared()), path(path), id(id), readSemaphore(SERVER_KNOBS->ROCKSDB_READ_QUEUE_SOFT_MAX), @@ -3569,7 +3569,7 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore { errorFuture(forwardError(errorListener->getFuture())), dbOptions(getOptions()), shardManager(path, id, dbOptions, errorListener, eventListener, &counters), rocksDBMetrics(std::make_shared(id, dbOptions.statistics)) { - // In simluation, run the reader/writer threads as Coro threads (i.e. in the network thread. The storage + // In simulation, run the reader/writer threads as Coro threads (i.e. in the network thread. The storage // engine is still multi-threaded as background compaction threads are still present. Reads/writes to disk // will also block the network thread in a way that would be unacceptable in production but is a necessary // evil here. When performing the reads in background threads in simulation, the event loop thinks there is @@ -3706,7 +3706,7 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore { } } - // Checks and waits for few seconds if rocskdb is overloaded. + // Checks and waits for few seconds if rocksdb is overloaded. 
ACTOR Future checkRocksdbState(rocksdb::DB* db) { state uint64_t estPendCompactBytes; state int count = SERVER_KNOBS->ROCKSDB_CAN_COMMIT_DELAY_TIMES_ON_OVERLOAD; @@ -4660,7 +4660,7 @@ TEST_CASE("noSim/ShardedRocksDB/CheckpointBasic") { iter0.reset(); iter1.reset(); ASSERT(!cpReader->inUse()); - TraceEvent(SevDebug, "ShardedRocksCheckpointReaaderTested"); + TraceEvent(SevDebug, "ShardedRocksCheckpointReaderTested"); std::vector> closes; closes.push_back(cpReader->close()); closes.push_back(kvStore->onClosed()); diff --git a/fdbserver/MoveKeys.actor.cpp b/fdbserver/MoveKeys.actor.cpp index 88511d27e21..b0ac852d6c1 100644 --- a/fdbserver/MoveKeys.actor.cpp +++ b/fdbserver/MoveKeys.actor.cpp @@ -2808,7 +2808,7 @@ ACTOR Future removeKeysFromFailedServer(Database cx, trace.detail("Begin", it.key); trace.detail("End", keyServers[i + 1].key); if (!dest.empty()) { - trace.detail("DropedDest", describe(dest)); + trace.detail("DroppedDest", describe(dest)); } trace.detail("NewTeamForDroppedShard", describe(teamForDroppedRange)); } else { @@ -2855,7 +2855,7 @@ ACTOR Future removeKeysFromFailedServer(Database cx, // Background cleanup is triggered when the normal cleanup (cleanUpDataMoveCore) with a succeed transaction // is failed to see the update of metadata (datamove key space) by the startMoveShard // For this case, the startMoveShard must exit without update the meta data -// This background cleanup is used to clean the placehold left by the normal cleanup +// This background cleanup is used to clean the placeholder left by the normal cleanup // To understand this trick of cleanup place holder, we have three cases: // (1) Race condition of dataMove metadata between cleanUpDataMoveCore and startMoveShard, and // cleanUpDataMoveCore wins the race. Then startMoveShard retries and see the place holder on the metadata diff --git a/fdbserver/MutationTracking.cpp b/fdbserver/MutationTracking.cpp index e84ef172d61..c7e3dd51cd8 100644 --- a/fdbserver/MutationTracking.cpp +++ b/fdbserver/MutationTracking.cpp @@ -67,7 +67,7 @@ TraceEvent debugMutationEnabled(const char* context, Version version, MutationRe return TraceEvent(); } -TraceEvent debugEncrptedMutationEnabled(const char* context, Version version, MutationRef const& mutation, UID id) { +TraceEvent debugEncryptedMutationEnabled(const char* context, Version version, MutationRef const& mutation, UID id) { ASSERT(mutation.type == mutation.Encrypted); MutationRef fmutation = Standalone(mutation); Arena tempArena; @@ -127,7 +127,7 @@ TraceEvent debugTagsAndMessageEnabled(const char* context, Version version, Stri #if MUTATION_TRACKING_ENABLED TraceEvent debugMutation(const char* context, Version version, MutationRef const& mutation, UID id) { if (mutation.type == mutation.Encrypted) { - return debugEncrptedMutationEnabled(context, version, mutation, id); + return debugEncryptedMutationEnabled(context, version, mutation, id); } else { return debugMutationEnabled(context, version, mutation, id); } diff --git a/fdbserver/OldTLogServer_6_0.actor.cpp b/fdbserver/OldTLogServer_6_0.actor.cpp index 845a80094b6..ab46dfc984d 100644 --- a/fdbserver/OldTLogServer_6_0.actor.cpp +++ b/fdbserver/OldTLogServer_6_0.actor.cpp @@ -1922,8 +1922,8 @@ ACTOR Future tLogEnablePopReq(TLogEnablePopRequest enablePopReq, TLogData* TraceEvent("TLogExecCmdPopEnable") .detail("UidStr", enablePopReq.snapUID.toString()) .detail("IgnorePopUid", self->ignorePopUid) - .detail("IgnporePopRequest", self->ignorePopRequest) - .detail("IgnporePopDeadline", self->ignorePopDeadline) + 
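Aside (not part of the patch): the checkRocksdbState comment corrected just above describes polling compaction debt and backing off for a few seconds when RocksDB is overloaded. A rough standalone sketch of that idea against the stock RocksDB API follows; the threshold, retry count, and sleep interval are made up, and the real actor consults several knobs and properties.

// Rough sketch of an "is RocksDB overloaded?" back-off poll. Illustrative only.
#include <rocksdb/db.h>
#include <chrono>
#include <cstdint>
#include <thread>

void waitWhileOverloaded(rocksdb::DB* db) {
	const uint64_t kMaxPendingCompactionBytes = 64ull << 30; // hypothetical threshold (64 GiB)
	for (int attempt = 0; attempt < 5; ++attempt) {
		uint64_t pending = 0;
		db->GetIntProperty(rocksdb::DB::Properties::kEstimatePendingCompactionBytes, &pending);
		if (pending < kMaxPendingCompactionBytes)
			return; // not overloaded, the commit can proceed
		std::this_thread::sleep_for(std::chrono::seconds(1)); // back off briefly and re-check
	}
}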
.detail("IgnorePopRequest", self->ignorePopRequest) + .detail("IgnorePopDeadline", self->ignorePopDeadline) .detail("PersistentDataVersion", logData->persistentDataVersion) .detail("PersistentDatadurableVersion", logData->persistentDataDurableVersion) .detail("QueueCommittedVersion", logData->queueCommittedVersion.get()) diff --git a/fdbserver/OldTLogServer_6_2.actor.cpp b/fdbserver/OldTLogServer_6_2.actor.cpp index 96f8773c775..3daaacba5be 100644 --- a/fdbserver/OldTLogServer_6_2.actor.cpp +++ b/fdbserver/OldTLogServer_6_2.actor.cpp @@ -1709,7 +1709,7 @@ Future tLogPeekMessages(PromiseType replyPromise, messages.serializeBytes(messages2.toValue()); } } else { - // FIXME: Limit to approximately DESIRED_TOTATL_BYTES somehow. + // FIXME: Limit to approximately DESIRED_TOTAL_BYTES somehow. RangeResult kvrefs = wait(self->persistentData->readRange( KeyRangeRef( persistTagMessageRefsKey(logData->logId, reqTag, reqBegin), @@ -2366,8 +2366,8 @@ ACTOR Future tLogEnablePopReq(TLogEnablePopRequest enablePopReq, TLogData* TraceEvent("TLogExecCmdPopEnable") .detail("UidStr", enablePopReq.snapUID.toString()) .detail("IgnorePopUid", self->ignorePopUid) - .detail("IgnporePopRequest", self->ignorePopRequest) - .detail("IgnporePopDeadline", self->ignorePopDeadline) + .detail("IgnorePopRequest", self->ignorePopRequest) + .detail("IgnorePopDeadline", self->ignorePopDeadline) .detail("PersistentDataVersion", logData->persistentDataVersion) .detail("PersistentDatadurableVersion", logData->persistentDataDurableVersion) .detail("QueueCommittedVersion", logData->queueCommittedVersion.get()) diff --git a/fdbserver/QuietDatabase.actor.cpp b/fdbserver/QuietDatabase.actor.cpp index bad611d66fb..a4e7256b132 100644 --- a/fdbserver/QuietDatabase.actor.cpp +++ b/fdbserver/QuietDatabase.actor.cpp @@ -556,7 +556,7 @@ ACTOR Future getTeamCollectionValid(Database cx, WorkerInterface dataDistr // If the machineTeamRemover does not remove the machine team with the most machine teams, // we may oscillate between building more server teams by teamBuilder() and removing those teams by // teamRemover To avoid false positive in simulation, we skip the consistency check in this case. - // This is a corner case. This is a work-around if case the team number requirements cannot be satisfied. + // This is a corner case. This is a workaround if case the team number requirements cannot be satisfied. // // The checking for too many teams is disabled because teamRemover may not remove a team if it leads to 0 // team on a server diff --git a/fdbserver/RESTKmsConnector.actor.cpp b/fdbserver/RESTKmsConnector.actor.cpp index 3261ef1cb8e..a7d2caa0eff 100644 --- a/fdbserver/RESTKmsConnector.actor.cpp +++ b/fdbserver/RESTKmsConnector.actor.cpp @@ -148,7 +148,7 @@ struct KmsUrlStore { std::sort(kmsUrls.begin(), kmsUrls.end(), [](const KmsUrlCtx& l, const KmsUrlCtx& r) { // Sort the available URLs based on following rules: // 1. URL with higher unresponsiveness-penalty are least preferred - // 2. Among URLs with same unresponsiveness-penalty weight, URLs with more number of failed-respones are + // 2. Among URLs with same unresponsiveness-penalty weight, URLs with more number of failed-responses are // less preferred // 3. 
Lastly, URLs with more malformed response messages are less preferred @@ -338,23 +338,23 @@ ACTOR Future parseDiscoverKmsUrlFile(Reference ctx, s std::stringstream ss(buff.toString()); std::string url; while (std::getline(ss, url, DISCOVER_URL_FILE_URL_SEP)) { - std::string trimedUrl = boost::trim_copy(url); + std::string trimmedUrl = boost::trim_copy(url); // Remove the trailing '/'(s) - while (!trimedUrl.empty() && trimedUrl.ends_with('/')) { - trimedUrl.pop_back(); + while (!trimmedUrl.empty() && trimmedUrl.ends_with('/')) { + trimmedUrl.pop_back(); } - if (trimedUrl.empty()) { + if (trimmedUrl.empty()) { // Empty URL, ignore and continue continue; } - auto itr = urlMap.find(trimedUrl); + auto itr = urlMap.find(trimmedUrl); if (itr != urlMap.end()) { if (FLOW_KNOBS->REST_LOG_LEVEL >= RESTLogSeverity::INFO) { TraceEvent("RESTParseDiscoverKmsUrlsExistingUrl", ctx->uid).detail("UrlCtx", itr->second.toString()); } ctx->kmsUrlStore.kmsUrls.emplace_back(itr->second); } else { - auto urlCtx = KmsUrlCtx(trimedUrl); + auto urlCtx = KmsUrlCtx(trimmedUrl); if (FLOW_KNOBS->REST_LOG_LEVEL >= RESTLogSeverity::INFO) { TraceEvent("RESTParseDiscoverKmsUrlsAddUrl", ctx->uid).detail("UrlCtx", urlCtx.toString()); } diff --git a/fdbserver/RESTSimKmsVault.actor.cpp b/fdbserver/RESTSimKmsVault.actor.cpp index 2eac3bb55ba..f4a168b8186 100644 --- a/fdbserver/RESTSimKmsVault.actor.cpp +++ b/fdbserver/RESTSimKmsVault.actor.cpp @@ -206,7 +206,7 @@ void addCipherDetailToRespDoc(rapidjson::Document& doc, cipherDetails.PushBack(cipherDetail, doc.GetAllocator()); } -void addBlobMetadaToResDoc(rapidjson::Document& doc, rapidjson::Value& blobDetails, const EncryptCipherDomainId domId) { +void addBlobMetadataToResDoc(rapidjson::Document& doc, rapidjson::Value& blobDetails, const EncryptCipherDomainId domId) { Standalone detailsRef = SimKmsVault::getBlobMetadata(domId, bgUrl); rapidjson::Value blobDetail(rapidjson::kObjectType); @@ -358,7 +358,7 @@ VaultResponse handleFetchKeysByKeyIds(const std::string& content) { return response; } -VaultResponse handleFetchBlobMetada(const std::string& content) { +VaultResponse handleFetchBlobMetadata(const std::string& content) { VaultResponse response; rapidjson::Document doc; @@ -390,7 +390,7 @@ VaultResponse handleFetchBlobMetada(const std::string& content) { rapidjson::Value blobDetails(rapidjson::kArrayType); for (const auto& blobDetail : doc[BLOB_METADATA_DETAILS_TAG].GetArray()) { EncryptCipherDomainId domainId = blobDetail[BLOB_METADATA_DOMAIN_ID_TAG].GetInt64(); - addBlobMetadaToResDoc(doc, blobDetails, domainId); + addBlobMetadataToResDoc(doc, blobDetails, domainId); } rapidjson::Value memberKey(BLOB_METADATA_DETAILS_TAG, result.GetAllocator()); result.AddMember(memberKey, blobDetails, result.GetAllocator()); @@ -406,7 +406,7 @@ VaultResponse handleFetchBlobMetada(const std::string& content) { ASSERT(!response.failed); response.buff = std::string(sb.GetString(), sb.GetSize()); - //TraceEvent(SevDebug, "FetchBlobMetadataResponeStr").detail("Str", response.buff); + //TraceEvent(SevDebug, "FetchBlobMetadataResponseStr").detail("Str", response.buff); return response; } @@ -423,7 +423,7 @@ ACTOR Future simKmsVaultRequestHandler(Reference re } else if (request->resource.compare(REST_SIM_KMS_VAULT_GET_ENCRYPTION_KEYS_BY_DOMAIN_IDS_RESOURCE) == 0) { vaultResponse = handleFetchKeysByDomainIds(request->data.content); } else if (request->resource.compare(REST_SIM_KMS_VAULT_GET_BLOB_METADATA_RESOURCE) == 0) { - vaultResponse = handleFetchBlobMetada(request->data.content); + 
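Aside (not part of the patch): the three KmsUrlStore preference rules listed above (unresponsiveness penalty first, then failed responses, then malformed responses, all ascending) amount to a lexicographic sort. A sketch with hypothetical field names standing in for whatever KmsUrlCtx actually tracks:

// Sketch of the URL preference order described in the KmsUrlStore comment above.
#include <algorithm>
#include <cstdint>
#include <string>
#include <tuple>
#include <vector>

struct UrlCtx {
	std::string url;
	uint64_t unresponsivenessPenalty = 0; // rule 1: higher penalty -> least preferred
	uint64_t failedResponses = 0;         // rule 2: tie-break on failed responses
	uint64_t malformedResponses = 0;      // rule 3: final tie-break on malformed responses
};

void sortByPreference(std::vector<UrlCtx>& urls) {
	std::sort(urls.begin(), urls.end(), [](const UrlCtx& l, const UrlCtx& r) {
		return std::tie(l.unresponsivenessPenalty, l.failedResponses, l.malformedResponses) <
		       std::tie(r.unresponsivenessPenalty, r.failedResponses, r.malformedResponses);
	});
}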
vaultResponse = handleFetchBlobMetadata(request->data.content); } else { TraceEvent("UnexpectedResource").detail("Resource", request->resource); throw http_bad_response(); @@ -768,7 +768,7 @@ TEST_CASE("/restSimKmsVault/GetBlobMetadata/missingVersion") { EncryptCipherDomainIdVec domIds; std::string requestContent = getFakeBlobDomainIdsRequestContent(domIds, FaultType::MISSING_VERSION); - VaultResponse response = handleFetchBlobMetada(requestContent); + VaultResponse response = handleFetchBlobMetadata(requestContent); ASSERT(response.failed); Optional detail = getErrorDetail(response.buff); ASSERT(detail.present()); @@ -781,7 +781,7 @@ TEST_CASE("/restSimKmsVault/GetBlobMetadata/invalidVersion") { EncryptCipherDomainIdVec domIds; std::string requestContent = getFakeBlobDomainIdsRequestContent(domIds, FaultType::INVALID_VERSION); - VaultResponse response = handleFetchBlobMetada(requestContent); + VaultResponse response = handleFetchBlobMetadata(requestContent); ASSERT(response.failed); Optional detail = getErrorDetail(response.buff); ASSERT(detail.present()); @@ -794,7 +794,7 @@ TEST_CASE("/restSimKmsVault/GetByKeyIds/missingValidationTokens") { EncryptCipherDomainIdVec domIds; std::string requestContent = getFakeBlobDomainIdsRequestContent(domIds, FaultType::MISSING_VALIDATION_TOKEN); - VaultResponse response = handleFetchBlobMetada(requestContent); + VaultResponse response = handleFetchBlobMetadata(requestContent); ASSERT(response.failed); Optional detail = getErrorDetail(response.buff); ASSERT(detail.present()); @@ -807,7 +807,7 @@ TEST_CASE("/restSimKmsVault/GetBlobMetadata/foo") { EncryptCipherDomainIdVec domIds; std::string requestContent = getFakeBlobDomainIdsRequestContent(domIds); - VaultResponse response = handleFetchBlobMetada(requestContent); + VaultResponse response = handleFetchBlobMetadata(requestContent); validateBlobLookup(response, domIds); return Void(); } \ No newline at end of file diff --git a/fdbserver/RestoreApplier.actor.cpp b/fdbserver/RestoreApplier.actor.cpp index 1f6f66870b6..7e17b1a0be2 100644 --- a/fdbserver/RestoreApplier.actor.cpp +++ b/fdbserver/RestoreApplier.actor.cpp @@ -145,7 +145,7 @@ ACTOR static Future handleSendMutationVectorRequest(RestoreSendVersionedMu TraceEvent(printTrace ? 
SevInfo : SevFRDebugInfo, "FastRestoreApplierPhaseReceiveMutations", self->id()) .detail("BatchIndex", req.batchIndex) .detail("RestoreAsset", req.asset.toString()) - .detail("RestoreAssetMesssageIndex", batchData->processedFileState[req.asset].get()) + .detail("RestoreAssetMessageIndex", batchData->processedFileState[req.asset].get()) .detail("Request", req.toString()) .detail("CurrentMemory", getSystemStatistics().processMemory) .detail("PreviousVersionBatchState", batchData->vbState.get()) @@ -367,7 +367,7 @@ ACTOR static Future precomputeMutationsResult(Reference int64_t batchIndex, Database cx) { // Apply range mutations (i.e., clearRange) to database cx - TraceEvent("FastRestoreApplerPhasePrecomputeMutationsResultStart", applierID) + TraceEvent("FastRestoreApplierPhasePrecomputeMutationsResultStart", applierID) .detail("BatchIndex", batchIndex) .detail("Step", "Applying clear range mutations to DB") .detail("ClearRanges", batchData->stagingKeyRanges.size()); @@ -398,7 +398,7 @@ ACTOR static Future precomputeMutationsResult(Reference } // Apply range mutations (i.e., clearRange) to stagingKeyRanges - TraceEvent("FastRestoreApplerPhasePrecomputeMutationsResult", applierID) + TraceEvent("FastRestoreApplierPhasePrecomputeMutationsResult", applierID) .detail("BatchIndex", batchIndex) .detail("Step", "Applying clear range mutations to staging keys") .detail("ClearRanges", batchData->stagingKeyRanges.size()) @@ -409,7 +409,7 @@ ACTOR static Future precomputeMutationsResult(Reference std::map::iterator ub = batchData->stagingKeys.lower_bound(rangeMutation.mutation.param2); while (lb != ub) { if (lb->first >= rangeMutation.mutation.param2) { - TraceEvent(SevError, "FastRestoreApplerPhasePrecomputeMutationsResultIncorrectUpperBound") + TraceEvent(SevError, "FastRestoreApplierPhasePrecomputeMutationsResultIncorrectUpperBound") .detail("Key", lb->first) .detail("ClearRangeUpperBound", rangeMutation.mutation.param2) .detail("UsedUpperBound", ub->first); @@ -421,13 +421,13 @@ ACTOR static Future precomputeMutationsResult(Reference lb++; } } - TraceEvent("FastRestoreApplerPhasePrecomputeMutationsResult", applierID) + TraceEvent("FastRestoreApplierPhasePrecomputeMutationsResult", applierID) .detail("BatchIndex", batchIndex) .detail("Step", "Wait on applying clear range mutations to DB") .detail("FutureClearRanges", fClearRanges.size()); wait(waitForAll(fClearRanges)); - TraceEvent("FastRestoreApplerPhasePrecomputeMutationsResult", applierID) + TraceEvent("FastRestoreApplierPhasePrecomputeMutationsResult", applierID) .detail("BatchIndex", batchIndex) .detail("Step", "Getting and computing staging keys") .detail("StagingKeys", batchData->stagingKeys.size()); @@ -461,7 +461,7 @@ ACTOR static Future precomputeMutationsResult(Reference } } - TraceEvent("FastRestoreApplerPhasePrecomputeMutationsResult", applierID) + TraceEvent("FastRestoreApplierPhasePrecomputeMutationsResult", applierID) .detail("BatchIndex", batchIndex) .detail("Step", "Compute the other staging keys") .detail("StagingKeys", batchData->stagingKeys.size()) @@ -480,7 +480,7 @@ ACTOR static Future precomputeMutationsResult(Reference // Sanity check all stagingKeys have been precomputed ASSERT_WE_THINK(batchData->allKeysPrecomputed()); - TraceEvent("FastRestoreApplerPhasePrecomputeMutationsResultDone", applierID).detail("BatchIndex", batchIndex); + TraceEvent("FastRestoreApplierPhasePrecomputeMutationsResultDone", applierID).detail("BatchIndex", batchIndex); return Void(); } @@ -605,7 +605,7 @@ ACTOR static Future applyStagingKeys(Reference 
batchData state int txnBatches = 0; double txnSize = 0; std::vector> fBatches; - TraceEvent("FastRestoreApplerPhaseApplyStagingKeysStart", applierID) + TraceEvent("FastRestoreApplierPhaseApplyStagingKeysStart", applierID) .detail("BatchIndex", batchIndex) .detail("StagingKeys", batchData->stagingKeys.size()); batchData->totalBytesToWrite = 0; @@ -644,7 +644,7 @@ ACTOR static Future applyStagingKeys(Reference batchData wait(waitForAll(fBatches)); - TraceEvent("FastRestoreApplerPhaseApplyStagingKeysDone", applierID) + TraceEvent("FastRestoreApplierPhaseApplyStagingKeysDone", applierID) .detail("BatchIndex", batchIndex) .detail("StagingKeys", batchData->stagingKeys.size()) .detail("TransactionBatches", txnBatches) diff --git a/fdbserver/RestoreController.actor.cpp b/fdbserver/RestoreController.actor.cpp index ec308bfebbc..8ab2b21d777 100644 --- a/fdbserver/RestoreController.actor.cpp +++ b/fdbserver/RestoreController.actor.cpp @@ -145,7 +145,7 @@ ACTOR Future startRestoreController(Reference controlle return Void(); } -// RestoreWorker that has restore controller role: Recruite a role for each worker +// RestoreWorker that has restore controller role: Recruit a role for each worker ACTOR Future recruitRestoreRoles(Reference controllerWorker, Reference controllerData) { state int nodeIndex = 0; @@ -357,7 +357,7 @@ ACTOR static Future processRestoreRequest(ReferencedumpVersionBatches(self->versionBatches); state std::vector> fBatches; - state std::vector versionBatches; // To randomize invoking order of version batchs + state std::vector versionBatches; // To randomize invoking order of version batches for (auto& vb : self->versionBatches) { versionBatches.push_back(vb.second); } diff --git a/fdbserver/RestoreLoader.actor.cpp b/fdbserver/RestoreLoader.actor.cpp index fa5e0d335e7..260096ddf2e 100644 --- a/fdbserver/RestoreLoader.actor.cpp +++ b/fdbserver/RestoreLoader.actor.cpp @@ -92,7 +92,7 @@ ACTOR Future handleFinishVersionBatchRequest(RestoreVersionBatchRequest re // Dispatch requests based on node's business (i.e, cpu usage for now) and requests' priorities // Requests for earlier version batches are preferred; which is equivalent to -// sendMuttionsRequests are preferred than loadingFileRequests +// sendMutationsRequests are preferred than loadingFileRequests ACTOR Future dispatchRequests(Reference self, Database cx) { try { state int curVBInflightReqs = 0; @@ -449,7 +449,7 @@ ACTOR static Future _parsePartitionedLogFileOnLoader( mutation = decryptedMutation; } - // Skip mutation whose commitVesion < range kv's version + // Skip mutation whose commitVersion < range kv's version if (logMutationTooOld(pRangeVersions, mutation, msgVersion.version)) { cc->oldLogMutations += 1; wait(yield()); // avoid potential stack overflows @@ -1097,7 +1097,7 @@ bool concatenateBackupMutationForLogFile(SerializedMutationListMap* pMutationMap } } else { // Concatenate the val string with the same commitVersion it->second.first = - it->second.first.contents().withSuffix(val_input.contents()); // Assign the new Areana to the map's value + it->second.first.contents().withSuffix(val_input.contents()); // Assign the new Arena to the map's value auto& currentPart = it->second.second; if (part != (currentPart + 1)) { // Check if the same range or log file has been processed more than once! @@ -1181,7 +1181,7 @@ ACTOR Future _parseSerializedMutation(KeyRangeMap* pRangeVersions mutation = decryptedMutation; } // Should this mutation be skipped? 
- // Skip mutation whose commitVesion < range kv's version + // Skip mutation whose commitVersion < range kv's version if (logMutationTooOld(pRangeVersions, mutation, commitVersion)) { cc->oldLogMutations += 1; } else { diff --git a/fdbserver/RocksDBCheckpointUtils.actor.cpp b/fdbserver/RocksDBCheckpointUtils.actor.cpp index c589c2d46c8..f7d869ba6de 100644 --- a/fdbserver/RocksDBCheckpointUtils.actor.cpp +++ b/fdbserver/RocksDBCheckpointUtils.actor.cpp @@ -91,7 +91,7 @@ rocksdb::ExportImportFilesMetaData getMetaData(const CheckpointMetaData& checkpo liveFileMetaData.num_entries = fileMetaData.num_entries; liveFileMetaData.num_deletions = fileMetaData.num_deletions; liveFileMetaData.oldest_blob_file_number = fileMetaData.oldest_blob_file_number; - liveFileMetaData.oldest_ancester_time = fileMetaData.oldest_ancester_time; + liveFileMetaData.oldest_ancestor_time = fileMetaData.oldest_ancestor_time; liveFileMetaData.file_creation_time = fileMetaData.file_creation_time; liveFileMetaData.epoch_number = fileMetaData.epoch_number; liveFileMetaData.name = fileMetaData.name; diff --git a/fdbserver/SimulatedCluster.actor.cpp b/fdbserver/SimulatedCluster.actor.cpp index 3ffa5cfa8b5..1e419207064 100644 --- a/fdbserver/SimulatedCluster.actor.cpp +++ b/fdbserver/SimulatedCluster.actor.cpp @@ -2180,7 +2180,7 @@ void SimulationConfig::setMachineCount(const TestConfig& testConfig) { if (generateMachineTeamTestConfig) { // When DESIRED_TEAMS_PER_SERVER is set to 1, the desired machine team number is 5 // while the max possible machine team number is 10. - // If machine_count > 5, we can still test the effectivenss of machine teams + // If machine_count > 5, we can still test the effectiveness of machine teams // Note: machine_count may be much larger than 5 because we may have a big replication factor machine_count = std::max(machine_count, deterministicRandom()->randomInt( diff --git a/fdbserver/Status.actor.cpp b/fdbserver/Status.actor.cpp index 1501221b006..162324eea90 100644 --- a/fdbserver/Status.actor.cpp +++ b/fdbserver/Status.actor.cpp @@ -2690,13 +2690,13 @@ static JsonBuilderObject faultToleranceStatusFetcher(DatabaseConfiguration confi zoneFailuresWithoutLosingData = std::max(std::min(zoneFailuresWithoutLosingData, oldLogFaultTolerance), -1); statusObj["max_zone_failures_without_losing_data"] = zoneFailuresWithoutLosingData; - int32_t maxAvaiabilityZoneFailures = configuration.maxZoneFailuresTolerated(fullyReplicatedRegions, true); + int32_t maxAvailabilityZoneFailures = configuration.maxZoneFailuresTolerated(fullyReplicatedRegions, true); if (underMaintenance) { - maxAvaiabilityZoneFailures--; + maxAvailabilityZoneFailures--; } statusObj["max_zone_failures_without_losing_availability"] = std::max( - std::min(maxAvaiabilityZoneFailures, std::min(extraTlogEligibleZones, zoneFailuresWithoutLosingData)), -1); + std::min(maxAvailabilityZoneFailures, std::min(extraTlogEligibleZones, zoneFailuresWithoutLosingData)), -1); return statusObj; } diff --git a/fdbserver/StorageCache.actor.cpp b/fdbserver/StorageCache.actor.cpp index 09394c2e39e..032b90a37ad 100644 --- a/fdbserver/StorageCache.actor.cpp +++ b/fdbserver/StorageCache.actor.cpp @@ -174,7 +174,7 @@ struct StorageCacheData { KeyRangeMap> cachedRangeMap; // map of cached key-ranges uint64_t cacheRangeChangeCounter; // Max( CacheRangeInfo->changecounter ) - // TODO Add cache metrics, such as available memory/in-use memory etc to help dat adistributor assign cached ranges + // TODO Add cache metrics, such as available memory/in-use memory etc to help 
data distributor assign cached ranges // StorageCacheMetrics metrics; // newestAvailableVersion[k] @@ -249,7 +249,7 @@ struct StorageCacheData { fetchKeysParallelismLock(SERVER_KNOBS->FETCH_KEYS_PARALLELISM_BYTES), debug_inApplyUpdate(false), debug_lastValidateTime(0), versionLag(0), behind(false), counters(this) { version.initMetric("StorageCacheData.Version"_sr, counters.cc.getId()); - desiredOldestVersion.initMetric("StorageCacheData.DesriedOldestVersion"_sr, counters.cc.getId()); + desiredOldestVersion.initMetric("StorageCacheData.DesiredOldestVersion"_sr, counters.cc.getId()); oldestVersion.initMetric("StorageCacheData.OldestVersion"_sr, counters.cc.getId()); newestAvailableVersion.insert(allKeys, invalidVersion); @@ -1020,7 +1020,7 @@ void StorageCacheData::applyMutation(MutationRef const& m, Arena& arena, Storage KeyRef nextKey = keyAfter(m.param1, arena); if (end != nextKey) { ASSERT(end > nextKey); - // TODO double check if it's okay to let go of the the insert version of the "right half" + // TODO double check if it's okay to let go of the insert version of the "right half" // FIXME: This copy is technically an asymptotic problem, definitely a waste of memory (copy of keyAfter // is a waste, but not asymptotic) data.insert(nextKey, ValueOrClearToRef::clearTo(KeyRef(arena, end))); @@ -1274,7 +1274,7 @@ ACTOR Future fetchKeys(StorageCacheData* data, AddingCacheRange* cacheRang // TODO: double check the following block of code!! // We want to make sure that we can't query below lastAvailable, by waiting for the oldestVersion to become - // lastAvaialble + // lastAvailable auto navr = data->newestAvailableVersion.intersectingRanges(keys); Version lastAvailable = invalidVersion; for (auto r = navr.begin(); r != navr.end(); ++r) { @@ -1822,21 +1822,21 @@ ACTOR Future compactCache(StorageCacheData* data) { state Version desiredVersion = data->desiredOldestVersion.get(); // Call the compaction routine that does the actual work, //TraceEvent(SevDebug, "SCCompactCache", data->thisServerID).detail("DesiredVersion", desiredVersion); - // TODO It's a synchronous function call as of now. Should it asynch? + // TODO It's a synchronous function call as of now. Should it async? data->mutableData().compact(desiredVersion); Future finishedForgetting = data->mutableData().forgetVersionsBeforeAsync(desiredVersion, TaskPriority::CompactCache); data->oldestVersion.set(desiredVersion); wait(finishedForgetting); // TODO how do we yield here? This may not be enough, because compact() does the heavy lifting - // of compating the VersionedMap. We should probably look into per version compaction and then + // of compacting the VersionedMap. We should probably look into per version compaction and then // we can yield after compacting one version wait(yield(TaskPriority::CompactCache)); // TODO what flowlock to acquire during compaction? compactionInProgress.send(Void()); wait(delay(2.0)); // we want to wait at least some small amount of time before - // wait( delay(0, TaskPriority::CompactCache) ); //Setting compactionInProgess could cause the cache server to + // wait( delay(0, TaskPriority::CompactCache) ); //Setting compactionInProgress could cause the cache server to // shut down, so delay to check for cancellation } } @@ -2099,7 +2099,7 @@ ACTOR Future pullAsyncData(StorageCacheData* data) { if (data->otherError.getFuture().isReady()) data->otherError.getFuture().get(); - // we can get rid of versions beyond maxVerionsInMemory at any point. 
Update the + // we can get rid of versions beyond maxVersionsInMemory at any point. Update the // desiredOldestVersion and that may invoke the compaction actor Version maxVersionsInMemory = SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS; Version proposedOldestVersion = data->version.get() - maxVersionsInMemory; diff --git a/fdbserver/TLogServer.actor.cpp b/fdbserver/TLogServer.actor.cpp index b86bb901bcd..c31c4d0f94b 100644 --- a/fdbserver/TLogServer.actor.cpp +++ b/fdbserver/TLogServer.actor.cpp @@ -1982,7 +1982,7 @@ Future tLogPeekMessages(PromiseType replyPromise, messages.serializeBytes(messages2.toValue()); } } else { - // FIXME: Limit to approximately DESIRED_TOTATL_BYTES somehow. + // FIXME: Limit to approximately DESIRED_TOTAL_BYTES somehow. RangeResult kvrefs = wait(self->persistentData->readRange( KeyRangeRef( persistTagMessageRefsKey(logData->logId, reqTag, reqBegin), diff --git a/fdbserver/TagPartitionedLogSystem.actor.cpp b/fdbserver/TagPartitionedLogSystem.actor.cpp index eb5818d14c2..065c0b6d36f 100644 --- a/fdbserver/TagPartitionedLogSystem.actor.cpp +++ b/fdbserver/TagPartitionedLogSystem.actor.cpp @@ -1509,7 +1509,7 @@ ACTOR Future TagPartitionedLogSystem::popFromLog(TagPartitionedLogSystem* loop { wait(delay(delayBeforePop, TaskPriority::TLogPop)); - // to: first is upto version, second is durableKnownComittedVersion + // to: first is upto version, second is durableKnownCommittedVersion state std::pair to = self->outstandingPops[std::make_pair(log->get().id(), tag)]; if (to.first <= last) { @@ -2209,7 +2209,7 @@ ACTOR Future TagPartitionedLogSystem::epochEnd(Reference(this, meta, version) }); } - // Set the pending oldest versiont to keep as of the next commit + // Set the pending oldest version to keep as of the next commit void setOldestReadableVersion(Version v) override { ASSERT(v >= header.oldestVersion); ASSERT(v <= header.committedVersion); @@ -3740,7 +3740,7 @@ class DWALPager final : public IPager2 { int64_t reusable = reusablePageSpace + reusableQueueSpace + reusablePagerSlackSpace; - // Space currently in used by old page versions have have not yet been freed due to the remap cleanup window. + // Space currently in use by old page versions have not yet been freed due to the remap cleanup window. int64_t temp = remapQueue.numEntries * physicalPageSize; return StorageBytes(free, total, pagerPhysicalSize, free + reusable, temp); @@ -5126,7 +5126,7 @@ class VersionedBTree { ASSERT(entry.height > 1); // Iterate over page entries, skipping key decoding using BTreePage::ValueTree which uses - // RedwoodRecordRef::DeltaValueOnly as the delta type type to skip key decoding + // RedwoodRecordRef::DeltaValueOnly as the delta type to skip key decoding BTreePage::ValueTree::Cursor c(makeReference(dbBegin, dbEnd), btPage.valueTree()); ASSERT(c.moveFirst()); @@ -5404,7 +5404,7 @@ class VersionedBTree { self->clear(KeyRangeRef(dbBegin.key, dbEnd.key)); wait(self->commit(self->getLastCommittedVersion() + 1)); - // Loop commits until the the lazy delete queue is completely processed. + // Loop commits until the lazy delete queue is completely processed. 
loop { wait(self->commit(self->getLastCommittedVersion() + 1)); @@ -5977,7 +5977,7 @@ class VersionedBTree { ASSERT(!enableEncryptionDomain || (a.domainId.present() && b.domainId.present())); if (!enableEncryptionDomain || a.domainId.get() == b.domainId.get()) { - // While the last page page has too much slack and the second to last page + // While the last page has too much slack and the second to last page // has more than the minimum record count, shift a record from the second // to last page to the last page. while (b.slackFraction() > maxNewSlack && a.count > minRecords) { @@ -7279,7 +7279,7 @@ class VersionedBTree { // If u's subtree is either all cleared or all unchanged if (uniform) { // We do not need to recurse to this subtree. Next, let's see if we can embiggen u's range to - // include sibling subtrees also covered by (mBegin, mEnd) so we can not recurse to those, too. + // include sibling subtrees also covered by (mBegin, mEnd) so we cannot recurse to those, too. // If the cursor is valid, u.subtreeUpperBound is the cursor's position, which is >= mEnd.key(). // If equal, no range expansion is possible. if (cursor.valid() && mEnd.key() != u.subtreeUpperBound.key) { @@ -7401,7 +7401,7 @@ class VersionedBTree { debug_print(addPrefix(context, update->toString())); // TODO(yiwu): check whether we can pass decodeUpperBound as nextBoundary when the last slice - // have childenChanged=true. + // have childrenChanged=true. modifier.applyUpdate(*slices.back(), modifier.changesMade || slices.back()->childrenChanged ? &update->subtreeUpperBound : &update->decodeUpperBound); @@ -10989,7 +10989,7 @@ TEST_CASE(":/redwood/performance/set") { int recs = recordsThisCommit; int kvb = kvBytesThisCommit; - // Capturing invervalStart via this->intervalStart makes IDE's unhappy as they do not know about the + // Capturing intervalStart via this->intervalStart makes IDE's unhappy as they do not know about the // actor state object double* pIntervalStart = &intervalStart; diff --git a/fdbserver/fdbserver.actor.cpp b/fdbserver/fdbserver.actor.cpp index 13023973b90..9576b942368 100644 --- a/fdbserver/fdbserver.actor.cpp +++ b/fdbserver/fdbserver.actor.cpp @@ -740,7 +740,7 @@ static void printUsage(const char* name, bool devhelp) { " A multitester will wait for NUM testers before starting" " (defaults to 1)."); printOptionUsage("--test-PARAMNAME PARAMVALUE", - " Set a UnitTest named parameter to the given value. Names are case sensitive."); + " Set a UnitTest named parameter to the given value. 
Names are case-sensitive."); #ifdef __linux__ printOptionUsage("--rsssize SIZE", " Turns on automatic heap profiling when RSS memory size exceeds" diff --git a/fdbserver/include/fdbserver/ClusterController.actor.h b/fdbserver/include/fdbserver/ClusterController.actor.h index b82304e77ad..dad7db6d6d5 100644 --- a/fdbserver/include/fdbserver/ClusterController.actor.h +++ b/fdbserver/include/fdbserver/ClusterController.actor.h @@ -402,7 +402,7 @@ class ClusterControllerData { std::set excludedAddresses(req.excludeAddresses.begin(), req.excludeAddresses.end()); for (auto& it : id_worker) { // the worker must be available, have the same dcID as CC, - // not be one of the excluded addrs from req and have the appropiate fitness + // not be one of the excluded addrs from req and have the appropriate fitness if (workerAvailable(it.second, false) && clusterControllerDcId == it.second.details.interf.locality.dcId() && !addressExcluded(excludedAddresses, it.second.details.interf.address()) && @@ -3127,7 +3127,7 @@ class ClusterControllerData { } } - auto deterministicDecendingOrder = [](const std::pair& a, + auto deterministicDescendingOrder = [](const std::pair& a, const std::pair& b) -> bool { return a.first > b.first || (a.first == b.first && a.second < b.second); }; @@ -3137,13 +3137,13 @@ class ClusterControllerData { for (const auto& [degradedPeer, complainers] : degradedLinkDst2Src) { count2DegradedPeer.push_back({ complainers.size(), degradedPeer }); } - std::sort(count2DegradedPeer.begin(), count2DegradedPeer.end(), deterministicDecendingOrder); + std::sort(count2DegradedPeer.begin(), count2DegradedPeer.end(), deterministicDescendingOrder); std::vector> count2DisconnectedPeer; for (const auto& [disconnectedPeer, complainers] : disconnectedLinkDst2Src) { count2DisconnectedPeer.push_back({ complainers.size(), disconnectedPeer }); } - std::sort(count2DisconnectedPeer.begin(), count2DisconnectedPeer.end(), deterministicDecendingOrder); + std::sort(count2DisconnectedPeer.begin(), count2DisconnectedPeer.end(), deterministicDescendingOrder); // Go through all reported degraded peers by decreasing order of the number of complainers. 
For a particular // degraded peer, if a complainer has already be considered as degraded, we skip the current examine degraded diff --git a/fdbserver/include/fdbserver/DDTeamCollection.h b/fdbserver/include/fdbserver/DDTeamCollection.h index e774c9730c3..cfeed170cb9 100644 --- a/fdbserver/include/fdbserver/DDTeamCollection.h +++ b/fdbserver/include/fdbserver/DDTeamCollection.h @@ -624,7 +624,7 @@ class DDTeamCollection : public ReferenceCounted { // The empty team is used as the starting point to move data to the remote DB // begin : the start of the team member ID // end : end of the team member ID - // isIntialTeam : False when the team is added by addTeamsBestOf(); True otherwise, e.g., + // isInitialTeam : False when the team is added by addTeamsBestOf(); True otherwise, e.g., // when the team added at init() when we recreate teams by looking up DB template void addTeam(InputIt begin, InputIt end, IsInitialTeam isInitialTeam) { diff --git a/fdbserver/include/fdbserver/DeltaTree.h b/fdbserver/include/fdbserver/DeltaTree.h index e03d0af491d..46475d9d877 100644 --- a/fdbserver/include/fdbserver/DeltaTree.h +++ b/fdbserver/include/fdbserver/DeltaTree.h @@ -225,7 +225,7 @@ struct DeltaTree { // Returns true if otherAncestor is the previous ("greatest lesser") ancestor bool otherAncestorPrev() const { return parent && parent->leftChild == this; } - // Returns true if otherAncestor is the next ("least greator") ancestor + // Returns true if otherAncestor is the next ("least greater") ancestor bool otherAncestorNext() const { return parent && parent->rightChild == this; } DecodedNode* getPrevAncestor() const { return otherAncestorPrev() ? otherAncestor : parent; } @@ -1547,7 +1547,7 @@ struct DeltaTree2 { return false; } - // Find the base base to borrow from, see if the resulting delta fits into the tree + // Find the base to borrow from, see if the resulting delta fits into the tree int leftBaseIndex, rightBaseIndex; bool addingRight = cmp > 0; if (addingRight) { diff --git a/fdbserver/include/fdbserver/KnobProtectiveGroups.h b/fdbserver/include/fdbserver/KnobProtectiveGroups.h index 05ef3636fa7..bde2e970924 100644 --- a/fdbserver/include/fdbserver/KnobProtectiveGroups.h +++ b/fdbserver/include/fdbserver/KnobProtectiveGroups.h @@ -54,7 +54,7 @@ class KnobProtectiveGroup { void assignKnobs(const KnobKeyValuePairs& overrideKnobs); public: - KnobProtectiveGroup(const KnobKeyValuePairs& overridenKnobs_); + KnobProtectiveGroup(const KnobKeyValuePairs& overriddenKnobs_); ~KnobProtectiveGroup(); }; diff --git a/fdbserver/include/fdbserver/MovingWindow.h b/fdbserver/include/fdbserver/MovingWindow.h index 09d2c0784e8..95588f671be 100644 --- a/fdbserver/include/fdbserver/MovingWindow.h +++ b/fdbserver/include/fdbserver/MovingWindow.h @@ -33,7 +33,7 @@ // want to get the actual average bytes moved rate by DD in the past DD_TRACE_MOVE_BYTES_AVERAGE_INTERVAL. We would have // a sense of how many bytes DD moved recently and it will help us get to know DD workload. // Comparison with Smoother: if you want to use a recency-based weighting(i.e. less important if sampled long time ago) -// Smoother(which uses an exponential function for smoothing) woule be a good choice. On the other hand, if you want to +// Smoother(which uses an exponential function for smoothing) would be a good choice. On the other hand, if you want to // know the average sample rates in the last , MovingWindow might be better. 
template diff --git a/fdbserver/include/fdbserver/OTELSpanContextMessage.h b/fdbserver/include/fdbserver/OTELSpanContextMessage.h index 77c94dd9349..7a2e384790b 100644 --- a/fdbserver/include/fdbserver/OTELSpanContextMessage.h +++ b/fdbserver/include/fdbserver/OTELSpanContextMessage.h @@ -27,7 +27,7 @@ #include "fdbclient/CommitTransaction.h" struct OTELSpanContextMessage { - // This message is pushed into the the transaction logs' memory to inform + // This message is pushed into the transaction logs' memory to inform // it what transaction subsequent mutations were a part of. This allows // transaction logs and storage servers to associate mutations with a // transaction identifier, called a span context. diff --git a/fdbserver/include/fdbserver/RestoreApplier.actor.h b/fdbserver/include/fdbserver/RestoreApplier.actor.h index 0abaf5ac5a3..fe362190bbf 100644 --- a/fdbserver/include/fdbserver/RestoreApplier.actor.h +++ b/fdbserver/include/fdbserver/RestoreApplier.actor.h @@ -68,7 +68,7 @@ struct StagingKey { // This could happen because the same mutation can be present in // overlapping mutation logs, because new TLogs can copy mutations // from old generation TLogs (or backup worker is recruited without - // knowning previously saved progress). + // knowing previously saved progress). ASSERT(type == m.type && key == m.param1 && val == m.param2); TraceEvent("SameVersion").detail("Version", version.toString()).detail("Mutation", m); return; diff --git a/fdbserver/include/fdbserver/RestoreCommon.actor.h b/fdbserver/include/fdbserver/RestoreCommon.actor.h index 4f8507fdc05..b24fb9b841b 100644 --- a/fdbserver/include/fdbserver/RestoreCommon.actor.h +++ b/fdbserver/include/fdbserver/RestoreCommon.actor.h @@ -258,7 +258,7 @@ Future getBatchReplies(RequestStream Interface::*channel, } state double start = now(); - state int oustandingReplies = requests.size(); + state int outstandingReplies = requests.size(); loop { try { state std::vector> cmdReplies; @@ -279,7 +279,7 @@ Future getBatchReplies(RequestStream Interface::*channel, TraceEvent(SevInfo, "FastRestoreGetBatchReplies") .suppressFor(1.0) .detail("Requests", requests.size()) - .detail("OutstandingReplies", oustandingReplies) + .detail("OutstandingReplies", outstandingReplies) .detail("ReplyIndex", i) .detail("ReplyIsReady", cmdReplies[i].isReady()) .detail("ReplyIsError", cmdReplies[i].isError()) @@ -291,7 +291,7 @@ Future getBatchReplies(RequestStream Interface::*channel, ongoingRepliesIndex.push_back(i); } } - ASSERT(ongoingReplies.size() == oustandingReplies); + ASSERT(ongoingReplies.size() == outstandingReplies); if (ongoingReplies.empty()) { break; } else { @@ -303,24 +303,24 @@ Future getBatchReplies(RequestStream Interface::*channel, for (int j = 0; j < ongoingReplies.size(); ++j) { if (ongoingReplies[j].isReady()) { std::get<2>(replyDurations[ongoingRepliesIndex[j]]) = now(); - --oustandingReplies; + --outstandingReplies; } else if (ongoingReplies[j].isError()) { // When this happens, - // the above assertion ASSERT(ongoingReplies.size() == oustandingReplies) will fail + // the above assertion ASSERT(ongoingReplies.size() == outstandingReplies) will fail TraceEvent(SevError, "FastRestoreGetBatchRepliesReplyError") .detail("OngoingReplyIndex", j) .detail("FutureError", ongoingReplies[j].getError().what()); } } } - ASSERT(oustandingReplies == 0); + ASSERT(outstandingReplies == 0); if (trackRequestLatency && SERVER_KNOBS->FASTRESTORE_TRACK_REQUEST_LATENCY) { // Calculate the latest end time for each interface std::map maxEndTime; - 
UID bathcID = deterministicRandom()->randomUniqueID(); + UID batchID = deterministicRandom()->randomUniqueID(); for (int i = 0; i < replyDurations.size(); ++i) { double endTime = std::get<2>(replyDurations[i]); - TraceEvent(SevInfo, "ProfileSendRequestBatchLatency", bathcID) + TraceEvent(SevInfo, "ProfileSendRequestBatchLatency", batchID) .detail("Node", std::get<0>(replyDurations[i])) .detail("Request", std::get<1>(replyDurations[i]).toString()) .detail("Duration", endTime - start); @@ -343,9 +343,9 @@ Future getBatchReplies(RequestStream Interface::*channel, } } if (latest - earliest > SERVER_KNOBS->FASTRESTORE_STRAGGLER_THRESHOLD_SECONDS) { - TraceEvent(SevWarn, "ProfileSendRequestBatchLatencyFoundStraggler", bathcID) + TraceEvent(SevWarn, "ProfileSendRequestBatchLatencyFoundStraggler", batchID) .detail("SlowestNode", latestNode) - .detail("FatestNode", earliestNode) + .detail("FastestNode", earliestNode) .detail("EarliestEndtime", earliest) .detail("LagTime", latest - earliest); } diff --git a/fdbserver/include/fdbserver/RestoreController.actor.h b/fdbserver/include/fdbserver/RestoreController.actor.h index cd55bbc17ba..769377b1490 100644 --- a/fdbserver/include/fdbserver/RestoreController.actor.h +++ b/fdbserver/include/fdbserver/RestoreController.actor.h @@ -436,7 +436,7 @@ struct RestoreControllerData : RestoreRoleData, public ReferenceCountedemplace(vb.beginVersion, vb); } - // Invariant: The last vb endverion should be no smaller than targetVersion + // Invariant: The last vb endversion should be no smaller than targetVersion if (maxVBVersion < targetVersion) { // Q: Is the restorable version always less than the maximum version from all backup filenames? // A: This is true for the raw backup files returned by backup container before we remove the empty files. 
diff --git a/fdbserver/include/fdbserver/RestoreWorkerInterface.actor.h b/fdbserver/include/fdbserver/RestoreWorkerInterface.actor.h index 3c2830514ae..a0127128cd6 100644 --- a/fdbserver/include/fdbserver/RestoreWorkerInterface.actor.h +++ b/fdbserver/include/fdbserver/RestoreWorkerInterface.actor.h @@ -593,7 +593,7 @@ struct RestoreSendVersionedMutationsRequest : TimedRequest { int batchIndex; // version batch index RestoreAsset asset; // Unique identifier for the current restore asset - Version msgIndex; // Monitonically increasing index of mutation messages + Version msgIndex; // Monotonically increasing index of mutation messages bool isRangeFile; VersionedMutationsVec versionedMutations; // Versioned mutations may be at different versions parsed by one loader diff --git a/fdbserver/include/fdbserver/RocksDBCheckpointUtils.actor.h b/fdbserver/include/fdbserver/RocksDBCheckpointUtils.actor.h index 5880dea0411..3e042f802e2 100644 --- a/fdbserver/include/fdbserver/RocksDBCheckpointUtils.actor.h +++ b/fdbserver/include/fdbserver/RocksDBCheckpointUtils.actor.h @@ -79,7 +79,7 @@ struct SstFileMetaData { SstFileMetaData() : file_number(0), file_type(2), size(0), temperature(0), smallest_seqno(0), largest_seqno(0), num_reads_sampled(0), being_compacted(false), num_entries(0), num_deletions(0), oldest_blob_file_number(0), - oldest_ancester_time(0), file_creation_time(0), epoch_number(0) {} + oldest_ancestor_time(0), file_creation_time(0), epoch_number(0) {} SstFileMetaData(const std::string& _relative_filename, const std::string& _directory, @@ -98,7 +98,7 @@ struct SstFileMetaData { uint64_t _num_entries, uint64_t _num_deletions, uint64_t _oldest_blob_file_number, - uint64_t _oldest_ancester_time, + uint64_t _oldest_ancestor_time, uint64_t _file_creation_time, uint64_t _epoch_number, const std::string& _name, @@ -109,7 +109,7 @@ struct SstFileMetaData { largest_seqno(_largest_seqno), smallestkey(_smallestkey), largestkey(_largestkey), num_reads_sampled(_num_reads_sampled), being_compacted(_being_compacted), num_entries(_num_entries), num_deletions(_num_deletions), oldest_blob_file_number(_oldest_blob_file_number), - oldest_ancester_time(_oldest_ancester_time), file_creation_time(_file_creation_time), + oldest_ancestor_time(_oldest_ancestor_time), file_creation_time(_file_creation_time), epoch_number(_epoch_number), name(_name), db_path(_db_path) {} // The name of the file within its directory (e.g. "123456.sst") @@ -154,7 +154,7 @@ struct SstFileMetaData { // 0 if the information is not available. // // Note: for TTL blob files, it contains the start of the expiration range. - uint64_t oldest_ancester_time; + uint64_t oldest_ancestor_time; // Timestamp when the SST file is created, provided by // SystemClock::GetCurrentTime(). 0 if the information is not available. 
uint64_t file_creation_time; @@ -197,7 +197,7 @@ struct SstFileMetaData { num_entries, num_deletions, oldest_blob_file_number, - oldest_ancester_time, + oldest_ancestor_time, file_creation_time, epoch_number, name, @@ -235,7 +235,7 @@ struct LiveFileMetaData : public SstFileMetaData { SstFileMetaData::num_entries, SstFileMetaData::num_deletions, SstFileMetaData::oldest_blob_file_number, - SstFileMetaData::oldest_ancester_time, + SstFileMetaData::oldest_ancestor_time, SstFileMetaData::file_creation_time, SstFileMetaData::epoch_number, SstFileMetaData::name, diff --git a/fdbserver/include/fdbserver/SpanContextMessage.h b/fdbserver/include/fdbserver/SpanContextMessage.h index d6afa6b49b6..ccd636ba5cf 100644 --- a/fdbserver/include/fdbserver/SpanContextMessage.h +++ b/fdbserver/include/fdbserver/SpanContextMessage.h @@ -26,7 +26,7 @@ #include "fdbclient/CommitTransaction.h" struct SpanContextMessage { - // This message is pushed into the the transaction logs' memory to inform + // This message is pushed into the transaction logs' memory to inform // it what transaction subsequent mutations were a part of. This allows // transaction logs and storage servers to associate mutations with a // transaction identifier, called a span context. diff --git a/fdbserver/include/fdbserver/Status.actor.h b/fdbserver/include/fdbserver/Status.actor.h index 7e9d4927e01..f7cc13c6a1d 100644 --- a/fdbserver/include/fdbserver/Status.actor.h +++ b/fdbserver/include/fdbserver/Status.actor.h @@ -55,7 +55,7 @@ Future clusterGetStatus( Version const& datacenterVersionDifference, Version const& dcLogServerVersionDifference, Version const& dcStorageServerVersionDifference, - ConfigBroadcaster const* const& conifgBroadcaster, + ConfigBroadcaster const* const& configBroadcaster, Optional const& metaclusterRegistration, metacluster::MetaclusterMetrics const& metaclusterMetrics); diff --git a/fdbserver/include/fdbserver/TagPartitionedLogSystem.actor.h b/fdbserver/include/fdbserver/TagPartitionedLogSystem.actor.h index 04c1cce3527..119d4902fa5 100644 --- a/fdbserver/include/fdbserver/TagPartitionedLogSystem.actor.h +++ b/fdbserver/include/fdbserver/TagPartitionedLogSystem.actor.h @@ -325,7 +325,7 @@ struct TagPartitionedLogSystem final : ILogSystem, ReferenceCounted monitorLog(Reference>> logServer, Reference> failed); - // returns the log group's knownComittedVersion, DV, and a vector of TLogLockResults for each tLog in the group. + // returns the log group's knownCommittedVersion, DV, and a vector of TLogLockResults for each tLog in the group. Optional>> static getDurableVersion( UID dbgid, LogLockInfo lockInfo, diff --git a/fdbserver/include/fdbserver/TagThrottler.h b/fdbserver/include/fdbserver/TagThrottler.h index 4cae8d70fdd..e2f4f4f823e 100644 --- a/fdbserver/include/fdbserver/TagThrottler.h +++ b/fdbserver/include/fdbserver/TagThrottler.h @@ -52,7 +52,7 @@ class ITagThrottler { // Based on the busiest read and write tags in the provided storage queue info, these methods // update tag throttling limits. Unfortunately, the two effective interfaces of the two - // implementations of ITagThrottler (GlobalTagThrottler and TagThrottler) have diveraged over + // implementations of ITagThrottler (GlobalTagThrottler and TagThrottler) have diverged over // time. As a result, exactly one of the below methods is a noop for each implementation. 
virtual Future tryUpdateAutoThrottling(StorageQueueInfo const&) = 0; virtual void updateThrottling(Map const&) = 0; diff --git a/fdbserver/include/fdbserver/TenantCache.h b/fdbserver/include/fdbserver/TenantCache.h index 3cc43e179d0..afdd4b37e2b 100644 --- a/fdbserver/include/fdbserver/TenantCache.h +++ b/fdbserver/include/fdbserver/TenantCache.h @@ -57,7 +57,7 @@ class TenantCache : public ReferenceCounted { uint64_t generation; TenantMapByPrefix tenantCache; - // Map from tenant group names to the list of tenants, cumumlative storage used by + // Map from tenant group names to the list of tenants, cumulative storage used by // all the tenants in the group, and its storage quota. TenantStorageMap tenantStorageMap; diff --git a/fdbserver/include/fdbserver/art_impl.h b/fdbserver/include/fdbserver/art_impl.h index db632ae6c21..1402d3dcbba 100644 --- a/fdbserver/include/fdbserver/art_impl.h +++ b/fdbserver/include/fdbserver/art_impl.h @@ -843,7 +843,7 @@ void art_tree::remove_child4(art_node4* n, art_node** ref, art_node** l, int dep // If you have a fat node, you cannot compress when you get to one child // So it can happen that the one child get removed // At that point, you transform the fat node into a leaf: the fat key is not prefix of any subtree! - // This should only happen to a node4kv: if the node is normal, then already when there there is 1 child + // This should only happen to a node4kv: if the node is normal, then already when there is 1 child // the node gets compressed. // We still check for depth b/c we want to avoid that root becomes empty and becomes a leaf else if (n->n.num_children == 0 && depth) { @@ -1229,7 +1229,7 @@ art_leaf* art_tree::insert_leaf(art_node* n, return l2; } else { - // The key in the leaf is a supertset of the new key + // The key in the leaf is a superset of the new key // So the leaf stays a leaf and the new key goes in the kv_node art_node4_kv* new_node = (art_node4_kv*)alloc_kv_node(ART_NODE4_KV); art_leaf* fat_leaf = make_leaf(k, value); diff --git a/fdbserver/include/fdbserver/pubsub.h b/fdbserver/include/fdbserver/pubsub.h index 42742e99718..aac5942e9c2 100644 --- a/fdbserver/include/fdbserver/pubsub.h +++ b/fdbserver/include/fdbserver/pubsub.h @@ -54,7 +54,7 @@ * 1. Subscriptions are "retroactive". If a subscription is in place, the messages * from that feed will start to appear in the listing of that inboxes messages * and appear in historical lists as well. This could lead to odd behaviour - * if paging through the contents of an inbox while a new subscrption was added + * if paging through the contents of an inbox while a new subscription was added */ typedef uint64_t Feed; diff --git a/fdbserver/masterserver.swift b/fdbserver/masterserver.swift index 2742d42ef0f..57bc5cdb996 100644 --- a/fdbserver/masterserver.swift +++ b/fdbserver/masterserver.swift @@ -424,7 +424,7 @@ public actor MasterDataActor { extension MasterData { var swiftActorImpl: MasterDataActor { #if FDBSERVER_FORWARD_DECLARE_SWIFT_APIS - // During the generationg of the C++ header for this module, we do not + // During the generation of the C++ header for this module, we do not // yet have access to `getSwiftImpl` API. 
return MasterDataActor() #else diff --git a/fdbserver/pubsub.actor.cpp b/fdbserver/pubsub.actor.cpp index d33e6c56b26..b7f97ccb9a8 100644 --- a/fdbserver/pubsub.actor.cpp +++ b/fdbserver/pubsub.actor.cpp @@ -95,7 +95,7 @@ Key keyForMessage(uint64_t message) { return StringRef(format("m/%016llx", message)); } -Key keyForDisptchEntry(uint64_t message) { +Key keyForDispatchEntry(uint64_t message) { return StringRef(format("d/%016llx", message)); } @@ -270,7 +270,7 @@ ACTOR Future _postMessage(Database cx, uint64_t feed, Standalone _postMessage(Database cx, uint64_t feed, Standalone> _listInboxMessages(Database cx, uint64_t inbo // Check the list of dispatching messages to make sure there are no older ones than ours state MessageId earliestMessage = feedLatest.begin()->first; - RangeResult dispatching = wait(tr.getRange(firstGreaterOrEqual(keyForDisptchEntry(earliestMessage)), - firstGreaterOrEqual(keyForDisptchEntry(UINT64_MAX)), + RangeResult dispatching = wait(tr.getRange(firstGreaterOrEqual(keyForDispatchEntry(earliestMessage)), + firstGreaterOrEqual(keyForDispatchEntry(UINT64_MAX)), 1)); // If there are messages "older" than ours, try this again // (with a new transaction and a flush of the "stale" feeds diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp index c22214f2217..6d9e8bc8d5f 100644 --- a/fdbserver/storageserver.actor.cpp +++ b/fdbserver/storageserver.actor.cpp @@ -153,7 +153,7 @@ bool canReplyWith(Error e) { // getMappedRange related exceptions that are not retriable: case error_code_mapper_bad_index: case error_code_mapper_no_such_key: - case error_code_mapper_bad_range_decriptor: + case error_code_mapper_bad_range_descriptor: case error_code_quick_get_key_values_has_more: case error_code_quick_get_value_miss: case error_code_quick_get_key_values_miss: @@ -584,7 +584,7 @@ struct StorageServerDisk { } // SOMEDAY: Put readNextKeyInclusive in IKeyValueStore - // Read the key that is equal or greater then 'key' from the storage engine. + // Read the key that is equal or greater than 'key' from the storage engine. // For example, readNextKeyInclusive("a") should return: // - "a", if key "a" exist // - "b", if key "a" doesn't exist, and "b" is the next existing key in total order @@ -2908,13 +2908,13 @@ ACTOR Future fetchCheckpointKeyValuesQ(StorageServer* self, FetchCheckpoin state RangeResult res = wait(iter->nextBatch(CLIENT_KNOBS->REPLY_BYTE_LIMIT, CLIENT_KNOBS->REPLY_BYTE_LIMIT)); if (!res.empty()) { - TraceEvent(SevDebug, "FetchCheckpontKeyValuesReadRange", self->thisServerID) + TraceEvent(SevDebug, "FetchCheckpointKeyValuesReadRange", self->thisServerID) .detail("CheckpointID", req.checkpointID) .detail("FirstReturnedKey", res.front().key) .detail("LastReturnedKey", res.back().key) .detail("Size", res.size()); } else { - TraceEvent(SevInfo, "FetchCheckpontKeyValuesEmptyRange", self->thisServerID) + TraceEvent(SevInfo, "FetchCheckpointKeyValuesEmptyRange", self->thisServerID) .detail("CheckpointID", req.checkpointID); } @@ -4970,7 +4970,7 @@ void preprocessMappedKey(Tuple& mappedKeyFormatTuple, std::vector()); @@ -5077,7 +5077,7 @@ ACTOR Future auditStorageServerShardQ(StorageServer* data, AuditStorageReq // The trackShardAssignment is correct when at most 1 auditStorageServerShardQ runs // at a time. 
Currently, this is guaranteed by setting serveAuditStorageParallelismLock == 1 // If serveAuditStorageParallelismLock > 1, we need to check trackShardAssignmentMinVersion - // to make sure no onging auditStorageServerShardQ is running + // to make sure no ongoing auditStorageServerShardQ is running if (data->trackShardAssignmentMinVersion != invalidVersion) { // Another auditStorageServerShardQ is running req.reply.sendError(audit_storage_cancelled()); @@ -5968,7 +5968,7 @@ TEST_CASE("/fdbserver/storageserver/constructMappedKey") { Key mappedKey = constructMappedKey(&kvr, vt, mappedKeyFormatTuple); } catch (Error& e) { - ASSERT(e.code() == error_code_mapper_bad_range_decriptor); + ASSERT(e.code() == error_code_mapper_bad_range_descriptor); throwException2 = true; } ASSERT(throwException2); @@ -7521,7 +7521,7 @@ ACTOR Future>> tryReadBlobGranuleChunk } } -// Read blob granules metadata. The key range can cross tenant bundary. +// Read blob granules metadata. The key range can cross tenant boundary. ACTOR Future>> readBlobGranuleChunks(Transaction* tr, Database cx, KeyRangeRef keys, @@ -9256,8 +9256,8 @@ ACTOR Future fetchShardApplyUpdates(StorageServer* data, if (!updates.empty()) { TraceEvent(moveInShard->logSev, "FetchShardApplyingUpdates", data->thisServerID) .detail("MoveInShard", moveInShard->toString()) - .detail("MinVerion", updates.front().version) - .detail("MaxVerion", updates.back().version) + .detail("MinVersion", updates.front().version) + .detail("MaxVersion", updates.back().version) .detail("TargetVersion", version) .detail("HighWatermark", highWatermark) .detail("Size", updates.size()); @@ -11926,7 +11926,7 @@ ACTOR Future updateStorage(StorageServer* data) { .detail("Version", data->pendingAddRanges.begin()->first) .detail("DurableVersion", data->durableVersion.get()); addedRanges = true; - // Remove commit byte limit to make sure the private mutaiton(s) associated with the + // Remove commit byte limit to make sure the private mutation(s) associated with the // `addRange` are committed. 
unlimitedCommitBytes = UnlimitedCommitBytes::True; } @@ -11961,7 +11961,7 @@ ACTOR Future updateStorage(StorageServer* data) { data->fetchKeysBytesBudget += bytesLeft; data->fetchKeysBudgetUsed.set(data->fetchKeysBytesBudget <= 0); - // Dependng on how negative the fetchKeys budget was it could still be used up + // Depending on how negative the fetchKeys budget was it could still be used up if (!data->fetchKeysBudgetUsed.get()) { wait(durableDelay || data->fetchKeysBudgetUsed.onChange()); } @@ -12137,7 +12137,7 @@ ACTOR Future updateStorage(StorageServer* data) { debug_advanceMinCommittedVersion(data->thisServerID, data->storageMinRecoverVersion); if (removeKVSRanges) { - TraceEvent(SevDebug, "RemoveKVSRangesComitted", data->thisServerID) + TraceEvent(SevDebug, "RemoveKVSRangesCommitted", data->thisServerID) .detail("NewDurableVersion", newOldestVersion) .detail("DesiredVersion", desiredVersion) .detail("OldestRemoveKVSRangesVersion", data->pendingRemoveRanges.begin()->first); @@ -12257,7 +12257,7 @@ ACTOR Future updateStorage(StorageServer* data) { data->counters.changeFeedMutationsDurable += durableChangeFeedMutations; durableInProgress.send(Void()); - wait(delay(0, TaskPriority::UpdateStorage)); // Setting durableInProgess could cause the storage server to + wait(delay(0, TaskPriority::UpdateStorage)); // Setting durableInProgress could cause the storage server to // shut down, so delay to check for cancellation // Taking and releasing the durableVersionLock ensures that no eager reads both begin before the commit was diff --git a/fdbserver/swift_fdbserver_cxx_swift_value_conformance.swift b/fdbserver/swift_fdbserver_cxx_swift_value_conformance.swift index e08b9a8a794..bd157994199 100644 --- a/fdbserver/swift_fdbserver_cxx_swift_value_conformance.swift +++ b/fdbserver/swift_fdbserver_cxx_swift_value_conformance.swift @@ -29,7 +29,7 @@ public struct ExposeTypeConf { /*****************************************************************************/ /* Whenever you see a 'type cannot be used in a Swift generic context' error */ -/* you need to expose the the type in question via an expose... function. */ +/* you need to expose the type in question via an expose... function. */ /* */ /* These functions function ensures that the value witness table for `T` */ /* to C++ is exposed in the generated C++ header. */ diff --git a/fdbserver/workloads/ApiCorrectness.actor.cpp b/fdbserver/workloads/ApiCorrectness.actor.cpp index 5aa52560e20..6e86453dad8 100644 --- a/fdbserver/workloads/ApiCorrectness.actor.cpp +++ b/fdbserver/workloads/ApiCorrectness.actor.cpp @@ -33,7 +33,7 @@ #include "flow/actorcompiler.h" // This must be the last #include. #include "flow/genericactors.actor.h" -// Valdiate at-rest encryption guarantees. If enabled, test injects a known 'marker' in Key and/or Values +// Validate at-rest encryption guarantees. If enabled, test injects a known 'marker' in Key and/or Values // inserted into FDB by the workload. On shutdown, all test generated files (under simfdb/) can scanned to find if // 'plaintext marker' is present. 
const std::string ENCRYPTION_AT_REST_MARKER_STRING = "Expecto..Patronum..."; diff --git a/fdbserver/workloads/AsyncFile.cpp b/fdbserver/workloads/AsyncFile.cpp index 0dae37423f4..3994879d5cb 100644 --- a/fdbserver/workloads/AsyncFile.cpp +++ b/fdbserver/workloads/AsyncFile.cpp @@ -55,7 +55,7 @@ void RandomByteGenerator::writeRandomBytesToBuffer(void* buf, int bytes) { //} } -//// Asynch File Workload +//// Async File Workload const int AsyncFileWorkload::_PAGE_SIZE = 4096; diff --git a/fdbserver/workloads/AtomicOpsApiCorrectness.actor.cpp b/fdbserver/workloads/AtomicOpsApiCorrectness.actor.cpp index 8cc919a792d..2349b8826ee 100644 --- a/fdbserver/workloads/AtomicOpsApiCorrectness.actor.cpp +++ b/fdbserver/workloads/AtomicOpsApiCorrectness.actor.cpp @@ -89,7 +89,7 @@ struct AtomicOpsApiCorrectnessWorkload : TestWorkload { void getMetrics(std::vector& m) override {} - // Test Atomic ops on non existing keys that results in a set + // Test Atomic ops on nonexistent keys that results in a set ACTOR Future testAtomicOpSetOnNonExistingKey(Database cx, AtomicOpsApiCorrectnessWorkload* self, uint32_t opType, @@ -153,7 +153,7 @@ struct AtomicOpsApiCorrectnessWorkload : TestWorkload { return Void(); } - // Test Atomic ops on non existing keys that results in a unset + // Test Atomic ops on nonexistent keys that results in a unset ACTOR Future testAtomicOpUnsetOnNonExistingKey(Database cx, AtomicOpsApiCorrectnessWorkload* self, uint32_t opType, diff --git a/fdbserver/workloads/BackupAndParallelRestoreCorrectness.actor.cpp b/fdbserver/workloads/BackupAndParallelRestoreCorrectness.actor.cpp index 2765a6a6822..00b4d734d4e 100644 --- a/fdbserver/workloads/BackupAndParallelRestoreCorrectness.actor.cpp +++ b/fdbserver/workloads/BackupAndParallelRestoreCorrectness.actor.cpp @@ -384,7 +384,7 @@ struct BackupAndParallelRestoreCorrectnessWorkload : TestWorkload { Key(), Key(), self->locked))); - TraceEvent(SevError, "BARW_RestoreAllowedOverwrittingDatabase", randomID).log(); + TraceEvent(SevError, "BARW_RestoreAllowedOverwritingDatabase", randomID).log(); ASSERT(false); } catch (Error& e) { if (e.code() != error_code_restore_destination_not_empty) { diff --git a/fdbserver/workloads/BackupCorrectness.actor.cpp b/fdbserver/workloads/BackupCorrectness.actor.cpp index e1d1d7553da..4a0d153a967 100644 --- a/fdbserver/workloads/BackupCorrectness.actor.cpp +++ b/fdbserver/workloads/BackupCorrectness.actor.cpp @@ -504,7 +504,7 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload { Key(), Key(), self->locked))); - TraceEvent(SevError, "BARW_RestoreAllowedOverwrittingDatabase", randomID).log(); + TraceEvent(SevError, "BARW_RestoreAllowedOverwritingDatabase", randomID).log(); ASSERT(false); } catch (Error& e) { if (e.code() != error_code_restore_destination_not_empty) { diff --git a/fdbserver/workloads/BlobGranuleCorrectnessWorkload.actor.cpp b/fdbserver/workloads/BlobGranuleCorrectnessWorkload.actor.cpp index 02525d78c1d..b179b0898dd 100644 --- a/fdbserver/workloads/BlobGranuleCorrectnessWorkload.actor.cpp +++ b/fdbserver/workloads/BlobGranuleCorrectnessWorkload.actor.cpp @@ -1245,7 +1245,7 @@ struct BlobGranuleCorrectnessWorkload : TestWorkload { result = self->validateResult(threadData, blob, 0, std::numeric_limits::max(), 0, readVersion); finalRowsValidated = blob.first.size(); - // then if we are still good, do another check at a higher version (not checking data) to ensure availabiity + // then if we are still good, do another check at a higher version (not checking data) to ensure availability // of empty 
versions if (result) { if (BGW_DEBUG) { diff --git a/fdbserver/workloads/BlobGranuleVerifier.actor.cpp b/fdbserver/workloads/BlobGranuleVerifier.actor.cpp index 33d393c9347..a1f728285e8 100644 --- a/fdbserver/workloads/BlobGranuleVerifier.actor.cpp +++ b/fdbserver/workloads/BlobGranuleVerifier.actor.cpp @@ -168,7 +168,7 @@ struct BlobGranuleVerifierWorkload : TestWorkload { } } - // Sets the whole user keyspace to be blobified + // Sets the whole user keyspace to be blobbified ACTOR Future setUpBlobRange(Database cx) { bool success = wait(cx->blobbifyRange(normalKeys)); ASSERT(success); diff --git a/fdbserver/workloads/ClogTlog.actor.cpp b/fdbserver/workloads/ClogTlog.actor.cpp index a42608f3663..576d7a568a0 100644 --- a/fdbserver/workloads/ClogTlog.actor.cpp +++ b/fdbserver/workloads/ClogTlog.actor.cpp @@ -177,7 +177,7 @@ struct ClogTlogWorkload : TestWorkload { state bool useGrayFailureToRecover = false; if (deterministicRandom()->coinflip() && self->useDisconnection) { // Use gray failure instead of exclusion to recover the cluster. - TraceEvent("ClogTlogUseGrayFailreToRecover").log(); + TraceEvent("ClogTlogUseGrayFailureToRecover").log(); useGrayFailureToRecover = true; } diff --git a/fdbserver/workloads/ConsistencyCheck.actor.cpp b/fdbserver/workloads/ConsistencyCheck.actor.cpp index 5359817e69f..13ace8a6e77 100644 --- a/fdbserver/workloads/ConsistencyCheck.actor.cpp +++ b/fdbserver/workloads/ConsistencyCheck.actor.cpp @@ -232,7 +232,7 @@ struct ConsistencyCheckWorkload : TestWorkload { ACTOR Future runCheck(Database cx, ConsistencyCheckWorkload* self) { CODE_PROBE(self->performQuiescentChecks, "Quiescent consistency check"); CODE_PROBE(!self->performQuiescentChecks, "Non-quiescent consistency check"); - state double consistenyCheckerBeginTime = now(); + state double consistencyCheckerBeginTime = now(); if (self->firstClient || self->distributed) { try { @@ -415,7 +415,7 @@ struct ConsistencyCheckWorkload : TestWorkload { TraceEvent("ConsistencyCheck_FinishedCheck") .detail("Repetitions", self->repetitions) - .detail("TimeSpan", now() - consistenyCheckerBeginTime); + .detail("TimeSpan", now() - consistencyCheckerBeginTime); return Void(); } diff --git a/fdbserver/workloads/ConsistencyCheckUrgent.actor.cpp b/fdbserver/workloads/ConsistencyCheckUrgent.actor.cpp index a1821224c92..2202f443aea 100644 --- a/fdbserver/workloads/ConsistencyCheckUrgent.actor.cpp +++ b/fdbserver/workloads/ConsistencyCheckUrgent.actor.cpp @@ -383,7 +383,7 @@ struct ConsistencyCheckUrgentWorkload : TestWorkload { // Last value mismatch KeyRef valueMismatchKey; - // Loop indeces + // Loop indices int currentI = 0; int referenceI = 0; while (currentI < current.data.size() || referenceI < reference.data.size()) { diff --git a/fdbserver/workloads/DDMetrics.actor.cpp b/fdbserver/workloads/DDMetrics.actor.cpp index a2e52fe828d..37b64d82ce0 100644 --- a/fdbserver/workloads/DDMetrics.actor.cpp +++ b/fdbserver/workloads/DDMetrics.actor.cpp @@ -37,7 +37,7 @@ struct DDMetricsWorkload : TestWorkload { ACTOR Future getHighPriorityRelocationsInFlight(Database cx, DDMetricsWorkload* self) { WorkerInterface masterWorker = wait(getMasterWorker(cx, self->dbInfo)); - TraceEvent("GetHighPriorityReliocationsInFlight").detail("Stage", "ContactingMaster"); + TraceEvent("GetHighPriorityRelocationsInFlight").detail("Stage", "ContactingMaster"); TraceEventFields md = wait(timeoutError(masterWorker.eventLogRequest.getReply(EventLogRequest("MovingData"_sr)), 1.0)); int relocations; diff --git 
a/fdbserver/workloads/DataLossRecovery.actor.cpp b/fdbserver/workloads/DataLossRecovery.actor.cpp index f085849e496..8558b136dac 100644 --- a/fdbserver/workloads/DataLossRecovery.actor.cpp +++ b/fdbserver/workloads/DataLossRecovery.actor.cpp @@ -105,7 +105,7 @@ struct DataLossRecoveryWorkload : TestWorkload { wait(self->readAndVerify(self, cx, key, Optional())); TraceEvent("DataLossRecovery").detail("Phase", "VerifiedDataDropped"); - // Write will scceed. + // Write will succeed. wait(self->writeAndVerify(self, cx, key, newValue)); return Void(); diff --git a/fdbserver/workloads/EncryptKeyProxyTest.actor.cpp b/fdbserver/workloads/EncryptKeyProxyTest.actor.cpp index 36f62997c73..31394fca930 100644 --- a/fdbserver/workloads/EncryptKeyProxyTest.actor.cpp +++ b/fdbserver/workloads/EncryptKeyProxyTest.actor.cpp @@ -134,7 +134,7 @@ struct EncryptKeyProxyTestWorkload : TestWorkload { state int numIterations = deterministicRandom()->randomInt(512, 786); for (; numIterations > 0;) { - // Randomly select baseCipherIds to be lookedup in the cache + // Randomly select baseCipherIds to be looked up in the cache int idx = deterministicRandom()->randomInt(1, cipherKeysVec.size()); int nIds = deterministicRandom()->randomInt(1, cipherKeysVec.size()); std::unordered_set cipherDetails; @@ -155,7 +155,7 @@ struct EncryptKeyProxyTestWorkload : TestWorkload { std::unordered_map> cipherKeys = wait(GetEncryptCipherKeys::getEncryptCipherKeys( self->dbInfo, cipherDetails, BlobCipherMetrics::UsageType::TEST)); - // Ensure the sanity of the lookedup data + // Ensure the sanity of the looked up data for (auto item : cipherKeys) { bool found = false; for (auto key : cipherKeysVec) { diff --git a/fdbserver/workloads/EncryptionOps.actor.cpp b/fdbserver/workloads/EncryptionOps.actor.cpp index 1ded9d3a661..fa09f5aea01 100644 --- a/fdbserver/workloads/EncryptionOps.actor.cpp +++ b/fdbserver/workloads/EncryptionOps.actor.cpp @@ -59,12 +59,12 @@ struct WorkloadMetrics { WorkloadMetrics() { reset(); } double computeEncryptThroughputMBPS() { - // convert bytes -> MBs & nano-seonds -> seconds + // convert bytes -> MBs & nano-seconds -> seconds return (totalBytes * NANO_SECOND) / (totalEncryptTimeNS * MEGA_BYTES); } double computeDecryptThroughputMBPS() { - // convert bytes -> MBs & nano-seonds -> seconds + // convert bytes -> MBs & nano-seconds -> seconds return (totalBytes * NANO_SECOND) / (totalDecryptTimeNS * MEGA_BYTES); } diff --git a/fdbserver/workloads/ExpectStableThroughput.actor.cpp b/fdbserver/workloads/ExpectStableThroughput.actor.cpp index 2a4203e513d..355f8b6fcf8 100644 --- a/fdbserver/workloads/ExpectStableThroughput.actor.cpp +++ b/fdbserver/workloads/ExpectStableThroughput.actor.cpp @@ -7,7 +7,7 @@ #include "flow/actorcompiler.h" // This must be the last include -// This workload is meant to be run with the ThroughputQuotaWorklaod. +// This workload is meant to be run with the ThroughputQuotaWorkload. // The ThroughputQuotaWorkload sets a total quota, and then this workload runs // with tagged transactions for a long duration, attempting to achieve a higher // throughput than the specified quota. 
The check phase of this workload then diff --git a/fdbserver/workloads/FuzzApiCorrectness.actor.cpp b/fdbserver/workloads/FuzzApiCorrectness.actor.cpp index 2cea043e15e..beeeea35d36 100644 --- a/fdbserver/workloads/FuzzApiCorrectness.actor.cpp +++ b/fdbserver/workloads/FuzzApiCorrectness.actor.cpp @@ -42,22 +42,22 @@ namespace ph = std::placeholders; // We can use this to suppress expected exceptions, and take action // if we don't get an exception wqe should have gotten. struct ExceptionContract { - enum occurance_t { Never = 0, Possible = 1, Always = 2 }; + enum occurrence_t { Never = 0, Possible = 1, Always = 2 }; std::string func; - std::map expected; + std::map expected; std::function augment; ExceptionContract(const char* func_, const std::function& augment_) : func(func_), augment(augment_) {} - ExceptionContract& operator=(const std::map& e) { + ExceptionContract& operator=(const std::map& e) { expected = e; return *this; } - static occurance_t possibleButRequiredIf(bool in) { return in ? Always : Possible; } - static occurance_t requiredIf(bool in) { return in ? Always : Never; } - static occurance_t possibleIf(bool in) { return in ? Possible : Never; } + static occurrence_t possibleButRequiredIf(bool in) { return in ? Always : Possible; } + static occurrence_t requiredIf(bool in) { return in ? Always : Never; } + static occurrence_t possibleIf(bool in) { return in ? Possible : Never; } void handleException(const Error& e, Reference tr) const { // We should always ignore these. diff --git a/fdbserver/workloads/GetMappedRange.actor.cpp b/fdbserver/workloads/GetMappedRange.actor.cpp index 6e98e058826..cebd7773f28 100644 --- a/fdbserver/workloads/GetMappedRange.actor.cpp +++ b/fdbserver/workloads/GetMappedRange.actor.cpp @@ -549,13 +549,13 @@ struct GetMappedRangeWorkload : ApiWorkload { state Key mapper = getMapper(self, false); // The scanned range cannot be too large to hit get_mapped_key_values_has_more. We have a unit validating the // error is thrown when the range is large. 
- state bool originalStrictlyEnforeByteLimit = SERVER_KNOBS->STRICTLY_ENFORCE_BYTE_LIMIT; + state bool originalStrictlyEnforceByteLimit = SERVER_KNOBS->STRICTLY_ENFORCE_BYTE_LIMIT; (const_cast SERVER_KNOBS)->STRICTLY_ENFORCE_BYTE_LIMIT = deterministicRandom()->coinflip(); wait(self->scanMappedRange(cx, 10, 490, mapper, self)); wait(testMetric(cx, self, 10, 490, mapper, self->checkStorageQueueSeconds)); // reset it to default - (const_cast SERVER_KNOBS)->STRICTLY_ENFORCE_BYTE_LIMIT = originalStrictlyEnforeByteLimit; + (const_cast SERVER_KNOBS)->STRICTLY_ENFORCE_BYTE_LIMIT = originalStrictlyEnforceByteLimit; return Void(); } diff --git a/fdbserver/workloads/HTTPKeyValueStore.actor.cpp b/fdbserver/workloads/HTTPKeyValueStore.actor.cpp index 3f5f8243019..bb8c9a4d1aa 100644 --- a/fdbserver/workloads/HTTPKeyValueStore.actor.cpp +++ b/fdbserver/workloads/HTTPKeyValueStore.actor.cpp @@ -230,7 +230,7 @@ struct HTTPKeyValueStoreWorkload : TestWorkload { try { while (!self->conn) { // sometimes do resolve and connect directly, other times simulate what rest kms connector does and - // resolve endpoints themself and then connect to one directly + // resolve endpoints themselves and then connect to one directly if (self->manualResolve) { state std::vector addrs = wait(INetworkConnections::net()->resolveTCPEndpoint(self->hostname, self->service)); diff --git a/fdbserver/workloads/IncrementalBackup.actor.cpp b/fdbserver/workloads/IncrementalBackup.actor.cpp index 88db02e5db8..08f9b8f9dcc 100644 --- a/fdbserver/workloads/IncrementalBackup.actor.cpp +++ b/fdbserver/workloads/IncrementalBackup.actor.cpp @@ -80,7 +80,7 @@ struct IncrementalBackupWorkload : TestWorkload { ACTOR static Future _check(Database cx, IncrementalBackupWorkload* self) { if (self->waitForBackup) { // Undergoing recovery with the snapshot system keys set will pause the backup agent - // Pre-emptively unpause any backup agents before attempting to wait to avoid getting stuck + // Preemptively unpause any backup agents before attempting to wait to avoid getting stuck wait(self->backupAgent.changePause(cx, false)); state Reference backupContainer; state UID backupUID; diff --git a/fdbserver/workloads/KVStoreTest.actor.cpp b/fdbserver/workloads/KVStoreTest.actor.cpp index 76f5f923cf1..ebaeabbed29 100644 --- a/fdbserver/workloads/KVStoreTest.actor.cpp +++ b/fdbserver/workloads/KVStoreTest.actor.cpp @@ -63,7 +63,7 @@ class TestHistogram { } } } - // void addHistogram(const Histrogram& h2); + // void addHistogram(const Histogram& h2); T mean() const { return sum * (1.0 / N); } // exact const T& min() const { return minSample; } diff --git a/fdbserver/workloads/MetaclusterManagementWorkload.actor.cpp b/fdbserver/workloads/MetaclusterManagementWorkload.actor.cpp index 9dde63686d0..33895231373 100644 --- a/fdbserver/workloads/MetaclusterManagementWorkload.actor.cpp +++ b/fdbserver/workloads/MetaclusterManagementWorkload.actor.cpp @@ -1019,7 +1019,7 @@ struct MetaclusterManagementWorkload : TestWorkload { // In this case, the condition of `hasCapacity` will be further tightened since we will exclude those // data clusters with autoTenantAssignment being false. // It's possible that all the data clusters are excluded from the auto-assignment pool. - // Consequently, even if the the metacluster has capacity, the capacity index has no available data + // Consequently, even if the metacluster has capacity, the capacity index has no available data // clusters. In this case, trying to assign a cluster to the tenant automatically will fail. 
bool emptyCapacityIndex = true; for (auto dataDb : self->dataDbs) { diff --git a/fdbserver/workloads/Performance.actor.cpp b/fdbserver/workloads/Performance.actor.cpp index f00fd2bc1ee..6caefb47904 100644 --- a/fdbserver/workloads/Performance.actor.cpp +++ b/fdbserver/workloads/Performance.actor.cpp @@ -94,7 +94,7 @@ struct PerformanceWorkload : TestWorkload { } void logOptions(Standalone>> options) { - TraceEvent start("PerformaceSetupStarting"); + TraceEvent start("PerformanceSetupStarting"); for (int i = 0; i < options.size(); i++) { for (int j = 0; j < options[i].size(); j++) { start.detail(format("Option-%d-%d", i, j).c_str(), @@ -159,7 +159,7 @@ struct PerformanceWorkload : TestWorkload { loop { Standalone>> options = self->getOpts(tps); - TraceEvent start("PerformaceProbeStarting"); + TraceEvent start("PerformanceProbeStarting"); start.detail("RateTarget", tps); for (int i = 0; i < options.size(); i++) { for (int j = 0; j < options[i].size(); j++) { diff --git a/fdbserver/workloads/PerpetualWiggleStorageMigrationWorkload.actor.cpp b/fdbserver/workloads/PerpetualWiggleStorageMigrationWorkload.actor.cpp index 8bce87a0a39..392fb00c7ad 100644 --- a/fdbserver/workloads/PerpetualWiggleStorageMigrationWorkload.actor.cpp +++ b/fdbserver/workloads/PerpetualWiggleStorageMigrationWorkload.actor.cpp @@ -122,7 +122,7 @@ struct PerpetualWiggleStorageMigrationWorkload : public TestWorkload { wait(validateDatabase(cx, ssToExcludeInclude, ssToWiggle, /*wiggleStorageType=*/"ssd-rocksdb-v1")); - // We probablistically validate that resetting perpetual_storage_wiggle_engine to none works as expected. + // We probabilistically validate that resetting perpetual_storage_wiggle_engine to none works as expected. if (deterministicRandom()->coinflip()) { TraceEvent("Test_ClearPerpetualStorageWiggleEngine").log(); bool change = wait(IssueConfigurationChange(cx, "perpetual_storage_wiggle_engine=none", true)); diff --git a/fdbserver/workloads/PubSubMultiples.actor.cpp b/fdbserver/workloads/PubSubMultiples.actor.cpp index a129bba1153..bc7f903b2e5 100644 --- a/fdbserver/workloads/PubSubMultiples.actor.cpp +++ b/fdbserver/workloads/PubSubMultiples.actor.cpp @@ -99,7 +99,7 @@ struct PubSubMultiplesWorkload : TestWorkload { } /*ACTOR*/ Future messageSender(PubSubMultiplesWorkload* self, Database cx) { - // use a possion loop and post messages to feeds + // use a posting loop and post messages to feeds return Void(); } diff --git a/fdbserver/workloads/RemoveServersSafely.actor.cpp b/fdbserver/workloads/RemoveServersSafely.actor.cpp index d37291a8f3f..5be7f855b4a 100644 --- a/fdbserver/workloads/RemoveServersSafely.actor.cpp +++ b/fdbserver/workloads/RemoveServersSafely.actor.cpp @@ -683,8 +683,8 @@ struct RemoveServersSafelyWorkload : TestWorkload { .detail("Step", "Excluding localities with failed option") .detail("FailedAddressesSize", toKillMarkFailedArray.size()) .detail("FailedAddresses", describe(toKillMarkFailedArray)) - .detail("FailedLocaitiesSize", toKillLocalitiesFailed.size()) - .detail("FailedLocaities", describe(toKillLocalitiesFailed)); + .detail("FailedLocalitiesSize", toKillLocalitiesFailed.size()) + .detail("FailedLocalities", describe(toKillLocalitiesFailed)); wait(excludeLocalities(cx, toKillLocalitiesFailed, true)); } else { @@ -703,8 +703,8 @@ struct RemoveServersSafelyWorkload : TestWorkload { .detail("Step", "Excluding localities without failed option") .detail("AddressesSize", toKillArray.size()) .detail("Addresses", describe(toKillArray)) - .detail("LocaitiesSize", toKillLocalities.size()) - 
.detail("Locaities", describe(toKillLocalities)); + .detail("LocalitiesSize", toKillLocalities.size()) + .detail("Localities", describe(toKillLocalities)); wait(excludeLocalities(cx, toKillLocalities, false)); } else { diff --git a/fdbserver/workloads/SpecialKeySpaceCorrectness.actor.cpp b/fdbserver/workloads/SpecialKeySpaceCorrectness.actor.cpp index 8d992d5412b..010eb37eb0d 100644 --- a/fdbserver/workloads/SpecialKeySpaceCorrectness.actor.cpp +++ b/fdbserver/workloads/SpecialKeySpaceCorrectness.actor.cpp @@ -234,7 +234,7 @@ struct SpecialKeySpaceCorrectnessWorkload : TestWorkload { if (res1.size() != res2.size()) { TraceEvent(SevError, "TestFailure") .detail("Reason", "Results' sizes are inconsistent") - .detail("CorrestResultSize", res1.size()) + .detail("CorrectResultSize", res1.size()) .detail("TestResultSize", res2.size()); return false; } diff --git a/fdbserver/workloads/SpecialKeySpaceRobustness.actor.cpp b/fdbserver/workloads/SpecialKeySpaceRobustness.actor.cpp index fbb0c8abc7d..b7c93e1045e 100644 --- a/fdbserver/workloads/SpecialKeySpaceRobustness.actor.cpp +++ b/fdbserver/workloads/SpecialKeySpaceRobustness.actor.cpp @@ -128,7 +128,7 @@ struct SpecialKeySpaceRobustnessWorkload : TestWorkload { } tx->reset(); } - // "Exclude" same address multiple times, and only the first excluson should trigger a system metadata update. + // "Exclude" same address multiple times, and only the first exclusion should trigger a system metadata update. { try { state std::string excludeWorker; @@ -363,7 +363,7 @@ struct SpecialKeySpaceRobustnessWorkload : TestWorkload { } } // test lock and unlock - // maske sure we lock the database + // make sure we lock the database loop { try { tx->setOption(FDBTransactionOptions::RAW_ACCESS); diff --git a/fdbserver/workloads/TenantEntryCacheWorkload.actor.cpp b/fdbserver/workloads/TenantEntryCacheWorkload.actor.cpp index 4c44bce0aa9..004add5414d 100644 --- a/fdbserver/workloads/TenantEntryCacheWorkload.actor.cpp +++ b/fdbserver/workloads/TenantEntryCacheWorkload.actor.cpp @@ -232,14 +232,14 @@ struct TenantEntryCacheWorkload : TestWorkload { // trigger with a max wait of 5 mins; timed_out error is thrown if cache refresh isn't triggered. 
state int64_t startTime = now(); - state int64_t waitUntill = startTime + 300; // 5 mins max wait + state int64_t waitUntil = startTime + 300; // 5 mins max wait loop { // InitRefresh + multiple timer based invocations (at least 2 invocations of cache->refresh()) if (cache->numCacheRefreshes() >= 2) { break; } - if (now() > waitUntill) { + if (now() > waitUntil) { throw timed_out(); } @@ -328,13 +328,13 @@ struct TenantEntryCacheWorkload : TestWorkload { ASSERT(entry.present()); state int64_t startTime = now(); - state int64_t waitUntill = startTime + 300; // 5 mins max wait + state int64_t waitUntil = startTime + 300; // 5 mins max wait loop { if (cache->numWatchRefreshes() >= 1) { break; } - if (now() > waitUntill) { + if (now() > waitUntil) { throw timed_out(); } diff --git a/fdbserver/workloads/TenantManagementWorkload.actor.cpp b/fdbserver/workloads/TenantManagementWorkload.actor.cpp index 5b314571efb..f273df698e8 100644 --- a/fdbserver/workloads/TenantManagementWorkload.actor.cpp +++ b/fdbserver/workloads/TenantManagementWorkload.actor.cpp @@ -1162,7 +1162,7 @@ struct TenantManagementWorkload : TestWorkload { state OperationType operationType = self->randomOperationType(); state Reference tr = makeReference(self->dataDb); - // True if the tenant should should exist and return a result + // True if the tenant should exist and return a result auto itr = self->createdTenants.find(tenant); state bool alreadyExists = itr != self->createdTenants.end() && !(operationType == OperationType::METACLUSTER && !self->useMetacluster); @@ -1631,7 +1631,7 @@ struct TenantManagementWorkload : TestWorkload { } // In the future after we enable tenant movement, this may be - // state bool configurationChanging = tenangGroupCanging || assignToDifferentCluster. + // state bool configurationChanging = tenantGroupChanging || assignToDifferentCluster. 
state bool configurationChanging = tenantGroupChanging; // If true, the options generated may include an unknown option @@ -1729,7 +1729,7 @@ struct TenantManagementWorkload : TestWorkload { state OperationType operationType = self->randomOperationType(); state Reference tr = makeReference(self->dataDb); - // True if the tenant group should should exist and return a result + // True if the tenant group should exist and return a result auto itr = self->createdTenantGroups.find(tenantGroup); state bool alreadyExists = itr != self->createdTenantGroups.end() && !(operationType == OperationType::METACLUSTER && !self->useMetacluster); @@ -1946,7 +1946,7 @@ struct TenantManagementWorkload : TestWorkload { state Error err = e; if (err.code() == error_code_tenant_not_found) { ASSERT(!tenantPresent); - CODE_PROBE(true, "Attempted to read key from non-existent tenant"); + CODE_PROBE(true, "Attempted to read key from nonexistent tenant"); return Void(); } else if (err.code() == error_code_tenant_locked) { ASSERT(!lockAware); diff --git a/fdbserver/workloads/TriggerRecovery.actor.cpp b/fdbserver/workloads/TriggerRecovery.actor.cpp index 9ce082bf407..305df2395c8 100644 --- a/fdbserver/workloads/TriggerRecovery.actor.cpp +++ b/fdbserver/workloads/TriggerRecovery.actor.cpp @@ -129,7 +129,7 @@ struct TriggerRecoveryLoopWorkload : TestWorkload { else tr.set("\xff\xff/reboot_worker"_sr, it.second); } - TraceEvent(SevInfo, "TriggerRecoveryLoop_AttempedKillAll").log(); + TraceEvent(SevInfo, "TriggerRecoveryLoop_AttemptedKillAll").log(); return Void(); } catch (Error& e) { wait(tr.onError(e)); @@ -148,7 +148,7 @@ struct TriggerRecoveryLoopWorkload : TestWorkload { wait(self->changeResolverConfig(cx, self)); } numRecoveriesDone++; - TraceEvent(SevInfo, "TriggerRecoveryLoop_AttempedRecovery").detail("RecoveryNum", numRecoveriesDone); + TraceEvent(SevInfo, "TriggerRecoveryLoop_AttemptedRecovery").detail("RecoveryNum", numRecoveriesDone); if (numRecoveriesDone == self->numRecoveries) { break; } diff --git a/fdbserver/workloads/UDPWorkload.actor.cpp b/fdbserver/workloads/UDPWorkload.actor.cpp index 034e0a6329b..b91f5a139ab 100644 --- a/fdbserver/workloads/UDPWorkload.actor.cpp +++ b/fdbserver/workloads/UDPWorkload.actor.cpp @@ -259,7 +259,7 @@ struct UDPWorkload : TestWorkload { } m.emplace_back("Sent", totalSent, Averaged::False); m.emplace_back("Received", totalReceived, Averaged::False); - m.emplace_back("Acknknowledged", totalAcked, Averaged::False); + m.emplace_back("Acknowledged", totalAcked, Averaged::False); m.emplace_back("Successes", totalSuccess, Averaged::False); } }; diff --git a/fdbserver/workloads/ValidateStorage.actor.cpp b/fdbserver/workloads/ValidateStorage.actor.cpp index 4ece80aee74..487274de27c 100644 --- a/fdbserver/workloads/ValidateStorage.actor.cpp +++ b/fdbserver/workloads/ValidateStorage.actor.cpp @@ -293,7 +293,7 @@ struct ValidateStorage : TestWorkload { } self->testStringToAuditPhaseFunctionality(); - TraceEvent("TestAuditStorageStringToAuditPhaseFuncionalityDone"); + TraceEvent("TestAuditStorageStringToAuditPhaseFunctionalityDone"); wait(self->testSSUserDataValidation(self, cx, KeyRangeRef("TestKeyA"_sr, "TestKeyF"_sr))); TraceEvent("TestAuditStorageValidateValueDone"); @@ -485,7 +485,7 @@ struct ValidateStorage : TestWorkload { UID auditIdD = wait(self->auditStorageForType( self, cx, AuditType::ValidateStorageServerShard, "TestAuditStorageFunctionality")); TraceEvent("TestFunctionalitySSShardInfoDone", auditIdD); - wait(self->testGetAuditStateWhenNoOngingAudit(self, cx)); + 
wait(self->testGetAuditStateWhenNoOngoingAudit(self, cx)); TraceEvent("TestGetAuditStateDone"); return Void(); } @@ -506,7 +506,7 @@ struct ValidateStorage : TestWorkload { return Void(); } - ACTOR Future testGetAuditStateWhenNoOngingAuditForType(ValidateStorage* self, Database cx, AuditType type) { + ACTOR Future testGetAuditStateWhenNoOngoingAuditForType(ValidateStorage* self, Database cx, AuditType type) { TraceEvent("TestGetAuditStateBegin").detail("AuditType", type); std::vector res1 = wait(getAuditStates(cx, type, /*newFirst=*/true, 1)); if (res1.size() > 1) { // == 0 if empty range when testAuditStorageFunctionality @@ -557,17 +557,17 @@ struct ValidateStorage : TestWorkload { return Void(); } - ACTOR Future testGetAuditStateWhenNoOngingAudit(ValidateStorage* self, Database cx) { - wait(self->testGetAuditStateWhenNoOngingAuditForType(self, cx, AuditType::ValidateHA)); + ACTOR Future testGetAuditStateWhenNoOngoingAudit(ValidateStorage* self, Database cx) { + wait(self->testGetAuditStateWhenNoOngoingAuditForType(self, cx, AuditType::ValidateHA)); TraceEvent("TestGetAuditStateHADone"); - wait(self->testGetAuditStateWhenNoOngingAuditForType(self, cx, AuditType::ValidateReplica)); + wait(self->testGetAuditStateWhenNoOngoingAuditForType(self, cx, AuditType::ValidateReplica)); TraceEvent("TestGetAuditStateReplicaDone"); - wait(self->testGetAuditStateWhenNoOngingAuditForType(self, cx, AuditType::ValidateLocationMetadata)); + wait(self->testGetAuditStateWhenNoOngoingAuditForType(self, cx, AuditType::ValidateLocationMetadata)); TraceEvent("TestGetAuditStateShardLocationMetadataDone"); - wait(self->testGetAuditStateWhenNoOngingAuditForType(self, cx, AuditType::ValidateStorageServerShard)); + wait(self->testGetAuditStateWhenNoOngoingAuditForType(self, cx, AuditType::ValidateStorageServerShard)); TraceEvent("TestGetAuditStateSSShardInfoDone"); return Void(); } @@ -694,14 +694,14 @@ struct ValidateStorage : TestWorkload { }; state std::vector progressRanges = shuffleRanges(progressRangesCollection); state int i = 0; - state std::vector alreadyPersisteRanges; + state std::vector alreadyPersistedRanges; for (; i < progressRanges.size(); i++) { state AuditStorageState auditState(auditId, auditType); auditState.range = progressRanges[i]; auditState.ddId = ddId; auditState.setPhase(AuditPhase::Complete); wait(self->persistAuditStateByRange(self, cx, auditState)); - alreadyPersisteRanges.push_back(progressRanges[i]); + alreadyPersistedRanges.push_back(progressRanges[i]); std::vector auditStates = wait(getAuditStateByRange(cx, auditType, auditId, allKeys)); for (int i = 0; i < auditStates.size(); i++) { KeyRange toCompare = auditStates[i].range; @@ -709,8 +709,8 @@ struct ValidateStorage : TestWorkload { bool fullyCovered = false; std::vector unCoveredRanges; unCoveredRanges.push_back(toCompare); - // check if toCompare is overlapped/fullyCovered by alreadyPersisteRanges - for (const auto& persistedRange : alreadyPersisteRanges) { + // check if toCompare is overlapped/fullyCovered by alreadyPersistedRanges + for (const auto& persistedRange : alreadyPersistedRanges) { KeyRange overlappedRange = toCompare & persistedRange; if (!overlappedRange.empty()) { overlapped = true; @@ -725,10 +725,10 @@ struct ValidateStorage : TestWorkload { unCoveredRanges = unCoveredRangesNow; } fullyCovered = unCoveredRanges.empty(); - if (fullyCovered) { // toCompare is fully covered by alreadyPersisteRanges + if (fullyCovered) { // toCompare is fully covered by alreadyPersistedRanges ASSERT(auditStates[i].getPhase() == 
AuditPhase::Complete); } else { - // toCompare cannot be partially covered by alreadyPersisteRanges + // toCompare cannot be partially covered by alreadyPersistedRanges ASSERT(!overlapped); ASSERT(auditStates[i].getPhase() == AuditPhase::Invalid); } diff --git a/flow/ActorCollection.actor.cpp b/flow/ActorCollection.actor.cpp index 39ecac56d4d..b9bbc8bec23 100644 --- a/flow/ActorCollection.actor.cpp +++ b/flow/ActorCollection.actor.cpp @@ -74,7 +74,7 @@ ACTOR Future actorCollection(FutureStream> addActor, loop choose { when(Future f = waitNext(addActor)) { - // Insert new Runner at the end of the instrusive list and get an iterator to it + // Insert new Runner at the end of the intrusive list and get an iterator to it auto i = runners.insert(runners.end(), *new Runner()); // Start the handler for completions or errors from f, sending runner to complete stream diff --git a/flow/ApiVersion.h.cmake b/flow/ApiVersion.h.cmake index 7d206dff9ed..7a9e7b47698 100644 --- a/flow/ApiVersion.h.cmake +++ b/flow/ApiVersion.h.cmake @@ -74,7 +74,7 @@ public: // introduced features API_VERSION_FEATURE(@FDB_AV_CLIENT_TMP_DIR@, ClientTmpDir); API_VERSION_FEATURE(@FDB_AV_DISABLE_CLIENT_BYPASS@, DisableClientBypass) API_VERSION_FEATURE(@FDB_AV_GRV_CACHE@, GrvCache); - API_VERSION_FEATURE(@FDB_AV_CLIENT_RROFILING_DEPRECATED@, ClientProfilingDeprecated); + API_VERSION_FEATURE(@FDB_AV_CLIENT_PROFILING_DEPRECATED@, ClientProfilingDeprecated); API_VERSION_FEATURE(@FDB_AV_TENANT_API_RELEASED@, TenantApiReleased); API_VERSION_FEATURE(@FDB_AV_GET_TOTAL_COST@, GetTotalCost); API_VERSION_FEATURE(@FDB_AV_FAIL_ON_EXTERNAL_CLIENT_ERRORS@, FailOnExternalClientErrors); diff --git a/flow/ApiVersions.cmake b/flow/ApiVersions.cmake index 1f4994505e6..bf63bd5d593 100644 --- a/flow/ApiVersions.cmake +++ b/flow/ApiVersions.cmake @@ -16,7 +16,7 @@ set(FDB_AV_TENANT_BLOB_RANGE_API "720") set(FDB_AV_CLIENT_TMP_DIR "720") set(FDB_AV_DISABLE_CLIENT_BYPASS "720") set(FDB_AV_GRV_CACHE "720") -set(FDB_AV_CLIENT_RROFILING_DEPRECATED "720") +set(FDB_AV_CLIENT_PROFILING_DEPRECATED "720") set(FDB_AV_TENANT_API_RELEASED "720") set(FDB_AV_GET_TOTAL_COST "730") set(FDB_AV_FAIL_ON_EXTERNAL_CLIENT_ERRORS "730") diff --git a/flow/Arena.cpp b/flow/Arena.cpp index 1c5d077a29a..93b534831e1 100644 --- a/flow/Arena.cpp +++ b/flow/Arena.cpp @@ -75,7 +75,7 @@ void makeUndefined(void* addr, size_t size) { VALGRIND_MAKE_MEM_UNDEFINED(addr, size); } } -#elif defined(ADDRESS_SANITZER) +#elif defined(ADDRESS_SANITIZER) void allowAccess(ArenaBlock* b) { if (b) { ASAN_UNPOISON_MEMORY_REGION(b, ArenaBlock::TINY_HEADER); diff --git a/flow/CMakeLists.txt b/flow/CMakeLists.txt index 2dff6b3c2e3..d1d1a13f65d 100644 --- a/flow/CMakeLists.txt +++ b/flow/CMakeLists.txt @@ -247,7 +247,7 @@ if (WITH_SWIFT) FLAGS -Xcc -std=c++20 -Xcc -DNO_INTELLISENSE -Xcc -ivfsoverlay${CMAKE_BINARY_DIR}/flow/include/headeroverlay.yaml # Important: This is needed to avoid including headers that depends on this generated header. 
- -Xcc -DSWIFT_FUTURE_SUPPORT_H -Xcc -DSWIFT_STREAM_SUPPORT_H -Xcc -DSWIFT_HIDE_CHECKED_CONTINUTATION + -Xcc -DSWIFT_FUTURE_SUPPORT_H -Xcc -DSWIFT_STREAM_SUPPORT_H -Xcc -DSWIFT_HIDE_CHECKED_CONTINUATION ) add_swift_to_cxx_header_gen_target( diff --git a/flow/Knobs.cpp b/flow/Knobs.cpp index 4837afafc6d..3b0bbd16302 100644 --- a/flow/Knobs.cpp +++ b/flow/Knobs.cpp @@ -188,8 +188,8 @@ void FlowKnobs::initialize(Randomize randomize, IsSimulated isSimulated) { init( MAX_PRIOR_MODIFICATION_DELAY, 1.0 ); if( randomize && BUGGIFY ) MAX_PRIOR_MODIFICATION_DELAY = 10.0; //AsyncFileWriteChecker - init( ASYNC_FILE_WRITE_CHEKCER_LOGGING_INTERVAL, 60.0 ); - init( ASYNC_FILE_WRITE_CHEKCER_CHECKING_DELAY, 5.0 ); + init( ASYNC_FILE_WRITE_CHECKER_LOGGING_INTERVAL, 60.0 ); + init( ASYNC_FILE_WRITE_CHECKER_CHECKING_DELAY, 5.0 ); //GenericActors init( BUGGIFY_FLOW_LOCK_RELEASE_DELAY, 1.0 ); diff --git a/flow/Platform.actor.cpp b/flow/Platform.actor.cpp index 59aa5d2a8d6..8309c4c3061 100644 --- a/flow/Platform.actor.cpp +++ b/flow/Platform.actor.cpp @@ -581,7 +581,7 @@ void getMachineRAMInfo(MachineRAMInfo& memInfo) { throw platform_error(); } - PERFORMACE_INFORMATION perf; + PERFORMANCE_INFORMATION perf; if (!GetPerformanceInfo(&perf, sizeof(perf))) { TraceEvent(SevError, "WindowsGetMemPerformanceInfo").GetLastError(); throw platform_error(); @@ -3271,7 +3271,7 @@ void outOfMemory() { TraceEvent("MemSample") .detail("Count", memSampleSize) .detail("TotalSize", memSampleSize * ((int)(sizeof(void*) + sizeof(uint32_t) + sizeof(size_t)))) - .detail("SapmleCount", memSampleSize) + .detail("SampleCount", memSampleSize) .detail("Hash", "memSamples") .detail("Bt", "na"); TRACEALLOCATOR(16); @@ -3488,7 +3488,7 @@ ImageInfo getImageInfo() { size_t raw_backtrace(void** addresses, int maxStackDepth) { #if !defined(__APPLE__) - // absl::GetStackTrace doesn't have an implementation for MacOS. + // absl::GetStackTrace doesn't have an implementation for macOS. return absl::GetStackTrace(addresses, maxStackDepth, 0); #else return backtrace(addresses, maxStackDepth); diff --git a/flow/README.md b/flow/README.md index a1063ca16ff..0c17a76e9f1 100644 --- a/flow/README.md +++ b/flow/README.md @@ -599,7 +599,7 @@ longer be valid. #### Use of Standalone Objects in ACTOR Functions -Special care needs to be taken when using using `Standalone` values in actor functions. +Special care needs to be taken when using `Standalone` values in actor functions. Consider the following example: ``` diff --git a/flow/StreamCipher.cpp b/flow/StreamCipher.cpp index 8cd4f95c558..f4e7d1e1b07 100644 --- a/flow/StreamCipher.cpp +++ b/flow/StreamCipher.cpp @@ -198,7 +198,7 @@ TEST_CASE("flow/StreamCipher") { while (index < plaintext.size()) { const auto chunkSize = std::min(deterministicRandom()->randomInt(1, 101), plaintext.size() - index); const auto encrypted = encryptor.encrypt(&plaintext[index], chunkSize, arena); - TraceEvent("StreamCipherTestEcryptedChunk") + TraceEvent("StreamCipherTestEncryptedChunk") .detail("EncryptedSize", encrypted.size()) .detail("EncryptedOffset", encryptedOffset) .detail("Index", index); diff --git a/flow/aarch64/asmdefs.h b/flow/aarch64/asmdefs.h index 0c142984584..e083accff60 100644 --- a/flow/aarch64/asmdefs.h +++ b/flow/aarch64/asmdefs.h @@ -12,7 +12,7 @@ // clang-format off -/* Branch Target Identitication support. */ +/* Branch Target Identification support. */ #define BTI_C hint 34 #define BTI_J hint 36 /* Return address signing support (pac-ret). 
*/ diff --git a/flow/flow.cpp b/flow/flow.cpp index aa4f4651459..73939d9f901 100644 --- a/flow/flow.cpp +++ b/flow/flow.cpp @@ -88,7 +88,7 @@ void* rte_memcpy_noinline(void* __restrict __dest, const void* __restrict __src, return rte_memcpy(__dest, __src, __n); } -// This compilation unit will be linked in to the main binary, so this should override glibc memcpy +// This compilation unit will be linked into the main binary, so this should override glibc memcpy __attribute__((visibility("default"))) void* memcpy(void* __restrict __dest, const void* __restrict __src, size_t __n) { // folly_memcpy is faster for small copies, but rte seems to win out in most other circumstances return rte_memcpy(__dest, __src, __n); diff --git a/flow/include/flow/CompressionUtils.h b/flow/include/flow/CompressionUtils.h index 807d45c60dc..074af1e7378 100644 --- a/flow/include/flow/CompressionUtils.h +++ b/flow/include/flow/CompressionUtils.h @@ -69,4 +69,4 @@ struct CompressionUtils { static std::unordered_set supportedFilters; }; -#endif // FLOW_COMPRRESSION_UTILS_H +#endif // FLOW_COMPRESSION_UTILS_H diff --git a/flow/include/flow/CoroUtils.h b/flow/include/flow/CoroUtils.h index 65547f19d71..43e3df40538 100644 --- a/flow/include/flow/CoroUtils.h +++ b/flow/include/flow/CoroUtils.h @@ -189,7 +189,7 @@ class ChooseClause { template auto When(FutureStream const& futureStream, std::invocable auto fun) { static_assert(std::is_same_v())), void>, - "When-handler must return void (and't can't be awaitable)"); + "When-handler must return void (and can't be awaitable)"); if (noop) { return getNoop(futureStream); } diff --git a/flow/include/flow/DeterministicRandom.h b/flow/include/flow/DeterministicRandom.h index a7e4a40c14b..b39441aa077 100644 --- a/flow/include/flow/DeterministicRandom.h +++ b/flow/include/flow/DeterministicRandom.h @@ -18,8 +18,8 @@ * limitations under the License. 
*/ -#ifndef FLOW_DETERIMINISTIC_RANDOM_H -#define FLOW_DETERIMINISTIC_RANDOM_H +#ifndef FLOW_DETERMINISTIC_RANDOM_H +#define FLOW_DETERMINISTIC_RANDOM_H #pragma once #include diff --git a/flow/include/flow/EncryptUtils.h b/flow/include/flow/EncryptUtils.h index f2626ce55d4..0f780f9a95f 100644 --- a/flow/include/flow/EncryptUtils.h +++ b/flow/include/flow/EncryptUtils.h @@ -61,7 +61,7 @@ static const std::unordered_set ENCRYPT_CIPHER_SYSTEM_DOM ENCRYPT_HEADER_DOMAIN_ID }; -static const std::unordered_set ENCRYPT_CIPHER_DETAULT_DOMAINS = { +static const std::unordered_set ENCRYPT_CIPHER_DEFAULT_DOMAINS = { SYSTEM_KEYSPACE_ENCRYPT_DOMAIN_ID, ENCRYPT_HEADER_DOMAIN_ID, FDB_DEFAULT_ENCRYPT_DOMAIN_ID, @@ -102,7 +102,7 @@ typedef enum { } EncryptAuthTokenAlgo; static_assert(EncryptAuthTokenAlgo::ENCRYPT_HEADER_AUTH_TOKEN_ALGO_LAST <= std::numeric_limits::max(), - "EncryptHeaerAuthTokenAlgo value overflow"); + "EncryptHeaderAuthTokenAlgo value overflow"); bool isEncryptHeaderAuthTokenModeValid(const EncryptAuthTokenMode mode); bool isEncryptHeaderAuthTokenAlgoValid(const EncryptAuthTokenAlgo algo); diff --git a/flow/include/flow/Error.h b/flow/include/flow/Error.h index df4a7b492ae..f2ae1d27796 100644 --- a/flow/include/flow/Error.h +++ b/flow/include/flow/Error.h @@ -233,7 +233,7 @@ bool assert_check_ge(T const& a, U const& b) { EXTERNC void breakpoint_me(); #ifdef FDB_CLEAN_BUILD -#define NOT_IN_CLEAN static_assert(false, "This code can not be enabled in a clean build."); +#define NOT_IN_CLEAN static_assert(false, "This code cannot be enabled in a clean build."); #else #define NOT_IN_CLEAN #endif diff --git a/flow/include/flow/Knobs.h b/flow/include/flow/Knobs.h index 695ab5999be..7d93f5464ba 100644 --- a/flow/include/flow/Knobs.h +++ b/flow/include/flow/Knobs.h @@ -253,8 +253,8 @@ class FlowKnobs : public KnobsImpl { double MAX_PRIOR_MODIFICATION_DELAY; // AsyncFileWriteChecker - double ASYNC_FILE_WRITE_CHEKCER_LOGGING_INTERVAL; - double ASYNC_FILE_WRITE_CHEKCER_CHECKING_DELAY; + double ASYNC_FILE_WRITE_CHECKER_LOGGING_INTERVAL; + double ASYNC_FILE_WRITE_CHECKER_CHECKING_DELAY; // GenericActors double BUGGIFY_FLOW_LOCK_RELEASE_DELAY; @@ -296,7 +296,7 @@ class FlowKnobs : public KnobsImpl { bool RESOLVE_PREFER_IPV4_ADDR; // Sim2 - // FIMXE: more parameters could be factored out + // FIXME: more parameters could be factored out double MIN_OPEN_TIME; double MAX_OPEN_TIME; int64_t SIM_DISK_IOPS; diff --git a/flow/include/flow/OTELMetrics.h b/flow/include/flow/OTELMetrics.h index 320931b2eb9..a49f40b9828 100644 --- a/flow/include/flow/OTELMetrics.h +++ b/flow/include/flow/OTELMetrics.h @@ -36,7 +36,7 @@ we choose to not cover the version of OTELSum which uses double Furthermore, we also diverge from the protobuf definition of HistogramDataPoint by using DDSketch. - This means that that there is an additional field for storing the errorGuarantee (a double). Also, to save some + This means that there is an additional field for storing the errorGuarantee (a double). Also, to save some space the buckets are uint32_t instead of uint64_t. The reason for this is due to the fact that it is highly unlikely that a single bucket would hit it's threshold with the default error guarantee of 1%. diff --git a/flow/include/flow/OwningResource.h b/flow/include/flow/OwningResource.h index 96351297338..72b5071a61f 100644 --- a/flow/include/flow/OwningResource.h +++ b/flow/include/flow/OwningResource.h @@ -18,8 +18,8 @@ * limitations under the License. 
*/ -#ifndef FLOW_OWNING_REOSURCE_H -#define FLOW_OWNING_REOSURCE_H +#ifndef FLOW_OWNING_RESOURCE_H +#define FLOW_OWNING_RESOURCE_H #include "flow/FastRef.h" @@ -152,4 +152,4 @@ class ActorWeakSelfRef : public ResourceWeakRef { T& operator*() const { return *(this->operator->()); } }; -#endif // FLOW_OWNING_REOSURCE_H \ No newline at end of file +#endif // FLOW_OWNING_RESOURCE_H \ No newline at end of file diff --git a/flow/include/flow/Platform.h b/flow/include/flow/Platform.h index 06f082afc47..a88d4a37a51 100644 --- a/flow/include/flow/Platform.h +++ b/flow/include/flow/Platform.h @@ -381,8 +381,8 @@ std::string popPath(const std::string& path); // abspath() resolves the given path to a canonical form. // If path is relative, the result will be based on the current working directory. // If resolveLinks is true then symbolic links will be expanded BEFORE resolving '..' references. -// An empty path or a non-existent path when mustExist is true will result in a platform_error() exception. -// Upon success, all '..' references will be resolved with the assumption that non-existent components +// An empty path or a nonexistent path when mustExist is true will result in a platform_error() exception. +// Upon success, all '..' references will be resolved with the assumption that nonexistent components // are NOT symbolic links. // User directory references such as '~' or '~user' are effectively treated as symbolic links which // are impossible to resolve, so resolveLinks=true results in failure and resolveLinks=false results @@ -429,10 +429,10 @@ int setEnvironmentVar(const char* name, const char* value, int overwrite); std::string getWorkingDirectory(); -// Returns the absolute platform-dependant path for server-based files +// Returns the absolute platform-dependent path for server-based files std::string getDefaultConfigPath(); -// Returns the absolute platform-dependant path for the default fdb.cluster file +// Returns the absolute platform-dependent path for the default fdb.cluster file std::string getDefaultClusterFilePath(); struct ImageInfo { diff --git a/flow/include/flow/PriorityMultiLock.actor.h b/flow/include/flow/PriorityMultiLock.actor.h index e525b580663..a6edc6289ed 100644 --- a/flow/include/flow/PriorityMultiLock.actor.h +++ b/flow/include/flow/PriorityMultiLock.actor.h @@ -111,7 +111,7 @@ class PriorityMultiLock : public ReferenceCounted { // If this priority currently has no waiters if (q.empty()) { // Add this priority's weight to the total for priorities with pending work. This must be done - // so that currenctCapacity() below will assign capacaity to this priority. + // so that currentCapacity() below will assign capacity to this priority. totalPendingWeights += p.weight; // If there are slots available and the priority has capacity then don't make the caller wait @@ -244,7 +244,7 @@ class PriorityMultiLock : public ReferenceCounted { typedef boost::intrusive::list> WaitingPrioritiesList; // List of all priorities with 1 or more waiters. This list exists so that the scheduling loop - // does not have to iterage over the priorities vector checking priorities without waiters. + // does not have to iterate over the priorities vector checking priorities without waiters. 
WaitingPrioritiesList waitingPriorities; Future fRunner; diff --git a/flow/include/flow/TDMetric.actor.h b/flow/include/flow/TDMetric.actor.h index 73b1c6eb121..f45944b4b88 100644 --- a/flow/include/flow/TDMetric.actor.h +++ b/flow/include/flow/TDMetric.actor.h @@ -651,7 +651,7 @@ class FieldLevel { self->header.update(oldHeader); // Any blocks already in the metrics queue will need to be patched at the time that they are - // flushed to the DB (which isn't necessarity part of the current flush) so set the last time + // flushed to the DB (which isn't necessarily part of the current flush) so set the last time // that requires a patch to the time of the last MetricData in the queue self->lastTimeRequiringHeaderPatch = self->metrics.back().rollTime; } else { diff --git a/flow/include/flow/ThreadPrimitives.h b/flow/include/flow/ThreadPrimitives.h index 88761ca394f..f83ae5d3b6d 100644 --- a/flow/include/flow/ThreadPrimitives.h +++ b/flow/include/flow/ThreadPrimitives.h @@ -139,7 +139,7 @@ class Event { }; class Mutex { - // A re-entrant process-local blocking lock (e.g. CRITICAL_SECTION on Windows) + // A reentrant process-local blocking lock (e.g. CRITICAL_SECTION on Windows) // Thread safe even if !FLOW_THREAD_SAFE public: Mutex(); diff --git a/flow/include/flow/Trace.h b/flow/include/flow/Trace.h index fbf1efec49d..6837d55951c 100644 --- a/flow/include/flow/Trace.h +++ b/flow/include/flow/Trace.h @@ -414,7 +414,7 @@ struct SWIFT_CXX_IMPORT_OWNED BaseTraceEvent { // Return the number of invocations of TraceEvent() at the specified logging level. static unsigned long CountEventsLoggedAt(Severity); - std::unique_ptr tmpEventMetric; // This just just a place to store fields + std::unique_ptr tmpEventMetric; // This is just a place to store fields const TraceEventFields& getFields() const { return fields; } Severity getSeverity() const { return severity; } diff --git a/flow/include/flow/error_definitions.h b/flow/include/flow/error_definitions.h index a17e5f5b581..79e9627f489 100755 --- a/flow/include/flow/error_definitions.h +++ b/flow/include/flow/error_definitions.h @@ -216,7 +216,7 @@ ERROR( invalid_config_db_key, 2028, "Invalid configuration database key provided ERROR( invalid_config_path, 2029, "Invalid configuration path" ) ERROR( mapper_bad_index, 2030, "The index in K[] or V[] is not a valid number or out of range" ) ERROR( mapper_no_such_key, 2031, "A mapped key is not set in database" ) -ERROR( mapper_bad_range_decriptor, 2032, "\"{...}\" must be the last element of the mapper tuple" ) +ERROR( mapper_bad_range_descriptor, 2032, "\"{...}\" must be the last element of the mapper tuple" ) ERROR( quick_get_key_values_has_more, 2033, "One of the mapped range queries is too large" ) ERROR( quick_get_value_miss, 2034, "Found a mapped key that is not served in the same SS" ) ERROR( quick_get_key_values_miss, 2035, "Found a mapped range that is not served in the same SS" ) diff --git a/flow/include/flow/flow.h b/flow/include/flow/flow.h index 682efdf13aa..6b8a5b8457d 100644 --- a/flow/include/flow/flow.h +++ b/flow/include/flow/flow.h @@ -61,7 +61,7 @@ // Flow_CheckedContinuation.h depends on this header, so we first parse it // without relying on any imported Swift types. 
-#ifndef SWIFT_HIDE_CHECKED_CONTINUTATION +#ifndef SWIFT_HIDE_CHECKED_CONTINUATION #include "SwiftModules/Flow_CheckedContinuation.h" #endif /* SWIFT_HIDE_CHECKED_CONTINUATION */ @@ -958,7 +958,7 @@ template class Promise; #ifdef WITH_SWIFT -#ifndef SWIFT_HIDE_CHECKED_CONTINUTATION +#ifndef SWIFT_HIDE_CHECKED_CONTINUATION using flow_swift::FlowCheckedContinuation; template @@ -1013,7 +1013,7 @@ FlowCallbackForSwiftContinuation : Callback { template class SWIFT_SENDABLE -#ifndef SWIFT_HIDE_CHECKED_CONTINUTATION +#ifndef SWIFT_HIDE_CHECKED_CONTINUATION #ifdef WITH_SWIFT SWIFT_CONFORMS_TO_PROTOCOL(flow_swift.FlowFutureOps) #endif @@ -1022,7 +1022,7 @@ SWIFT_CONFORMS_TO_PROTOCOL(flow_swift.FlowFutureOps) public: using Element = T; #ifdef WITH_SWIFT -#ifndef SWIFT_HIDE_CHECKED_CONTINUTATION +#ifndef SWIFT_HIDE_CHECKED_CONTINUATION using FlowCallbackForSwiftContinuation = FlowCallbackForSwiftContinuation; #endif #endif /* WITH_SWIFT */ diff --git a/flow/include/flow/genericactors.actor.h b/flow/include/flow/genericactors.actor.h index 73582f371d0..dad582de8eb 100644 --- a/flow/include/flow/genericactors.actor.h +++ b/flow/include/flow/genericactors.actor.h @@ -2401,7 +2401,7 @@ class UnsafeWeakFutureReference { }; // Utility class to provide FLOW compliant singleton pattern. -// In similuation, the approach allows per-virtual process singleton as desired compared to one singleton instance +// In simulation, the approach allows per-virtual process singleton as desired compared to one singleton instance // shared across all virtual processes if 'static singleton' pattern is implemented. // // API NOTE: Client are expected to pass functor allowing instantiation of the template class diff --git a/flow/include/flow/network.h b/flow/include/flow/network.h index ba5da2e9eb2..d8b33b2ae0a 100644 --- a/flow/include/flow/network.h +++ b/flow/include/flow/network.h @@ -71,7 +71,7 @@ struct NetworkMetrics { std::unordered_map activeTrackers; double lastRunLoopBusyness; // network thread busyness (measured every 5s by default) std::atomic - networkBusyness; // network thread busyness which is returned to the the client (measured every 1s by default) + networkBusyness; // network thread busyness which is returned to the client (measured every 1s by default) // starvation trackers which keeps track of different task priorities std::vector starvationTrackers; diff --git a/flow/include/flow/sse2neon.h b/flow/include/flow/sse2neon.h index 3e95d90c1b7..6949dbe9e8f 100644 --- a/flow/include/flow/sse2neon.h +++ b/flow/include/flow/sse2neon.h @@ -1,4 +1,4 @@ -///// THIS IS AN EXTRNAL PROJECT: https://github.com/DLTcollab/sse2neon ///// +///// THIS IS AN EXTERNAL PROJECT: https://github.com/DLTcollab/sse2neon ///// #ifndef SSE2NEON_H #define SSE2NEON_H @@ -2029,7 +2029,7 @@ FORCE_INLINE __m128i _mm_max_epi16(__m128i a, __m128i b) { } // epi versions of min/max -// Computes the pariwise maximums of the four signed 32-bit integer values of a +// Computes the pairwise maximums of the four signed 32-bit integer values of a // and b. // // A 128-bit parameter that can be defined with the following equations: @@ -2043,7 +2043,7 @@ FORCE_INLINE __m128i _mm_max_epi32(__m128i a, __m128i b) { return vreinterpretq_m128i_s32(vmaxq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b))); } -// Computes the pariwise minima of the four signed 32-bit integer values of a +// Computes the pairwise minima of the four signed 32-bit integer values of a // and b. 
// // A 128-bit parameter that can be defined with the following equations: diff --git a/flow/include/flow/swift_concurrency_hooks.h b/flow/include/flow/swift_concurrency_hooks.h index 806c23df772..ac4caf49314 100644 --- a/flow/include/flow/swift_concurrency_hooks.h +++ b/flow/include/flow/swift_concurrency_hooks.h @@ -110,7 +110,7 @@ // are known to be exported from a different image. This never // includes a definition. // -// Getting the right attribute on a declaratioon can be pretty awkward, +// Getting the right attribute on a declaration can be pretty awkward, // but it's necessary under the C translation model. All of this // ceremony is familiar to Windows programmers; C/C++ programmers // everywhere else usually don't bother, but since we have to get it diff --git a/flow/include/flow/xxhash.h b/flow/include/flow/xxhash.h index 4679b867278..71a18b11931 100644 --- a/flow/include/flow/xxhash.h +++ b/flow/include/flow/xxhash.h @@ -292,7 +292,7 @@ XXH_PUBLIC_API XXH32_hash_t XXH32(const void* input, size_t length, XXH32_hash_t /******* Streaming *******/ /* - * Streaming functions generate the xxHash value from an incrememtal input. + * Streaming functions generate the xxHash value from an incremental input. * This method is slower than single-call functions, due to state management. * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. * @@ -1038,8 +1038,8 @@ static xxh_u32 XXH_read32(const void* memPtr) { #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ -/* *** Endianess *** */ -typedef enum { XXH_bigEndian = 0, XXH_littleEndian = 1 } XXH_endianess; +/* *** Endianness *** */ +typedef enum { XXH_bigEndian = 0, XXH_littleEndian = 1 } XXH_endianness; /*! * XXH_CPU_LITTLE_ENDIAN: @@ -1184,7 +1184,7 @@ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) { * UGLY HACK: * This inline assembly hack forces acc into a normal register. This is the * only thing that prevents GCC and Clang from autovectorizing the XXH32 - * loop (pragmas and attributes don't work for some resason) without globally + * loop (pragmas and attributes don't work for some reason) without globally * disabling SSE4.1. * * The reason we want to avoid vectorization is because despite working on @@ -4578,7 +4578,7 @@ XXH_PUBLIC_API XXH128_hash_t XXH128(const void* input, size_t len, XXH64_hash_t /* * All the functions are actually the same as for 64-bit streaming variant. - * The only difference is the finalizatiom routine. + * The only difference is the finalization routine. 
*/ static void XXH3_128bits_reset_internal(XXH3_state_t* statePtr, diff --git a/flow/protocolversion/protocol_version.py b/flow/protocolversion/protocol_version.py index 9a450b7a8f9..cf2c19711b4 100755 --- a/flow/protocolversion/protocol_version.py +++ b/flow/protocolversion/protocol_version.py @@ -30,7 +30,7 @@ class ProtocolVersion: def __init__(self): self._default_version = None self._future_version = None - self._min_compatibile_version = None + self._min_compatible_version = None self._min_invalid_version = None self._left_most_check = None self._lsb_mask = None @@ -44,7 +44,7 @@ def set_future_version(self, version: int): self._future_version = version def set_min_compatible_version(self, version: int): - self._min_compatibile_version = version + self._min_compatible_version = version def set_min_invalid_version(self, version: int): self._min_invalid_version = version @@ -65,7 +65,7 @@ def future_version(self): @property def min_compatible_version(self): - return self._min_compatibile_version + return self._min_compatible_version @property def min_invalid_version(self): @@ -158,7 +158,7 @@ def _save(self, protocol_version: ProtocolVersion, stream: io.TextIOWrapper): raise NotImplementedError() -class JSONProtocolVersionSerialzer(ProtocolVersionSerializerBase): +class JSONProtocolVersionSerializer(ProtocolVersionSerializerBase): def _load(self, stream: io.TextIOWrapper) -> ProtocolVersion: raise NotImplementedError() diff --git a/flowbench/BenchEncrypt.cpp b/flowbench/BenchEncrypt.cpp index 54f00a3f607..1578506054b 100644 --- a/flowbench/BenchEncrypt.cpp +++ b/flowbench/BenchEncrypt.cpp @@ -82,7 +82,7 @@ BENCHMARK(bench_decrypt)->Ranges({ { 1 << 12, 1 << 20 }, { 1, 1 << 12 } }); // DEPRECATED -- Use EncryptionOps for benchmarking purposes. -// blob_chipher* benchmarks are following the encrypt and decrypt unittests from BlobCipher.cpp +// blob_cipher* benchmarks are following the encrypt and decrypt unittests from BlobCipher.cpp // Construct a dummy External Key Manager representation and populate with some keys class BaseCipher : public ReferenceCounted, NonCopyable { public: @@ -145,7 +145,7 @@ void static SetupEncryptCipher() { } } -static void blob_chipher_encrypt(benchmark::State& state) { +static void blob_cipher_encrypt(benchmark::State& state) { const EncryptCipherDomainId minDomainId = 1; const int pageLen = state.range(0); const bool isInplace = state.range(1); @@ -180,7 +180,7 @@ static void blob_chipher_encrypt(benchmark::State& state) { } } -static void blob_chipher_decrypt(benchmark::State& state) { +static void blob_cipher_decrypt(benchmark::State& state) { const EncryptCipherDomainId minDomainId = 1; const int pageLen = state.range(0); const bool isInplace = state.range(1); @@ -224,7 +224,7 @@ static void blob_chipher_decrypt(benchmark::State& state) { } } -static void blob_chipher_args(benchmark::internal::Benchmark* b) { +static void blob_cipher_args(benchmark::internal::Benchmark* b) { for (int pageLen : { 8000, 16000 }) { for (bool isInplace : { false, true }) { b->Args({ pageLen, isInplace }); @@ -233,5 +233,5 @@ static void blob_chipher_args(benchmark::internal::Benchmark* b) { b->ArgNames({ "pageLen", "isInplace" }); } -BENCHMARK(blob_chipher_encrypt)->Apply(blob_chipher_args); -BENCHMARK(blob_chipher_decrypt)->Apply(blob_chipher_args); +BENCHMARK(blob_cipher_encrypt)->Apply(blob_cipher_args); +BENCHMARK(blob_cipher_decrypt)->Apply(blob_cipher_args); diff --git a/flowbench/CMakeLists.txt b/flowbench/CMakeLists.txt index 50d7e27b310..2c02923c14a 100644 --- 
a/flowbench/CMakeLists.txt +++ b/flowbench/CMakeLists.txt @@ -14,7 +14,7 @@ elseif(EXISTS /opt/googlebenchmark-f91b6b-g++ AND NOT USE_LIBCXX) target_include_directories(flowbench PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" /opt/googlebenchmark-f91b6b-g++/include) target_link_directories(flowbench PRIVATE /opt/googlebenchmark-f91b6b-g++/lib64) else() - ## This seems to be copy-pasted from the the google benchmark documentation. + ## This seems to be copy-pasted from the google benchmark documentation. ## It breaks if you attempt to re-use a build of googlebenchmark across FDB ## builds. diff --git a/layers/pubsub/ps_tutorial.py b/layers/pubsub/ps_tutorial.py index fb1a1a382ce..862c48ba1a3 100644 --- a/layers/pubsub/ps_tutorial.py +++ b/layers/pubsub/ps_tutorial.py @@ -28,7 +28,7 @@ - Inbox: An inbox receives messages posted to feeds - Subscriptions: A subscription connects an inbox to a feed -Simply put, an inbox receives receives all messages posted to the feeds it subscribes to. +Simply put, an inbox receives all messages posted to the feeds it subscribes to. The pub/sub layer allows management of feeds, inboxes, and subscriptions, as well as the actual delivery of messages. diff --git a/metacluster/include/metacluster/ConfigureTenant.actor.h b/metacluster/include/metacluster/ConfigureTenant.actor.h index 41b47b5c226..833c71f12a8 100644 --- a/metacluster/include/metacluster/ConfigureTenant.actor.h +++ b/metacluster/include/metacluster/ConfigureTenant.actor.h @@ -154,7 +154,7 @@ struct ConfigureTenantImpl { state Optional tenantEntry = wait(tryGetTenantTransaction(tr, self->tenantName)); if (!tenantEntry.present()) { - CODE_PROBE(true, "Configure non-existent tenant"); + CODE_PROBE(true, "Configure nonexistent tenant"); throw tenant_not_found(); } @@ -266,7 +266,7 @@ struct ConfigureTenantImpl { state Optional tenantEntry = wait(tryGetTenantTransaction(tr, self->tenantName)); if (!tenantEntry.present()) { - CODE_PROBE(true, "Configure tenant state for non-existent tenant", probe::decoration::rare); + CODE_PROBE(true, "Configure tenant state for nonexistent tenant", probe::decoration::rare); throw tenant_not_found(); } diff --git a/metacluster/include/metacluster/RemoveCluster.actor.h b/metacluster/include/metacluster/RemoveCluster.actor.h index 1d992d92ac9..b1aa686faab 100644 --- a/metacluster/include/metacluster/RemoveCluster.actor.h +++ b/metacluster/include/metacluster/RemoveCluster.actor.h @@ -146,7 +146,7 @@ struct RemoveClusterImpl { } } } else { - CODE_PROBE(true, "Data cluster remove non-existent cluster"); + CODE_PROBE(true, "Data cluster remove nonexistent cluster"); } // Insert a tombstone marking this cluster removed even if we aren't registered diff --git a/metacluster/include/metacluster/RestoreCluster.actor.h b/metacluster/include/metacluster/RestoreCluster.actor.h index abf152aada0..f3338415806 100644 --- a/metacluster/include/metacluster/RestoreCluster.actor.h +++ b/metacluster/include/metacluster/RestoreCluster.actor.h @@ -118,7 +118,7 @@ struct RestoreClusterImpl { static Future eraseRestoreId(RestoreClusterImpl* self, Transaction tr) { Optional transactionId = wait(metadata::activeRestoreIds().get(tr, self->clusterName)); if (!transactionId.present()) { - CODE_PROBE(true, "Erasing non-existent restore ID"); + CODE_PROBE(true, "Erasing nonexistent restore ID"); return false; } else if (transactionId.get() != self->restoreId) { CODE_PROBE(true, "Conflicting restore detected while erasing restore ID"); diff --git 
a/packaging/docker/build-images.sh b/packaging/docker/build-images.sh index 324321ef4d1..32dab64f4e8 100755 --- a/packaging/docker/build-images.sh +++ b/packaging/docker/build-images.sh @@ -244,8 +244,8 @@ aws_region="us-west-2" aws_account_id=$(aws --output text sts get-caller-identity --query 'Account') build_date=$(date +"%Y-%m-%dT%H:%M:%S%z") build_output_directory="${script_dir}/../../" -source_code_diretory=$(awk -F= '/foundationdb_SOURCE_DIR:STATIC/{print $2}' "${build_output_directory}/CMakeCache.txt") -commit_sha=$(cd "${source_code_diretory}" && git rev-parse --verify HEAD --short=10) +source_code_directory=$(awk -F= '/foundationdb_SOURCE_DIR:STATIC/{print $2}' "${build_output_directory}/CMakeCache.txt") +commit_sha=$(cd "${source_code_directory}" && git rev-parse --verify HEAD --short=10) fdb_version=$(cat "${build_output_directory}/version.txt") fdb_library_versions=( '5.1.7' '6.1.13' '6.2.30' '6.3.18' "${fdb_version}" ) fdb_website="https://github.com/apple/foundationdb/releases/download" diff --git a/recipes/java-recipes/MicroSpatial.java b/recipes/java-recipes/MicroSpatial.java index cc18ca80739..eb404405854 100644 --- a/recipes/java-recipes/MicroSpatial.java +++ b/recipes/java-recipes/MicroSpatial.java @@ -37,7 +37,7 @@ public class MicroSpatial { zLabel = new Subspace(Tuple.from("Z")); } - // TODO These three methods, xyToZ, zToXy, and setLocation, are allin the recipe book. + // TODO These three methods, xyToZ, zToXy, and setLocation, are all in the recipe book. public long xyToZ(long[] p){ long x,y,z; x = p[0]; y = p[1]; diff --git a/recipes/java-recipes/MicroTable.java b/recipes/java-recipes/MicroTable.java index 2b46252284e..015a4d2d654 100644 --- a/recipes/java-recipes/MicroTable.java +++ b/recipes/java-recipes/MicroTable.java @@ -168,7 +168,7 @@ public static void smokeTest(){ * 10 Clarence Thomas Bush (41) Yale * * During the test, we make one row change (replacing Kagan with - * her predcessor, John Paul Stevens) and one column change + * her predecessor, John Paul Stevens) and one column change * (replacing the short forms of the Presidential names with * longer versions). */ diff --git a/tests/TestRunner/TestRunner.py b/tests/TestRunner/TestRunner.py index ef1b5d6ce17..fcbf599eb1a 100755 --- a/tests/TestRunner/TestRunner.py +++ b/tests/TestRunner/TestRunner.py @@ -145,7 +145,7 @@ def log_trace_parse_error(self, linenr, e): _logger.error("Exception {} args: {}".format(type(e), e.args)) _logger.error("Line: '{}'".format(linenr)) obj["Severity"] = "warning" - obj["Type"] = "TestInfastructureLogLineGarbled" + obj["Type"] = "TestInfrastructureLogLineGarbled" obj["isLastLine"] = "TestFailure" obj["TraceLine"] = linenr obj["File"] = self.infile diff --git a/tests/TestRunner/binary_download.py b/tests/TestRunner/binary_download.py index e2c6b351223..ab85ae47407 100644 --- a/tests/TestRunner/binary_download.py +++ b/tests/TestRunner/binary_download.py @@ -119,13 +119,13 @@ def download_old_binary( assert local_file_tmp.exists(), "{} does not exist".format(local_file_tmp) assert local_sha256.exists(), "{} does not exist".format(local_sha256) expected_checksum = read_to_str(local_sha256)[0:64] - actual_checkum = compute_sha256(local_file_tmp) - if expected_checksum == actual_checkum: + actual_checksum = compute_sha256(local_file_tmp) + if expected_checksum == actual_checksum: print("Checksum OK") break print( "Checksum mismatch. 
Expected: {} Actual: {}".format( - expected_checksum, actual_checkum + expected_checksum, actual_checksum ) ) diff --git a/tests/python_tests/python_correctness.py b/tests/python_tests/python_correctness.py index aa0cdcefaae..30884cf27f5 100755 --- a/tests/python_tests/python_correctness.py +++ b/tests/python_tests/python_correctness.py @@ -254,7 +254,7 @@ def test_functions(self, db): except KeyboardInterrupt: raise except Exception: - self.result.add_error(self.get_error("Get non-existent key failed")) + self.result.add_error(self.get_error("Get nonexistent key failed")) try: tr.commit().wait() @@ -315,7 +315,7 @@ def test_functions(self, db): except KeyboardInterrupt: raise except Exception: - self.result.add_error(self.get_error("Delete non-existent key failed")) + self.result.add_error(self.get_error("Delete nonexistent key failed")) try: tr.set("testkey", "testvalue")