From 94cd2887f22f6d1bb82f9929b388c27c63353d77 Mon Sep 17 00:00:00 2001 From: BenWhitehead Date: Tue, 20 Dec 2022 11:26:54 -0500 Subject: [PATCH] feat: improve throughput of http based storage#reader between 100 MiB/s and 200 MiB/s (#1799) ### Work Implement new BlobReadChannelV2 which replaces BlobReadChannel and improves on its resource usage to reduce min number of RPCs to 1 from (objSize / chunkSize + 1) while still maintaining the ability to restart a stream that may have been interrupted. ### Results Throughput in MiB/s has increased across the board: ``` ClassName mean 25% 50% 75% 90% 95% 99% max READ[0] BlobReadChannel 32.2 25.3 29.0 32.6 42.1 56.1 111.9 214.1 READ[1] BlobReadChannel 32.1 25.4 28.7 32.6 41.7 55.4 106.1 224.4 READ[2] BlobReadChannel 31.9 25.2 28.6 32.8 41.6 55.2 105.4 227.2 READ[0] BlobReadChannelV2 214.1 196.4 219.8 239.3 254.3 261.2 278.0 315.2 READ[1] BlobReadChannelV2 215.9 198.8 221.0 240.0 254.4 261.8 281.8 315.6 READ[2] BlobReadChannelV2 216.4 199.5 221.2 239.4 253.9 261.6 281.6 308.6 ``` Data collected using all default settings, against a regional bucket, across a range of object sizes [256KiB, 2GiB]. Each object is read in full three times to account for any GCS caching variability. ### Internal implementation notes Add ByteRangeSpec to encapsulate relative vs explicit(open) vs explicit(closed) vs null vs open-ended ranges and their associated logical subtleties. The new StorageReadChannel interface is a possible candidate for a new storage-specific interface we can expose to folks for improvements independent of core and BigQuery. ### Future Breaking Change In order to facilitate migrating any `RestorableState` customers might have, we have left the existing class hierarchy in place and updated `BlobReadChannel.StateImpl#restore()` to produce a new `BlobReadChannelV2` instance when called. In the next major version this compatibility path will be removed. 
--- .../clirr-ignored-differences.xml | 12 + .../ApiaryUnbufferedReadableByteChannel.java | 338 +++++++++ .../google/cloud/storage/BlobReadChannel.java | 247 ++----- .../cloud/storage/BlobReadChannelV2.java | 299 ++++++++ .../google/cloud/storage/ByteRangeSpec.java | 523 ++++++++++++++ .../storage/DefaultStorageRetryStrategy.java | 9 + .../GapicUnbufferedReadableByteChannel.java | 10 - .../cloud/storage/GrpcBlobReadChannel.java | 51 +- .../storage/GzipReadableByteChannel.java | 10 - .../storage/HttpDownloadSessionBuilder.java | 156 +++++ .../storage/HttpRetryAlgorithmManager.java | 32 + .../google/cloud/storage/LazyReadChannel.java | 51 ++ .../java/com/google/cloud/storage/Maths.java | 73 ++ .../google/cloud/storage/ResumableMedia.java | 14 + .../cloud/storage/StorageByteChannels.java | 5 + .../cloud/storage/StorageException.java | 26 + .../com/google/cloud/storage/StorageImpl.java | 15 +- .../cloud/storage/StorageReadChannel.java | 66 ++ .../cloud/storage/StorageRetryStrategy.java | 2 +- .../UnbufferedReadableByteChannelSession.java | 14 +- .../cloud/storage/spi/v1/HttpStorageRpc.java | 4 + .../cloud/storage/spi/v1/StorageRpc.java | 4 + .../storage/testing/StorageRpcTestBase.java | 6 + .../cloud/storage/BlobReadChannelTest.java | 287 -------- .../com/google/cloud/storage/BlobTest.java | 3 +- .../cloud/storage/ByteRangeSpecTest.java | 651 ++++++++++++++++++ .../DefaultRetryHandlingBehaviorTest.java | 2 +- .../com/google/cloud/storage/MathsTest.java | 46 ++ .../ScatteringByteChannelFacadeTest.java | 180 +++++ .../cloud/storage/SerializationTest.java | 53 +- .../cloud/storage/StorageBatchTest.java | 5 +- .../cloud/storage/StorageImplMockitoTest.java | 112 +-- .../com/google/cloud/storage/TestUtils.java | 49 ++ .../storage/it/ChecksummedTestContent.java | 2 +- .../storage/it/ITBlobReadChannelTest.java | 306 ++++++-- .../it/ITBlobReadChannelV2RetryTest.java | 132 ++++ .../google/cloud/storage/it/ITObjectTest.java | 72 +- .../cloud/storage/it/RequestAuditing.java | 45 
+- .../it/runner/registry/ObjectsFixture.java | 34 + .../storage/blobWriteChannel.ser.properties | 70 ++ 40 files changed, 3252 insertions(+), 764 deletions(-) create mode 100644 google-cloud-storage/src/main/java/com/google/cloud/storage/ApiaryUnbufferedReadableByteChannel.java create mode 100644 google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadChannelV2.java create mode 100644 google-cloud-storage/src/main/java/com/google/cloud/storage/ByteRangeSpec.java create mode 100644 google-cloud-storage/src/main/java/com/google/cloud/storage/HttpDownloadSessionBuilder.java create mode 100644 google-cloud-storage/src/main/java/com/google/cloud/storage/LazyReadChannel.java create mode 100644 google-cloud-storage/src/main/java/com/google/cloud/storage/Maths.java create mode 100644 google-cloud-storage/src/main/java/com/google/cloud/storage/StorageReadChannel.java delete mode 100644 google-cloud-storage/src/test/java/com/google/cloud/storage/BlobReadChannelTest.java create mode 100644 google-cloud-storage/src/test/java/com/google/cloud/storage/ByteRangeSpecTest.java create mode 100644 google-cloud-storage/src/test/java/com/google/cloud/storage/MathsTest.java create mode 100644 google-cloud-storage/src/test/java/com/google/cloud/storage/ScatteringByteChannelFacadeTest.java create mode 100644 google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadChannelV2RetryTest.java create mode 100644 google-cloud-storage/src/test/resources/com/google/cloud/storage/blobWriteChannel.ser.properties diff --git a/google-cloud-storage/clirr-ignored-differences.xml b/google-cloud-storage/clirr-ignored-differences.xml index 7182f2a96..51e0258c4 100644 --- a/google-cloud-storage/clirr-ignored-differences.xml +++ b/google-cloud-storage/clirr-ignored-differences.xml @@ -1,6 +1,18 @@ + + + 7012 + com/google/cloud/storage/UnbufferedReadableByteChannelSession$UnbufferedReadableByteChannel + * read(*) + + + + 7012 + com/google/cloud/storage/spi/v1/StorageRpc + * 
getStorage() + 8001 diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/ApiaryUnbufferedReadableByteChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/ApiaryUnbufferedReadableByteChannel.java new file mode 100644 index 000000000..078b32788 --- /dev/null +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/ApiaryUnbufferedReadableByteChannel.java @@ -0,0 +1,338 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.Utils.ifNonNull; + +import com.google.api.client.http.HttpHeaders; +import com.google.api.client.http.HttpResponse; +import com.google.api.client.http.HttpResponseException; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.services.storage.Storage; +import com.google.api.services.storage.Storage.Objects; +import com.google.api.services.storage.Storage.Objects.Get; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import com.google.common.hash.HashFunction; +import com.google.common.hash.Hashing; +import com.google.common.io.BaseEncoding; +import com.google.gson.Gson; +import com.google.gson.stream.JsonReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.io.StringReader; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.ScatteringByteChannel; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import java.util.function.Function; +import javax.annotation.concurrent.Immutable; + +class ApiaryUnbufferedReadableByteChannel implements UnbufferedReadableByteChannel { + + private final ApiaryReadRequest apiaryReadRequest; + private final Storage storage; + private final SettableApiFuture result; + private final HttpStorageOptions options; + private final ResultRetryAlgorithm resultRetryAlgorithm; + private final Consumer resolvedObjectCallback; + + private long position; + private ScatteringByteChannel sbc; + 
private boolean open; + + // returned X-Goog-Generation header value + private Long xGoogGeneration; + + ApiaryUnbufferedReadableByteChannel( + ApiaryReadRequest apiaryReadRequest, + Storage storage, + SettableApiFuture result, + HttpStorageOptions options, + ResultRetryAlgorithm resultRetryAlgorithm, + Consumer resolvedObjectCallback) { + this.apiaryReadRequest = apiaryReadRequest; + this.storage = storage; + this.result = result; + this.options = options; + this.resultRetryAlgorithm = resultRetryAlgorithm; + this.resolvedObjectCallback = resolvedObjectCallback; + this.open = true; + this.position = + apiaryReadRequest.getByteRangeSpec() != null + ? apiaryReadRequest.getByteRangeSpec().beginOffset() + : 0; + } + + @SuppressWarnings("UnnecessaryContinue") + @Override + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { + do { + if (sbc == null) { + sbc = Retrying.run(options, resultRetryAlgorithm, this::open, Function.identity()); + } + + try { + // According to the contract of Retrying#run it's possible for sbc to be null even after + // invocation. However, the function we provide is guaranteed to return non-null or throw + // an exception. So we suppress the warning from intellij here. + //noinspection ConstantConditions + long read = sbc.read(dsts, offset, length); + if (read == -1) { + open = false; + } else { + position += read; + } + return read; + } catch (Exception t) { + if (resultRetryAlgorithm.shouldRetry(t, null)) { + // if our retry algorithm COULD allow a retry, continue the loop and allow trying to + // open the stream again. 
+ sbc = null; + continue; + } else if (t instanceof IOException) { + IOException ioE = (IOException) t; + if (resultRetryAlgorithm.shouldRetry(StorageException.translate(ioE), null)) { + sbc = null; + continue; + } else { + throw ioE; + } + } else { + throw new IOException(StorageException.coalesce(t)); + } + } + } while (true); + } + + @Override + public boolean isOpen() { + return open; + } + + @Override + public void close() throws IOException { + open = false; + if (sbc != null) { + sbc.close(); + } + } + + private ScatteringByteChannel open() { + try { + Boolean b = + (Boolean) apiaryReadRequest.options.get(StorageRpc.Option.RETURN_RAW_INPUT_STREAM); + boolean returnRawInputStream = b != null ? b : true; + ApiaryReadRequest request = apiaryReadRequest.withNewBeginOffset(position); + Get get = createGetRequest(request, storage.objects(), xGoogGeneration, returnRawInputStream); + + HttpResponse media = get.executeMedia(); + InputStream content = media.getContent(); + if (xGoogGeneration == null) { + HttpHeaders responseHeaders = media.getHeaders(); + //noinspection unchecked + List xGoogGenHeader = (List) responseHeaders.get("x-goog-generation"); + // TODO: wire in result metadata population + if (xGoogGenHeader != null && !xGoogGenHeader.isEmpty()) { + String s = xGoogGenHeader.get(0); + Long generation = Long.valueOf(s); + this.xGoogGeneration = generation; + resolvedObjectCallback.accept( + apiaryReadRequest.getObject().clone().setGeneration(generation)); + } + } + + ReadableByteChannel rbc = Channels.newChannel(content); + return StorageByteChannels.readable().asScatteringByteChannel(rbc); + } catch (HttpResponseException e) { + if (xGoogGeneration != null) { + int statusCode = e.getStatusCode(); + if (statusCode == 404) { + throw new StorageException(404, "Failure while trying to resume download", e); + } + } + throw StorageException.translate(e); + } catch (IOException e) { + throw StorageException.translate(e); + } catch (Throwable t) { + throw 
StorageException.coalesce(t); + } + } + + @VisibleForTesting + static Get createGetRequest( + ApiaryReadRequest apiaryReadRequest, + Objects objects, + Long xGoogGeneration, + boolean returnRawInputStream) + throws IOException { + StorageObject from = apiaryReadRequest.getObject(); + Map options = apiaryReadRequest.getOptions(); + Get get = objects.get(from.getBucket(), from.getName()); + if (from.getGeneration() != null) { + get.setGeneration(from.getGeneration()); + } else if (xGoogGeneration != null) { + get.setGeneration(xGoogGeneration); + } + ifNonNull( + options.get(StorageRpc.Option.IF_GENERATION_MATCH), + ApiaryUnbufferedReadableByteChannel::cast, + get::setIfGenerationMatch); + ifNonNull( + options.get(StorageRpc.Option.IF_GENERATION_NOT_MATCH), + ApiaryUnbufferedReadableByteChannel::cast, + get::setIfGenerationNotMatch); + ifNonNull( + options.get(StorageRpc.Option.IF_METAGENERATION_MATCH), + ApiaryUnbufferedReadableByteChannel::cast, + get::setIfMetagenerationMatch); + ifNonNull( + options.get(StorageRpc.Option.IF_METAGENERATION_NOT_MATCH), + ApiaryUnbufferedReadableByteChannel::cast, + get::setIfMetagenerationNotMatch); + ifNonNull( + options.get(StorageRpc.Option.USER_PROJECT), + ApiaryUnbufferedReadableByteChannel::cast, + get::setUserProject); + HttpHeaders headers = get.getRequestHeaders(); + ifNonNull( + options.get(StorageRpc.Option.CUSTOMER_SUPPLIED_KEY), + ApiaryUnbufferedReadableByteChannel::cast, + (String key) -> { + BaseEncoding base64 = BaseEncoding.base64(); + HashFunction hashFunction = Hashing.sha256(); + headers.set("x-goog-encryption-algorithm", "AES256"); + headers.set("x-goog-encryption-key", key); + headers.set( + "x-goog-encryption-key-sha256", + base64.encode(hashFunction.hashBytes(base64.decode(key)).asBytes())); + }); + + get.setReturnRawInputStream(returnRawInputStream); + String range = apiaryReadRequest.getByteRangeSpec().getHttpRangeHeader(); + if (range != null) { + get.getRequestHeaders().setRange(range); + } + 
get.getMediaHttpDownloader().setDirectDownloadEnabled(true); + + return get; + } + + @SuppressWarnings("unchecked") + private static T cast(Object o) { + return (T) o; + } + + @Immutable + static final class ApiaryReadRequest implements Serializable { + private static final long serialVersionUID = -4059435314115374448L; + private static final Gson gson = new Gson(); + private transient StorageObject object; + private final Map options; + private final ByteRangeSpec byteRangeSpec; + + private volatile String objectJson; + + ApiaryReadRequest( + StorageObject object, Map options, ByteRangeSpec byteRangeSpec) { + this.object = object; + this.options = options; + this.byteRangeSpec = byteRangeSpec; + } + + StorageObject getObject() { + return object; + } + + Map getOptions() { + return options; + } + + ByteRangeSpec getByteRangeSpec() { + return byteRangeSpec; + } + + ApiaryReadRequest withNewBeginOffset(long beginOffset) { + if (beginOffset > 0 && beginOffset != byteRangeSpec.beginOffset()) { + return new ApiaryReadRequest( + object, options, byteRangeSpec.withNewBeginOffset(beginOffset)); + } else { + return this; + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ApiaryReadRequest)) { + return false; + } + ApiaryReadRequest that = (ApiaryReadRequest) o; + return java.util.Objects.equals(object, that.object) + && java.util.Objects.equals(options, that.options) + && java.util.Objects.equals(byteRangeSpec, that.byteRangeSpec); + } + + @Override + public int hashCode() { + return java.util.Objects.hash(object, options, byteRangeSpec); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("byteRangeSpec", byteRangeSpec) + .add("options", options) + .add("object", getObjectJson()) + .toString(); + } + + private String getObjectJson() { + if (objectJson == null) { + synchronized (this) { + if (objectJson == null) { + objectJson = gson.toJson(object); + } + } + } + 
return objectJson; + } + + private void writeObject(ObjectOutputStream out) throws IOException { + String ignore = getObjectJson(); + out.defaultWriteObject(); + } + + private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { + in.defaultReadObject(); + JsonReader jsonReader = gson.newJsonReader(new StringReader(this.objectJson)); + this.object = gson.fromJson(jsonReader, StorageObject.class); + } + } +} diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadChannel.java index d4d9135fe..3efb97b24 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadChannel.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadChannel.java @@ -16,262 +16,105 @@ package com.google.cloud.storage; -import static com.google.cloud.RetryHelper.runWithRetries; - -import com.google.api.client.util.Preconditions; -import com.google.api.gax.retrying.ResultRetryAlgorithm; import com.google.api.services.storage.model.StorageObject; import com.google.cloud.ReadChannel; import com.google.cloud.RestorableState; -import com.google.cloud.RetryHelper; -import com.google.cloud.Tuple; +import com.google.cloud.storage.BlobReadChannelV2.BlobReadChannelContext; import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.MoreObjects; import java.io.IOException; import java.io.Serializable; import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; import java.util.Map; import java.util.Objects; -/** Default implementation for ReadChannel. */ +/** + * Hierarchy retained for {@link RestorableState#restore()}. Will be removed in next major version! 
+ */ +@Deprecated class BlobReadChannel implements ReadChannel { - private static final int DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024; - - private final HttpStorageOptions serviceOptions; - private final BlobId blob; - private final Map requestOptions; - private final HttpRetryAlgorithmManager retryAlgorithmManager; - private String lastEtag; - private long position; - private boolean isOpen; - private boolean endOfStream; - private int chunkSize = DEFAULT_CHUNK_SIZE; - - private final StorageRpc storageRpc; - private final StorageObject storageObject; - private int bufferPos; - private byte[] buffer; - private long limit; - - BlobReadChannel( - HttpStorageOptions serviceOptions, BlobId blob, Map requestOptions) { - this.serviceOptions = serviceOptions; - this.blob = blob; - this.requestOptions = requestOptions; - this.retryAlgorithmManager = serviceOptions.getRetryAlgorithmManager(); - isOpen = true; - storageRpc = serviceOptions.getStorageRpcV1(); - storageObject = Conversions.apiary().blobId().encode(blob); - this.limit = Long.MAX_VALUE; - } + private BlobReadChannel() {} @Override public RestorableState capture() { - StateImpl.Builder builder = - StateImpl.builder(serviceOptions, blob, requestOptions) - .setPosition(position) - .setIsOpen(isOpen) - .setEndOfStream(endOfStream) - .setChunkSize(chunkSize) - .setLimit(limit); - if (buffer != null) { - builder.setPosition(position + bufferPos); - builder.setEndOfStream(false); - } - return builder.build(); + throw new IllegalStateException("Illegal method access"); } @Override public boolean isOpen() { - return isOpen; + throw new IllegalStateException("Illegal method access"); } @Override public void close() { - if (isOpen) { - buffer = null; - isOpen = false; - } - } - - private void validateOpen() throws ClosedChannelException { - if (!isOpen) { - throw new ClosedChannelException(); - } + throw new IllegalStateException("Illegal method access"); } @Override public void seek(long position) throws IOException { - 
validateOpen(); - this.position = position; - buffer = null; - bufferPos = 0; - endOfStream = false; + throw new IllegalStateException("Illegal method access"); } @Override public void setChunkSize(int chunkSize) { - this.chunkSize = chunkSize <= 0 ? DEFAULT_CHUNK_SIZE : chunkSize; + throw new IllegalStateException("Illegal method access"); } @Override public int read(ByteBuffer byteBuffer) throws IOException { - validateOpen(); - if (buffer == null) { - if (endOfStream) { - return -1; - } - final int toRead = - Math.toIntExact(Math.min(limit - position, Math.max(byteBuffer.remaining(), chunkSize))); - if (toRead <= 0) { - endOfStream = true; - return -1; - } - try { - ResultRetryAlgorithm algorithm = - retryAlgorithmManager.getForObjectsGet(storageObject, requestOptions); - Tuple result = - runWithRetries( - () -> storageRpc.read(storageObject, requestOptions, position, toRead), - serviceOptions.getRetrySettings(), - algorithm, - serviceOptions.getClock()); - String etag = result.x(); - byte[] bytes = result.y(); - if (bytes.length > 0 && lastEtag != null && !Objects.equals(etag, lastEtag)) { - throw new IOException("Blob " + blob + " was updated while reading"); - } - lastEtag = etag; - buffer = bytes; - } catch (RetryHelper.RetryHelperException e) { - throw new IOException(e.getCause()); - } - if (toRead > buffer.length) { - endOfStream = true; - if (buffer.length == 0) { - buffer = null; - return -1; - } - } - } - int toWrite = Math.min(buffer.length - bufferPos, byteBuffer.remaining()); - byteBuffer.put(buffer, bufferPos, toWrite); - bufferPos += toWrite; - if (bufferPos >= buffer.length) { - position += buffer.length; - buffer = null; - bufferPos = 0; - } - return toWrite; + throw new IllegalStateException("Illegal method access"); } @Override public ReadChannel limit(long limit) { - Preconditions.checkArgument(limit >= 0, "Limit must be >= 0"); - this.limit = limit; - return this; + throw new IllegalStateException("Illegal method access"); } @Override public 
long limit() { - return limit; + throw new IllegalStateException("Illegal method access"); } + /** Retained for binary compatibility. Will be removed at next major version! */ + @SuppressWarnings("unused") + @Deprecated + @VisibleForTesting static class StateImpl implements RestorableState, Serializable { private static final long serialVersionUID = 7784852608213694645L; - private final HttpStorageOptions serviceOptions; - private final BlobId blob; - private final Map requestOptions; - private final String lastEtag; - private final long position; - private final boolean isOpen; - private final boolean endOfStream; - private final int chunkSize; - private final long limit; - - StateImpl(Builder builder) { - this.serviceOptions = builder.serviceOptions; - this.blob = builder.blob; - this.requestOptions = builder.requestOptions; - this.lastEtag = builder.lastEtag; - this.position = builder.position; - this.isOpen = builder.isOpen; - this.endOfStream = builder.endOfStream; - this.chunkSize = builder.chunkSize; - this.limit = builder.limit; - } - - static class Builder { - private final HttpStorageOptions serviceOptions; - private final BlobId blob; - private final Map requestOptions; - private String lastEtag; - private long position; - private boolean isOpen; - private boolean endOfStream; - private int chunkSize; - private long limit; - - private Builder( - HttpStorageOptions options, BlobId blob, Map reqOptions) { - this.serviceOptions = options; - this.blob = blob; - this.requestOptions = reqOptions; - } - - Builder setLastEtag(String lastEtag) { - this.lastEtag = lastEtag; - return this; - } - - Builder setPosition(long position) { - this.position = position; - return this; - } - - Builder setIsOpen(boolean isOpen) { - this.isOpen = isOpen; - return this; - } - - Builder setEndOfStream(boolean endOfStream) { - this.endOfStream = endOfStream; - return this; - } - - Builder setChunkSize(int chunkSize) { - this.chunkSize = chunkSize; - return this; - } - - Builder 
setLimit(long limit) { - this.limit = limit; - return this; - } + // the following fields are dangling as they are only set via object deserialization, and only + // read in #restore() + private HttpStorageOptions serviceOptions; + private BlobId blob; + private Map requestOptions; + private String lastEtag; + private long position; + private boolean isOpen; + private boolean endOfStream; + private int chunkSize; + private long limit; - RestorableState build() { - return new StateImpl(this); - } - } - - static Builder builder( - HttpStorageOptions options, BlobId blob, Map reqOptions) { - return new Builder(options, blob, reqOptions); - } + private StateImpl() {} @Override public ReadChannel restore() { - BlobReadChannel channel = new BlobReadChannel(serviceOptions, blob, requestOptions); - channel.lastEtag = lastEtag; - channel.position = position; - channel.isOpen = isOpen; - channel.endOfStream = endOfStream; - channel.chunkSize = chunkSize; - channel.limit = limit; + StorageObject encode = Conversions.apiary().blobId().encode(blob); + BlobReadChannelV2 channel = + new BlobReadChannelV2( + encode, requestOptions, BlobReadChannelContext.from(serviceOptions)); + try { + channel.seek(position); + channel.limit(limit); + channel.setChunkSize(chunkSize); + if (!isOpen) { + channel.close(); + } + } catch (IOException e) { + throw StorageException.coalesce(e); + } return channel; } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadChannelV2.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadChannelV2.java new file mode 100644 index 000000000..e768b04eb --- /dev/null +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobReadChannelV2.java @@ -0,0 +1,299 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.ByteSizeConstants._2MiB; +import static java.util.Objects.requireNonNull; + +import com.google.api.services.storage.Storage; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.ReadChannel; +import com.google.cloud.RestorableState; +import com.google.cloud.storage.ApiaryUnbufferedReadableByteChannel.ApiaryReadRequest; +import com.google.cloud.storage.BufferedReadableByteChannelSession.BufferedReadableByteChannel; +import com.google.cloud.storage.spi.v1.StorageRpc; +import com.google.common.base.MoreObjects; +import java.io.IOException; +import java.io.Serializable; +import java.nio.ByteBuffer; +import java.util.Map; +import java.util.Objects; + +final class BlobReadChannelV2 implements StorageReadChannel { + + private final StorageObject storageObject; + private final Map opts; + private final BlobReadChannelContext blobReadChannelContext; + + private LazyReadChannel lazyReadChannel; + private StorageObject resolvedObject; + private ByteRangeSpec byteRangeSpec; + + private int chunkSize = _2MiB; + private BufferHandle bufferHandle; + + BlobReadChannelV2( + StorageObject storageObject, + Map opts, + BlobReadChannelContext blobReadChannelContext) { + this.storageObject = storageObject; + this.opts = opts; + this.blobReadChannelContext = blobReadChannelContext; + this.byteRangeSpec = ByteRangeSpec.nullRange(); + } + + @Override + public synchronized void setChunkSize(int chunkSize) { + StorageException.wrapIOException(() -> 
maybeResetChannel(true)); + this.chunkSize = chunkSize; + } + + @Override + public synchronized boolean isOpen() { + if (lazyReadChannel == null) { + return true; + } else { + LazyReadChannel tmp = internalGetLazyChannel(); + return tmp.isOpen(); + } + } + + @Override + public synchronized void close() { + if (internalGetLazyChannel().isOpen()) { + StorageException.wrapIOException(internalGetLazyChannel().getChannel()::close); + } + } + + @Override + public synchronized StorageReadChannel setByteRangeSpec(ByteRangeSpec byteRangeSpec) { + requireNonNull(byteRangeSpec, "byteRangeSpec must be non null"); + StorageException.wrapIOException(() -> maybeResetChannel(false)); + this.byteRangeSpec = byteRangeSpec; + return this; + } + + @Override + public ByteRangeSpec getByteRangeSpec() { + return byteRangeSpec; + } + + @Override + public synchronized int read(ByteBuffer dst) throws IOException { + long diff = byteRangeSpec.length(); + if (diff <= 0) { + close(); + return -1; + } + try { + int read = internalGetLazyChannel().getChannel().read(dst); + if (read != -1) { + byteRangeSpec = byteRangeSpec.withShiftBeginOffset(read); + } else { + close(); + } + return read; + } catch (StorageException e) { + if (e.getCode() == 416) { + // HttpStorageRpc turns 416 into a null etag with an empty byte array, leading + // BlobReadChannel to believe it read 0 bytes, returning -1 and leaving the channel open. + // Emulate that same behavior here to preserve behavior compatibility, though this should + // be removed in the next major version. 
+ return -1; + } else { + throw new IOException(e); + } + } catch (IOException e) { + throw e; + } catch (Exception e) { + throw new IOException(StorageException.coalesce(e)); + } + } + + @Override + public RestorableState capture() { + ApiaryReadRequest apiaryReadRequest = getApiaryReadRequest(); + return new BlobReadChannelV2State( + apiaryReadRequest, blobReadChannelContext.getStorageOptions(), chunkSize); + } + + private void maybeResetChannel(boolean umallocBuffer) throws IOException { + if (lazyReadChannel != null && lazyReadChannel.isOpen()) { + try (BufferedReadableByteChannel ignore = lazyReadChannel.getChannel()) { + if (bufferHandle != null && !umallocBuffer) { + bufferHandle.get().clear(); + } else if (umallocBuffer) { + bufferHandle = null; + } + lazyReadChannel = null; + } + } + } + + private LazyReadChannel internalGetLazyChannel() { + if (lazyReadChannel == null) { + lazyReadChannel = newLazyReadChannel(); + } + return lazyReadChannel; + } + + private LazyReadChannel newLazyReadChannel() { + return new LazyReadChannel<>( + () -> { + if (bufferHandle == null) { + bufferHandle = BufferHandle.allocate(chunkSize); + } + return ResumableMedia.http() + .read() + .byteChannel(blobReadChannelContext) + .setCallback(this::setResolvedObject) + .buffered(bufferHandle) + .setApiaryReadRequest(getApiaryReadRequest()) + .build(); + }); + } + + private void setResolvedObject(StorageObject resolvedObject) { + this.resolvedObject = resolvedObject; + } + + private ApiaryReadRequest getApiaryReadRequest() { + StorageObject object = resolvedObject != null ? 
resolvedObject : storageObject; + return new ApiaryReadRequest(object, opts, byteRangeSpec); + } + + static class BlobReadChannelV2State implements RestorableState, Serializable { + + private static final long serialVersionUID = -7595661593080505431L; + + private final ApiaryReadRequest request; + private final HttpStorageOptions options; + + private final Integer chunkSize; + + private BlobReadChannelV2State( + ApiaryReadRequest request, HttpStorageOptions options, Integer chunkSize) { + this.request = request; + this.options = options; + this.chunkSize = chunkSize; + } + + @Override + public ReadChannel restore() { + BlobReadChannelV2 channel = + new BlobReadChannelV2( + request.getObject(), request.getOptions(), BlobReadChannelContext.from(options)); + channel.setByteRangeSpec(request.getByteRangeSpec()); + if (chunkSize != null) { + channel.setChunkSize(chunkSize); + } + return channel; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof BlobReadChannelV2State)) { + return false; + } + BlobReadChannelV2State that = (BlobReadChannelV2State) o; + return Objects.equals(request, that.request) + && Objects.equals(options, that.options) + && Objects.equals(chunkSize, that.chunkSize); + } + + @Override + public int hashCode() { + return Objects.hash(request, options, chunkSize); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("request", request) + .add("options", options) + .add("chunkSize", chunkSize) + .toString(); + } + } + + static final class BlobReadChannelContext { + private final HttpStorageOptions storageOptions; + private final HttpRetryAlgorithmManager retryAlgorithmManager; + private final Storage apiaryClient; + + private BlobReadChannelContext( + HttpStorageOptions storageOptions, + Storage apiaryClient, + HttpRetryAlgorithmManager retryAlgorithmManager) { + this.storageOptions = storageOptions; + this.apiaryClient = apiaryClient; + 
this.retryAlgorithmManager = retryAlgorithmManager; + } + + public HttpStorageOptions getStorageOptions() { + return storageOptions; + } + + public HttpRetryAlgorithmManager getRetryAlgorithmManager() { + return retryAlgorithmManager; + } + + public Storage getApiaryClient() { + return apiaryClient; + } + + static BlobReadChannelContext from(HttpStorageOptions options) { + return new BlobReadChannelContext( + options, options.getStorageRpcV1().getStorage(), options.getRetryAlgorithmManager()); + } + + static BlobReadChannelContext from(com.google.cloud.storage.Storage s) { + StorageOptions options = s.getOptions(); + if (options instanceof HttpStorageOptions) { + HttpStorageOptions httpStorageOptions = (HttpStorageOptions) options; + return from(httpStorageOptions); + } + throw new IllegalArgumentException("Only HttpStorageOptions based instance supported"); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof BlobReadChannelContext)) { + return false; + } + BlobReadChannelContext that = (BlobReadChannelContext) o; + return Objects.equals(storageOptions, that.storageOptions) + && Objects.equals(retryAlgorithmManager, that.retryAlgorithmManager); + } + + @Override + public int hashCode() { + return Objects.hash(storageOptions, retryAlgorithmManager); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("storageOptions", storageOptions).toString(); + } + } +} diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/ByteRangeSpec.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/ByteRangeSpec.java new file mode 100644 index 000000000..676f521ca --- /dev/null +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/ByteRangeSpec.java @@ -0,0 +1,523 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.api.core.InternalApi; +import com.google.common.base.MoreObjects; +import com.google.common.base.MoreObjects.ToStringHelper; +import java.io.Serializable; +import java.util.Objects; +import java.util.function.BiFunction; +import javax.annotation.concurrent.Immutable; +import javax.annotation.concurrent.ThreadSafe; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +/** + * Typeful sealed class hierarchy for representing an HTTP + * Range Header There are certain subtleties when building these header values depending on + * whether explicit boundaries or relative lengths are used. This class encapsulates the edge cases + * as well as the concept of an effective infinity value for end of range. + * + *

This class does not currently support negative offsets, i.e. start from end of content. + */ +@InternalApi +@ThreadSafe +abstract class ByteRangeSpec implements Serializable { + + public static final long EFFECTIVE_INFINITY = Long.MAX_VALUE; + + @Nullable private volatile String httpRangeHeader; + + private ByteRangeSpec() {} + + abstract long beginOffset(); + + abstract long endOffset() throws ArithmeticException; + + abstract long length() throws ArithmeticException; + + // TODO: add validation to this if it ever becomes public + abstract ByteRangeSpec withNewBeginOffset(long beginOffset); + + abstract ByteRangeSpec withShiftBeginOffset(long beginOffsetIncrement) throws ArithmeticException; + + abstract ByteRangeSpec withNewEndOffset(long endOffsetExclusive); + + abstract ByteRangeSpec withNewEndOffsetClosed(long endOffsetInclusive); + + abstract ByteRangeSpec withNewRelativeLength(long relativeLength); + + /** + * If a range has no effectively declared beginning and end the string returned here will be null. 
+ */ + @Nullable + final String getHttpRangeHeader() throws ArithmeticException { + if (httpRangeHeader == null) { + synchronized (this) { + if (httpRangeHeader == null) { + httpRangeHeader = fmtAsHttpRangeHeader(); + } + } + } + return httpRangeHeader; + } + + @Nullable + protected abstract String fmtAsHttpRangeHeader() throws ArithmeticException; + + @Override + public int hashCode() { + return Objects.hash(getHttpRangeHeader()); + } + + @Override + public boolean equals(Object o) { + if (o instanceof ByteRangeSpec) { + ByteRangeSpec that = (ByteRangeSpec) o; + return Objects.equals(this.getHttpRangeHeader(), that.getHttpRangeHeader()); + } + return false; + } + + @Override + public String toString() { + return append(MoreObjects.toStringHelper("")) + .add("httpRangeHeader", getHttpRangeHeader()) + .toString(); + } + + protected abstract MoreObjects.ToStringHelper append(MoreObjects.ToStringHelper tsh); + + static ByteRangeSpec nullRange() { + return NullByteRangeSpec.INSTANCE; + } + + static ByteRangeSpec relativeLength(@Nullable Long beginOffset, @Nullable Long length) { + return create(beginOffset, length, RelativeByteRangeSpec::new); + } + + static ByteRangeSpec explicit(@Nullable Long beginOffset, @Nullable Long endOffsetExclusive) { + return create(beginOffset, endOffsetExclusive, LeftClosedRightOpenByteRangeSpec::new); + } + + static ByteRangeSpec explicitClosed( + @Nullable Long beginOffset, @Nullable Long endOffsetInclusive) { + return create(beginOffset, endOffsetInclusive, LeftClosedRightClosedByteRangeSpec::new); + } + + private static ByteRangeSpec create( + @Nullable Long beginOffset, + @Nullable Long length, + BiFunction<@NonNull Long, @NonNull Long, ByteRangeSpec> f) { + if (beginOffset == null && length == null) { + return nullRange(); + } else if (beginOffset != null && length != null) { + if (beginOffset == 0 && length == EFFECTIVE_INFINITY) { + return nullRange(); + } else if (length == EFFECTIVE_INFINITY) { + return new 
LeftClosedByteRangeSpec(beginOffset); + } + return f.apply(beginOffset, length); + } else if (beginOffset == null /* && length != null*/) { + if (length == EFFECTIVE_INFINITY) { + return nullRange(); + } + return f.apply(0L, length); + } else { + return new LeftClosedByteRangeSpec(beginOffset); + } + } + + @Immutable + private static final class RelativeByteRangeSpec extends ByteRangeSpec { + private static final long serialVersionUID = -3910856417374881377L; + private final long beginOffset; + private final long length; + + private RelativeByteRangeSpec(long beginOffset, long length) { + this.beginOffset = beginOffset; + this.length = length; + } + + @Override + long beginOffset() { + return beginOffset; + } + + @Override + long endOffset() throws ArithmeticException { + return Math.addExact(beginOffset, length) - 1; + } + + @Override + long length() throws ArithmeticException { + return length; + } + + @Override + ByteRangeSpec withNewBeginOffset(long beginOffset) { + if (beginOffset != this.beginOffset) { + return new RelativeByteRangeSpec(beginOffset, length); + } else { + return this; + } + } + + @Override + ByteRangeSpec withShiftBeginOffset(long beginOffsetIncrement) throws ArithmeticException { + if (beginOffsetIncrement != 0) { + return new RelativeByteRangeSpec(Math.addExact(beginOffset, beginOffsetIncrement), length); + } else { + return this; + } + } + + @Override + ByteRangeSpec withNewEndOffset(long endOffsetExclusive) { + return new LeftClosedRightOpenByteRangeSpec(beginOffset, endOffsetExclusive); + } + + @Override + ByteRangeSpec withNewEndOffsetClosed(long endOffsetInclusive) { + return new LeftClosedRightClosedByteRangeSpec(beginOffset, endOffsetInclusive); + } + + @Override + ByteRangeSpec withNewRelativeLength(long relativeLength) { + if (relativeLength != this.length) { + return new RelativeByteRangeSpec(beginOffset, relativeLength); + } else { + return this; + } + } + + @Override + protected String fmtAsHttpRangeHeader() throws 
ArithmeticException { + return String.format("bytes=%d-%d", beginOffset, endOffset()); + } + + @Override + protected ToStringHelper append(ToStringHelper tsh) { + return tsh.addValue(String.format("%d + %d", beginOffset, length)); + } + } + + @Immutable + private static final class LeftClosedRightOpenByteRangeSpec extends ByteRangeSpec { + private static final long serialVersionUID = -2706235472358072141L; + private final long beginOffset; + private final long endOffsetExclusive; + + private LeftClosedRightOpenByteRangeSpec(long beginOffset, long endOffsetExclusive) { + this.beginOffset = beginOffset; + this.endOffsetExclusive = endOffsetExclusive; + } + + @Override + long beginOffset() { + return beginOffset; + } + + @Override + long endOffset() throws ArithmeticException { + return endOffsetExclusive; + } + + @Override + long length() throws ArithmeticException { + return endOffsetExclusive - beginOffset; + } + + @Override + ByteRangeSpec withNewBeginOffset(long beginOffset) { + if (beginOffset != this.beginOffset) { + return new LeftClosedRightOpenByteRangeSpec(beginOffset, endOffsetExclusive); + } else { + return this; + } + } + + @Override + ByteRangeSpec withShiftBeginOffset(long beginOffsetIncrement) throws ArithmeticException { + if (beginOffsetIncrement != 0) { + return new LeftClosedRightOpenByteRangeSpec( + Math.addExact(beginOffset, beginOffsetIncrement), endOffsetExclusive); + } else { + return this; + } + } + + @Override + ByteRangeSpec withNewEndOffset(long endOffsetExclusive) { + if (endOffsetExclusive != this.endOffsetExclusive) { + return new LeftClosedRightOpenByteRangeSpec(beginOffset, endOffsetExclusive); + } else { + return this; + } + } + + @Override + ByteRangeSpec withNewEndOffsetClosed(long endOffsetInclusive) { + return new LeftClosedRightClosedByteRangeSpec(beginOffset, endOffsetInclusive); + } + + @Override + ByteRangeSpec withNewRelativeLength(long relativeLength) { + return new RelativeByteRangeSpec(beginOffset, relativeLength); + } + 
+ @Override + protected String fmtAsHttpRangeHeader() throws ArithmeticException { + return String.format("bytes=%d-%d", beginOffset, endOffsetExclusive - 1); + } + + @Override + protected ToStringHelper append(ToStringHelper tsh) { + return tsh.addValue(String.format("[%d, %d)", beginOffset, endOffsetExclusive)); + } + } + + @Immutable + private static final class LeftClosedRightClosedByteRangeSpec extends ByteRangeSpec { + private static final long serialVersionUID = -2706235472358072141L; + private final long beginOffset; + private final long endOffsetInclusive; + + private LeftClosedRightClosedByteRangeSpec(long beginOffset, long endOffsetInclusive) { + this.beginOffset = beginOffset; + this.endOffsetInclusive = endOffsetInclusive; + } + + @Override + long beginOffset() { + return beginOffset; + } + + @Override + long endOffset() throws ArithmeticException { + return endOffsetInclusive; + } + + @Override + long length() throws ArithmeticException { + return endOffsetInclusive - beginOffset; + } + + @Override + ByteRangeSpec withNewBeginOffset(long beginOffset) { + if (beginOffset != this.beginOffset) { + return new LeftClosedRightClosedByteRangeSpec(beginOffset, endOffsetInclusive); + } else { + return this; + } + } + + @Override + ByteRangeSpec withShiftBeginOffset(long beginOffsetIncrement) throws ArithmeticException { + if (beginOffsetIncrement != 0) { + return new LeftClosedRightClosedByteRangeSpec( + Math.addExact(beginOffset, beginOffsetIncrement), endOffsetInclusive); + } else { + return this; + } + } + + @Override + ByteRangeSpec withNewEndOffset(long endOffsetExclusive) { + return new LeftClosedRightOpenByteRangeSpec(beginOffset, endOffsetExclusive); + } + + @Override + ByteRangeSpec withNewEndOffsetClosed(long endOffsetInclusive) { + if (endOffsetInclusive != this.endOffsetInclusive) { + return new LeftClosedRightClosedByteRangeSpec(beginOffset, endOffsetInclusive); + } else { + return this; + } + } + + @Override + ByteRangeSpec 
withNewRelativeLength(long relativeLength) { + return new RelativeByteRangeSpec(beginOffset, relativeLength); + } + + @Override + protected String fmtAsHttpRangeHeader() throws ArithmeticException { + return String.format("bytes=%d-%d", beginOffset, endOffsetInclusive); + } + + @Override + protected ToStringHelper append(ToStringHelper tsh) { + return tsh.addValue(String.format("[%d, %d]", beginOffset, endOffsetInclusive)); + } + } + + @Immutable + private static final class LeftClosedByteRangeSpec extends ByteRangeSpec { + private static final long serialVersionUID = 4732278479149027012L; + private final long beginOffset; + + private LeftClosedByteRangeSpec(long beginOffset) { + this.beginOffset = beginOffset; + } + + @Override + long beginOffset() { + return beginOffset; + } + + @Override + long endOffset() throws ArithmeticException { + return EFFECTIVE_INFINITY; + } + + @Override + long length() throws ArithmeticException { + return EFFECTIVE_INFINITY; + } + + @Override + ByteRangeSpec withNewBeginOffset(long beginOffset) { + if (beginOffset != this.beginOffset) { + return new LeftClosedByteRangeSpec(beginOffset); + } else { + return this; + } + } + + @Override + ByteRangeSpec withShiftBeginOffset(long beginOffsetIncrement) throws ArithmeticException { + if (beginOffsetIncrement != 0) { + return new LeftClosedByteRangeSpec(Math.addExact(beginOffset, beginOffsetIncrement)); + } else { + return this; + } + } + + @Override + ByteRangeSpec withNewEndOffset(long endOffsetExclusive) { + return new LeftClosedRightOpenByteRangeSpec(beginOffset, endOffsetExclusive); + } + + @Override + ByteRangeSpec withNewEndOffsetClosed(long endOffsetInclusive) { + return new LeftClosedRightClosedByteRangeSpec(beginOffset, endOffsetInclusive); + } + + @Override + ByteRangeSpec withNewRelativeLength(long relativeLength) { + return new RelativeByteRangeSpec(beginOffset, relativeLength); + } + + @Override + protected String fmtAsHttpRangeHeader() throws ArithmeticException { + if 
(beginOffset > 0) { + return String.format("bytes=%d-", beginOffset); + } else { + return null; + } + } + + @Override + protected ToStringHelper append(ToStringHelper tsh) { + return tsh.addValue(String.format("[%d, +INF)", beginOffset)); + } + } + + @Immutable + private static final class NullByteRangeSpec extends ByteRangeSpec { + private static final NullByteRangeSpec INSTANCE = new NullByteRangeSpec(); + private static final long serialVersionUID = 9110512497431639881L; + + @Override + long beginOffset() { + return 0; + } + + @Override + long endOffset() throws ArithmeticException { + return EFFECTIVE_INFINITY; + } + + @Override + long length() throws ArithmeticException { + return EFFECTIVE_INFINITY; + } + + @Override + ByteRangeSpec withNewBeginOffset(long beginOffset) { + if (beginOffset > 0) { + return new LeftClosedByteRangeSpec(beginOffset); + } else { + return this; + } + } + + @Override + ByteRangeSpec withShiftBeginOffset(long beginOffsetIncrement) throws ArithmeticException { + if (beginOffsetIncrement != 0) { + return new LeftClosedByteRangeSpec(beginOffsetIncrement); + } else { + return this; + } + } + + @Override + ByteRangeSpec withNewEndOffset(long endOffsetExclusive) { + if (endOffsetExclusive != EFFECTIVE_INFINITY) { + return new LeftClosedRightOpenByteRangeSpec(0, endOffsetExclusive); + } else { + return this; + } + } + + @Override + ByteRangeSpec withNewEndOffsetClosed(long endOffsetInclusive) { + if (endOffsetInclusive != EFFECTIVE_INFINITY) { + return new LeftClosedRightClosedByteRangeSpec(0, endOffsetInclusive); + } else { + return this; + } + } + + @Override + ByteRangeSpec withNewRelativeLength(long relativeLength) { + if (relativeLength != EFFECTIVE_INFINITY) { + return new RelativeByteRangeSpec(0, relativeLength); + } else { + return this; + } + } + + @Override + protected String fmtAsHttpRangeHeader() throws ArithmeticException { + return null; + } + + @Override + protected ToStringHelper append(ToStringHelper tsh) { + return 
tsh.addValue("[0, INF]"); + } + + /** prevent java serialization from using a new instance */ + private Object readResolve() { + return INSTANCE; + } + } +} diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultStorageRetryStrategy.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultStorageRetryStrategy.java index 75172d8a8..d580f5483 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultStorageRetryStrategy.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultStorageRetryStrategy.java @@ -28,6 +28,8 @@ final class DefaultStorageRetryStrategy implements StorageRetryStrategy { + static final DefaultStorageRetryStrategy INSTANCE = new DefaultStorageRetryStrategy(); + private static final long serialVersionUID = 7928177703325504905L; private static final Interceptor INTERCEPTOR_IDEMPOTENT = @@ -40,6 +42,8 @@ final class DefaultStorageRetryStrategy implements StorageRetryStrategy { private static final ExceptionHandler NON_IDEMPOTENT_HANDLER = newHandler(INTERCEPTOR_NON_IDEMPOTENT); + private DefaultStorageRetryStrategy() {} + @Override public ExceptionHandler getIdempotentHandler() { return IDEMPOTENT_HANDLER; @@ -54,6 +58,11 @@ private static ExceptionHandler newHandler(Interceptor... 
interceptors) { return ExceptionHandler.newBuilder().addInterceptors(interceptors).build(); } + /** prevent java serialization from using a new instance */ + private Object readResolve() { + return INSTANCE; + } + private static class InterceptorImpl implements BaseInterceptor { private static final long serialVersionUID = 5283634944744417128L; diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedReadableByteChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedReadableByteChannel.java index daea9281d..36dfd24cb 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedReadableByteChannel.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedReadableByteChannel.java @@ -72,16 +72,6 @@ final class GapicUnbufferedReadableByteChannel this.iter = new LazyServerStreamIterator(); } - @Override - public int read(ByteBuffer dst) throws IOException { - return Math.toIntExact(read(new ByteBuffer[] {dst})); - } - - @Override - public long read(ByteBuffer[] dsts) throws IOException { - return read(dsts, 0, dsts.length); - } - @Override public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { if (complete && open) { diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcBlobReadChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcBlobReadChannel.java index d3c5058c2..ba9b5b506 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcBlobReadChannel.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcBlobReadChannel.java @@ -17,6 +17,7 @@ package com.google.cloud.storage; import static com.google.cloud.storage.ByteSizeConstants._16MiB; +import static com.google.cloud.storage.Maths.sub; import static com.google.cloud.storage.StorageV2ProtoUtils.seekReadObjectRequest; import static com.google.common.base.Preconditions.checkArgument; 
import static com.google.common.base.Preconditions.checkState; @@ -25,19 +26,15 @@ import com.google.api.gax.rpc.ServerStreamingCallable; import com.google.cloud.ReadChannel; import com.google.cloud.RestorableState; -import com.google.cloud.storage.BufferedReadableByteChannelSession.BufferedReadableByteChannel; -import com.google.common.base.Suppliers; import com.google.storage.v2.Object; import com.google.storage.v2.ReadObjectRequest; import com.google.storage.v2.ReadObjectResponse; import java.io.IOException; import java.nio.ByteBuffer; -import java.util.function.Supplier; -import org.checkerframework.checker.nullness.qual.Nullable; final class GrpcBlobReadChannel implements ReadChannel { - private final LazyReadChannel lazyReadChannel; + private final LazyReadChannel lazyReadChannel; private Long position; private Long limit; @@ -48,7 +45,7 @@ final class GrpcBlobReadChannel implements ReadChannel { ReadObjectRequest request, boolean autoGzipDecompression) { this.lazyReadChannel = - new LazyReadChannel( + new LazyReadChannel<>( () -> { ReadObjectRequest req = seekReadObjectRequest(request, position, sub(limit, position)); @@ -76,7 +73,7 @@ public boolean isOpen() { @Override public void close() { - if (isOpen()) { + if (lazyReadChannel.isOpen()) { try { lazyReadChannel.getChannel().close(); } catch (IOException e) { @@ -128,44 +125,6 @@ public int read(ByteBuffer dst) throws IOException { } ApiFuture getResults() { - return lazyReadChannel.session.get().getResult(); - } - - /** - * Null aware subtraction. If both {@code l} and {@code r} are non-null, return {@code l - r}. - * Otherwise, return {@code null}. 
- */ - @Nullable - private static Long sub(@Nullable Long l, @Nullable Long r) { - if (l == null || r == null) { - return null; - } else { - return l - r; - } - } - - private static final class LazyReadChannel { - private final Supplier> session; - private final Supplier channel; - - private boolean open = false; - - public LazyReadChannel(Supplier> session) { - this.session = session; - this.channel = - Suppliers.memoize( - () -> { - open = true; - return session.get().open(); - }); - } - - public BufferedReadableByteChannel getChannel() { - return channel.get(); - } - - public boolean isOpen() { - return open; - } + return lazyReadChannel.getSession().get().getResult(); } } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/GzipReadableByteChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/GzipReadableByteChannel.java index 6ef048b3f..396b8be76 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/GzipReadableByteChannel.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/GzipReadableByteChannel.java @@ -36,16 +36,6 @@ final class GzipReadableByteChannel implements UnbufferedReadableByteChannel { this.source = source; } - @Override - public int read(ByteBuffer dst) throws IOException { - return Math.toIntExact(read(new ByteBuffer[] {dst})); - } - - @Override - public long read(ByteBuffer[] dsts) throws IOException { - return read(dsts, 0, dsts.length); - } - @Override public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { if (retEOF) { diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpDownloadSessionBuilder.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpDownloadSessionBuilder.java new file mode 100644 index 000000000..af9c4978d --- /dev/null +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpDownloadSessionBuilder.java @@ -0,0 +1,156 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed 
under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static java.util.Objects.requireNonNull; + +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.api.services.storage.model.StorageObject; +import com.google.cloud.storage.ApiaryUnbufferedReadableByteChannel.ApiaryReadRequest; +import com.google.cloud.storage.BlobReadChannelV2.BlobReadChannelContext; +import com.google.cloud.storage.BufferedReadableByteChannelSession.BufferedReadableByteChannel; +import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; +import java.nio.ByteBuffer; +import java.util.function.BiFunction; +import java.util.function.Consumer; +import javax.annotation.concurrent.Immutable; + +@Immutable +final class HttpDownloadSessionBuilder { + private static final HttpDownloadSessionBuilder INSTANCE = new HttpDownloadSessionBuilder(); + + private static final int DEFAULT_BUFFER_CAPACITY = ByteSizeConstants._2MiB; + + private HttpDownloadSessionBuilder() {} + + public static HttpDownloadSessionBuilder create() { + return INSTANCE; + } + + public ReadableByteChannelSessionBuilder byteChannel( + BlobReadChannelContext blobReadChannelContext) { + // TODO: refactor BlobReadChannelContext to push retry to a lower individual config + // similar to GapicWritableByteChannelSessionBuilder.ResumableUploadBuilder.withRetryConfig + return new 
ReadableByteChannelSessionBuilder(blobReadChannelContext); + } + + public static final class ReadableByteChannelSessionBuilder { + + private final BlobReadChannelContext blobReadChannelContext; + // private Hasher hasher; // TODO: wire in Hasher + private Consumer callback; + + private ReadableByteChannelSessionBuilder(BlobReadChannelContext blobReadChannelContext) { + this.blobReadChannelContext = blobReadChannelContext; + } + + public ReadableByteChannelSessionBuilder setCallback(Consumer callback) { + this.callback = callback; + return this; + } + + public BufferedReadableByteChannelSessionBuilder buffered() { + return buffered(BufferHandle.allocate(DEFAULT_BUFFER_CAPACITY)); + } + + public BufferedReadableByteChannelSessionBuilder buffered(BufferHandle bufferHandle) { + return new BufferedReadableByteChannelSessionBuilder(bufferHandle, bindFunction()); + } + + public BufferedReadableByteChannelSessionBuilder buffered(ByteBuffer buffer) { + return buffered(BufferHandle.handleOf(buffer)); + } + + public UnbufferedReadableByteChannelSessionBuilder unbuffered() { + return new UnbufferedReadableByteChannelSessionBuilder(bindFunction()); + } + + private BiFunction< + ApiaryReadRequest, SettableApiFuture, UnbufferedReadableByteChannel> + bindFunction() { + // for any non-final value, create a reference to the value at this point in time + return (request, resultFuture) -> + new ApiaryUnbufferedReadableByteChannel( + request, + blobReadChannelContext.getApiaryClient(), + resultFuture, + blobReadChannelContext.getStorageOptions(), + blobReadChannelContext.getRetryAlgorithmManager().idempotent(), + callback); + } + + public static final class BufferedReadableByteChannelSessionBuilder { + + private final BiFunction< + ApiaryReadRequest, SettableApiFuture, BufferedReadableByteChannel> + f; + private ApiaryReadRequest request; + + private BufferedReadableByteChannelSessionBuilder( + BufferHandle buffer, + BiFunction< + ApiaryReadRequest, + SettableApiFuture, + 
UnbufferedReadableByteChannel> + f) { + this.f = f.andThen(c -> new DefaultBufferedReadableByteChannel(buffer, c)); + } + + public BufferedReadableByteChannelSessionBuilder setApiaryReadRequest( + ApiaryReadRequest request) { + this.request = requireNonNull(request, "request must be non null"); + return this; + } + + public BufferedReadableByteChannelSession build() { + return new ChannelSession.BufferedReadSession<>( + ApiFutures.immediateFuture(request), + f.andThen(StorageByteChannels.readable()::createSynchronized)); + } + } + + public static final class UnbufferedReadableByteChannelSessionBuilder { + + private final BiFunction< + ApiaryReadRequest, SettableApiFuture, UnbufferedReadableByteChannel> + f; + private ApiaryReadRequest request; + + private UnbufferedReadableByteChannelSessionBuilder( + BiFunction< + ApiaryReadRequest, + SettableApiFuture, + UnbufferedReadableByteChannel> + f) { + this.f = f; + } + + public UnbufferedReadableByteChannelSessionBuilder setApiaryReadRequest( + ApiaryReadRequest request) { + this.request = requireNonNull(request, "request must be non null"); + return this; + } + + public UnbufferedReadableByteChannelSession build() { + return new ChannelSession.UnbufferedReadSession<>( + ApiFutures.immediateFuture(request), + f.andThen(StorageByteChannels.readable()::createSynchronized)); + } + } + } +} diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpRetryAlgorithmManager.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpRetryAlgorithmManager.java index 5a7df8621..7cc053ea4 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpRetryAlgorithmManager.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/HttpRetryAlgorithmManager.java @@ -26,9 +26,11 @@ import com.google.api.services.storage.model.StorageObject; import com.google.cloud.storage.spi.v1.StorageRpc; import com.google.cloud.storage.spi.v1.StorageRpc.RewriteRequest; +import 
com.google.common.base.MoreObjects; import java.io.Serializable; import java.util.List; import java.util.Map; +import java.util.Objects; final class HttpRetryAlgorithmManager implements Serializable { @@ -39,6 +41,14 @@ final class HttpRetryAlgorithmManager implements Serializable { this.retryStrategy = retryStrategy; } + /** + * Some operations are inherently idempotent after they're started (Resumable uploads, rewrites) + * provide access to the idempotent {@link ResultRetryAlgorithm} for those uses. + */ + ResultRetryAlgorithm idempotent() { + return retryStrategy.getIdempotentHandler(); + } + public ResultRetryAlgorithm getForBucketAclCreate( BucketAccessControl pb, Map optionsMap) { return retryStrategy.getNonidempotentHandler(); @@ -260,4 +270,26 @@ public ResultRetryAlgorithm getForNotificationList(String bucket) { public ResultRetryAlgorithm getForNotificationDelete(String bucket, String notificationId) { return retryStrategy.getIdempotentHandler(); } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof HttpRetryAlgorithmManager)) { + return false; + } + HttpRetryAlgorithmManager that = (HttpRetryAlgorithmManager) o; + return Objects.equals(retryStrategy, that.retryStrategy); + } + + @Override + public int hashCode() { + return Objects.hash(retryStrategy); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("retryStrategy", retryStrategy).toString(); + } } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/LazyReadChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/LazyReadChannel.java new file mode 100644 index 000000000..250937ac7 --- /dev/null +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/LazyReadChannel.java @@ -0,0 +1,51 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.cloud.storage.BufferedReadableByteChannelSession.BufferedReadableByteChannel; +import com.google.common.base.Suppliers; +import java.util.function.Supplier; + +final class LazyReadChannel { + + private final Supplier> session; + private final Supplier channel; + + private boolean open = false; + + LazyReadChannel(Supplier> session) { + this.session = session; + this.channel = + Suppliers.memoize( + () -> { + open = true; + return session.get().open(); + }); + } + + BufferedReadableByteChannel getChannel() { + return channel.get(); + } + + Supplier> getSession() { + return session; + } + + boolean isOpen() { + return open && channel.get().isOpen(); + } +} diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/Maths.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/Maths.java new file mode 100644 index 000000000..a8a25fefd --- /dev/null +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/Maths.java @@ -0,0 +1,73 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import org.checkerframework.checker.nullness.qual.Nullable; + +final class Maths { + + private Maths() {} + + /** + * Null aware subtraction. + * + *
    + *
  <li>If {@code l} is non-null while {@code r} is null, return {@code l} + *
  <li>If both {@code l} and {@code r} are non-null, return {@code l - r} + *
  <li>Otherwise, return {@code null}. + *
+ */ + @Nullable + static Long sub(@Nullable Long l, @Nullable Long r) { + if (l != null && r == null) { + return l; + } else if (l == null) { + return null; + } else { + return l - r; + } + } + + /** + * Null aware subtraction. + * + *
    + *
  <li>If {@code l} is non-null while {@code r} is null, return {@code l} + *
  <li>If both {@code l} and {@code r} are non-null, return {@code l - r} + *
  <li>Otherwise, return {@code null}. + *
+ */ + @Nullable + static Integer sub(@Nullable Integer l, @Nullable Integer r) { + if (l != null && r == null) { + return l; + } else if (l == null) { + return null; + } else { + return l - r; + } + } + + /** + * Increment some {@code base} {@link Long} by {@code factor}. + * + *

If {@code base} is null, {@code factor} will be returned + */ + static long add(@Nullable Long base, long factor) { + return base != null ? base + factor : factor; + } +} diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableMedia.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableMedia.java index caf2e44f1..85c96bd8d 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableMedia.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableMedia.java @@ -60,6 +60,10 @@ static GapicMediaSession gapic() { return GapicMediaSession.INSTANCE; } + static HttpMediaSession http() { + return HttpMediaSession.INSTANCE; + } + private static boolean isValidSignedURL(String signedURLQuery) { boolean isValid = true; if (signedURLQuery.startsWith("X-Goog-Algorithm=")) { @@ -93,4 +97,14 @@ GapicDownloadSessionBuilder read() { return GapicDownloadSessionBuilder.create(); } } + + static final class HttpMediaSession { + private static final HttpMediaSession INSTANCE = new HttpMediaSession(); + + private HttpMediaSession() {} + + HttpDownloadSessionBuilder read() { + return HttpDownloadSessionBuilder.create(); + } + } } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageByteChannels.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageByteChannels.java index 48857cf2a..c3ff80138 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageByteChannels.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageByteChannels.java @@ -221,6 +221,7 @@ public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { long totalBytesRead = 0; for (int i = offset; i < length; i++) { ByteBuffer dst = dsts[i]; + int goal = dst.remaining(); if (dst.hasRemaining()) { int read = c.read(dst); if (read == -1) { @@ -230,6 +231,10 @@ public long read(ByteBuffer[] dsts, int offset, int length) throws 
IOException { } else { break; } + } else if (read != goal) { + // if we weren't able to fill up the current buffer with this last read, return so we + // don't block and wait for another read call. + return totalBytesRead + read; } totalBytesRead += read; } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageException.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageException.java index 61c100219..f25677a72 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageException.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageException.java @@ -164,6 +164,22 @@ public static StorageException translate(IOException exception) { } } + static T wrapIOException(IOExceptionCallable c) { + try { + return c.call(); + } catch (IOException e) { + throw StorageException.coalesce(e); + } + } + + static void wrapIOException(IOExceptionRunnable r) { + try { + r.run(); + } catch (IOException e) { + throw StorageException.coalesce(e); + } + } + @Nullable private static String getStatusExceptionMessage(ApiException apiEx) { Throwable cause = apiEx.getCause(); @@ -181,4 +197,14 @@ private static ApiException asApiExceptionOrNull(Throwable cause) { return null; } } + + @FunctionalInterface + interface IOExceptionCallable { + T call() throws IOException; + } + + @FunctionalInterface + interface IOExceptionRunnable { + void run() throws IOException; + } } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageImpl.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageImpl.java index 7241d76cb..efbbd9982 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageImpl.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageImpl.java @@ -37,6 +37,7 @@ import com.google.cloud.ReadChannel; import com.google.cloud.WriteChannel; import com.google.cloud.storage.Acl.Entity; +import 
com.google.cloud.storage.BlobReadChannelV2.BlobReadChannelContext; import com.google.cloud.storage.HmacKey.HmacKeyMetadata; import com.google.cloud.storage.PostPolicyV4.ConditionV4Type; import com.google.cloud.storage.PostPolicyV4.PostConditionsV4; @@ -103,8 +104,8 @@ final class StorageImpl extends BaseService implements Storage { private static final ApiaryConversions codecs = Conversions.apiary(); - private final HttpRetryAlgorithmManager retryAlgorithmManager; - private final StorageRpc storageRpc; + final HttpRetryAlgorithmManager retryAlgorithmManager; + final StorageRpc storageRpc; StorageImpl(HttpStorageOptions options) { super(options); @@ -581,15 +582,15 @@ public StorageBatch batch() { @Override public ReadChannel reader(String bucket, String blob, BlobSourceOption... options) { - ImmutableMap optionsMap = Opts.unwrap(options).getRpcOptions(); - return new BlobReadChannel(getOptions(), BlobId.of(bucket, blob), optionsMap); + return reader(BlobId.of(bucket, blob), options); } @Override public ReadChannel reader(BlobId blob, BlobSourceOption... 
options) { - ImmutableMap optionsMap = - Opts.unwrap(options).resolveFrom(blob).getRpcOptions(); - return new BlobReadChannel(getOptions(), blob, optionsMap); + Opts opts = Opts.unwrap(options).resolveFrom(blob); + StorageObject storageObject = Conversions.apiary().blobId().encode(blob); + ImmutableMap optionsMap = opts.getRpcOptions(); + return new BlobReadChannelV2(storageObject, optionsMap, BlobReadChannelContext.from(this)); } @Override diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageReadChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageReadChannel.java new file mode 100644 index 000000000..dfcc7fb50 --- /dev/null +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageReadChannel.java @@ -0,0 +1,66 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkArgument; + +import com.google.cloud.ReadChannel; +import java.io.IOException; + +interface StorageReadChannel extends ReadChannel { + + StorageReadChannel setByteRangeSpec(ByteRangeSpec byteRangeSpec); + + default ByteRangeSpec getByteRangeSpec() { + return ByteRangeSpec.nullRange(); + } + + /** @deprecated Use {@link #setByteRangeSpec(ByteRangeSpec)} */ + @Deprecated + @SuppressWarnings("resource") + @Override + default void seek(long position) throws IOException { + checkArgument(position >= 0, "position must be >= 0"); + try { + setByteRangeSpec(getByteRangeSpec().withNewBeginOffset(position)); + } catch (StorageException e) { + Throwable cause = e.getCause(); + if (cause instanceof IOException) { + throw (IOException) cause; + } else { + throw e; + } + } + } + + /** @deprecated Use {@link #setByteRangeSpec(ByteRangeSpec)} */ + @SuppressWarnings("resource") + @Deprecated + @Override + default ReadChannel limit(long limit) { + checkArgument(limit >= 0, "limit must be >= 0"); + setByteRangeSpec(getByteRangeSpec().withNewEndOffset(limit)); + return this; + } + + /** @deprecated Use {@link #getByteRangeSpec()} */ + @Deprecated + @Override + default long limit() { + return getByteRangeSpec().endOffset(); + } +} diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageRetryStrategy.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageRetryStrategy.java index 7175c5882..19a332d41 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageRetryStrategy.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageRetryStrategy.java @@ -88,7 +88,7 @@ public interface StorageRetryStrategy extends Serializable { * @see #getUniformStorageRetryStrategy() */ static StorageRetryStrategy getDefaultStorageRetryStrategy() { - return new DefaultStorageRetryStrategy(); + return 
DefaultStorageRetryStrategy.INSTANCE; } /** diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/UnbufferedReadableByteChannelSession.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/UnbufferedReadableByteChannelSession.java index 323d40479..6248b1d5b 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/UnbufferedReadableByteChannelSession.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/UnbufferedReadableByteChannelSession.java @@ -17,11 +17,23 @@ package com.google.cloud.storage; import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; +import java.io.IOException; +import java.nio.ByteBuffer; import java.nio.channels.ReadableByteChannel; import java.nio.channels.ScatteringByteChannel; interface UnbufferedReadableByteChannelSession extends ReadableByteChannelSession { - interface UnbufferedReadableByteChannel extends ReadableByteChannel, ScatteringByteChannel {} + interface UnbufferedReadableByteChannel extends ReadableByteChannel, ScatteringByteChannel { + @Override + default int read(ByteBuffer dst) throws IOException { + return Math.toIntExact(read(new ByteBuffer[] {dst}, 0, 1)); + } + + @Override + default long read(ByteBuffer[] dsts) throws IOException { + return read(dsts, 0, dsts.length); + } + } } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/HttpStorageRpc.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/HttpStorageRpc.java index 9112da052..d53854cdb 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/HttpStorageRpc.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/HttpStorageRpc.java @@ -132,6 +132,10 @@ public HttpStorageRpc(StorageOptions options, JsonFactory jsonFactory) { .build(); } + public Storage getStorage() { + return storage; + } + private static final class InvocationIdInitializer implements 
HttpRequestInitializer { @Nullable HttpRequestInitializer initializer; diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/StorageRpc.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/StorageRpc.java index c600723bd..1e1e93e01 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/StorageRpc.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/spi/v1/StorageRpc.java @@ -17,6 +17,7 @@ package com.google.cloud.storage.spi.v1; import com.google.api.core.InternalApi; +import com.google.api.services.storage.Storage; import com.google.api.services.storage.model.Bucket; import com.google.api.services.storage.model.BucketAccessControl; import com.google.api.services.storage.model.HmacKey; @@ -607,4 +608,7 @@ TestIamPermissionsResponse testIamPermissions( * @throws StorageException upon failure */ ServiceAccount getServiceAccount(String projectId); + + @InternalApi + Storage getStorage(); } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/testing/StorageRpcTestBase.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/testing/StorageRpcTestBase.java index 81e7eee58..97104634f 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/testing/StorageRpcTestBase.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/testing/StorageRpcTestBase.java @@ -16,6 +16,7 @@ package com.google.cloud.storage.testing; +import com.google.api.services.storage.Storage; import com.google.api.services.storage.model.Bucket; import com.google.api.services.storage.model.BucketAccessControl; import com.google.api.services.storage.model.HmacKey; @@ -315,4 +316,9 @@ public Bucket lockRetentionPolicy(Bucket bucket, Map options) { public ServiceAccount getServiceAccount(String projectId) { throw new UnsupportedOperationException("Not implemented yet"); } + + @Override + public Storage getStorage() { + throw new 
UnsupportedOperationException("Not implemented yet"); + } } diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobReadChannelTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobReadChannelTest.java deleted file mode 100644 index 2f759ea23..000000000 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobReadChannelTest.java +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Copyright 2015 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.google.cloud.storage; - -import static org.easymock.EasyMock.anyObject; -import static org.easymock.EasyMock.createMock; -import static org.easymock.EasyMock.expect; -import static org.easymock.EasyMock.replay; -import static org.easymock.EasyMock.verify; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import com.google.cloud.ReadChannel; -import com.google.cloud.RestorableState; -import com.google.cloud.Tuple; -import com.google.cloud.storage.spi.StorageRpcFactory; -import com.google.cloud.storage.spi.v1.StorageRpc; -import com.google.common.collect.ImmutableMap; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.util.Arrays; -import java.util.Map; -import java.util.Random; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -public class BlobReadChannelTest { - - private static final String BUCKET_NAME = "b"; - private static final String BLOB_NAME = "n"; - private static final BlobId BLOB_ID = BlobId.of(BUCKET_NAME, BLOB_NAME, -1L); - private static final Map EMPTY_RPC_OPTIONS = ImmutableMap.of(); - private static final int DEFAULT_CHUNK_SIZE = 2 * 1024 * 1024; - private static final int CUSTOM_CHUNK_SIZE = 2 * 1024 * 1024; - private static final Random RANDOM = new Random(); - - private HttpStorageOptions options; - private StorageRpcFactory rpcFactoryMock; - private StorageRpc storageRpcMock; - private BlobReadChannel reader; - - @Before - public void setUp() { - rpcFactoryMock = createMock(StorageRpcFactory.class); - storageRpcMock = createMock(StorageRpc.class); - expect(rpcFactoryMock.create(anyObject(StorageOptions.class))).andReturn(storageRpcMock); - replay(rpcFactoryMock); - options = - HttpStorageOptions.newBuilder() - .setProjectId("projectId") - 
.setServiceRpcFactory(rpcFactoryMock) - .build(); - } - - @After - public void tearDown() { - verify(rpcFactoryMock, storageRpcMock); - } - - @Test - public void testCreate() { - replay(storageRpcMock); - reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); - assertTrue(reader.isOpen()); - } - - @Test - public void testReadBuffered() throws IOException { - reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); - byte[] result = randomByteArray(DEFAULT_CHUNK_SIZE); - ByteBuffer firstReadBuffer = ByteBuffer.allocate(42); - ByteBuffer secondReadBuffer = ByteBuffer.allocate(42); - expect( - storageRpcMock.read( - Conversions.apiary().blobId().encode(BLOB_ID), - EMPTY_RPC_OPTIONS, - 0, - DEFAULT_CHUNK_SIZE)) - .andReturn(Tuple.of("etag", result)); - replay(storageRpcMock); - reader.read(firstReadBuffer); - reader.read(secondReadBuffer); - assertArrayEquals(Arrays.copyOf(result, firstReadBuffer.capacity()), firstReadBuffer.array()); - assertArrayEquals( - Arrays.copyOfRange( - result, - firstReadBuffer.capacity(), - firstReadBuffer.capacity() + secondReadBuffer.capacity()), - secondReadBuffer.array()); - } - - @Test - public void testReadBig() throws IOException { - reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); - reader.setChunkSize(CUSTOM_CHUNK_SIZE); - byte[] firstResult = randomByteArray(DEFAULT_CHUNK_SIZE); - byte[] secondResult = randomByteArray(DEFAULT_CHUNK_SIZE); - ByteBuffer firstReadBuffer = ByteBuffer.allocate(DEFAULT_CHUNK_SIZE); - ByteBuffer secondReadBuffer = ByteBuffer.allocate(42); - expect( - storageRpcMock.read( - Conversions.apiary().blobId().encode(BLOB_ID), - EMPTY_RPC_OPTIONS, - 0, - DEFAULT_CHUNK_SIZE)) - .andReturn(Tuple.of("etag", firstResult)); - expect( - storageRpcMock.read( - Conversions.apiary().blobId().encode(BLOB_ID), - EMPTY_RPC_OPTIONS, - DEFAULT_CHUNK_SIZE, - CUSTOM_CHUNK_SIZE)) - .andReturn(Tuple.of("etag", secondResult)); - replay(storageRpcMock); - reader.read(firstReadBuffer); - 
reader.read(secondReadBuffer); - assertArrayEquals(firstResult, firstReadBuffer.array()); - assertArrayEquals( - Arrays.copyOf(secondResult, secondReadBuffer.capacity()), secondReadBuffer.array()); - } - - @Test - public void testReadFinish() throws IOException { - reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); - byte[] result = {}; - ByteBuffer readBuffer = ByteBuffer.allocate(DEFAULT_CHUNK_SIZE); - expect( - storageRpcMock.read( - Conversions.apiary().blobId().encode(BLOB_ID), - EMPTY_RPC_OPTIONS, - 0, - DEFAULT_CHUNK_SIZE)) - .andReturn(Tuple.of("etag", result)); - replay(storageRpcMock); - assertEquals(-1, reader.read(readBuffer)); - } - - @Test - public void testSeek() throws IOException { - reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); - reader.seek(42); - byte[] result = randomByteArray(DEFAULT_CHUNK_SIZE); - ByteBuffer readBuffer = ByteBuffer.allocate(DEFAULT_CHUNK_SIZE); - expect( - storageRpcMock.read( - Conversions.apiary().blobId().encode(BLOB_ID), - EMPTY_RPC_OPTIONS, - 42, - DEFAULT_CHUNK_SIZE)) - .andReturn(Tuple.of("etag", result)); - replay(storageRpcMock); - reader.read(readBuffer); - assertArrayEquals(result, readBuffer.array()); - } - - @Test - public void testClose() { - replay(storageRpcMock); - reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); - assertTrue(reader.isOpen()); - reader.close(); - assertFalse(reader.isOpen()); - } - - @Test - public void testReadClosed() throws IOException { - replay(storageRpcMock); - reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); - reader.close(); - try { - ByteBuffer readBuffer = ByteBuffer.allocate(DEFAULT_CHUNK_SIZE); - reader.read(readBuffer); - fail("Expected BlobReadChannel read to throw ClosedChannelException"); - } catch (ClosedChannelException ex) { - // expected - } - } - - @Test - public void testReadGenerationChanged() throws IOException { - BlobId blobId = BlobId.of(BUCKET_NAME, BLOB_NAME); - reader = new 
BlobReadChannel(options, blobId, EMPTY_RPC_OPTIONS); - byte[] firstResult = randomByteArray(DEFAULT_CHUNK_SIZE); - byte[] secondResult = randomByteArray(DEFAULT_CHUNK_SIZE); - ByteBuffer firstReadBuffer = ByteBuffer.allocate(DEFAULT_CHUNK_SIZE); - ByteBuffer secondReadBuffer = ByteBuffer.allocate(DEFAULT_CHUNK_SIZE); - expect( - storageRpcMock.read( - Conversions.apiary().blobId().encode(blobId), - EMPTY_RPC_OPTIONS, - 0, - DEFAULT_CHUNK_SIZE)) - .andReturn(Tuple.of("etag1", firstResult)); - expect( - storageRpcMock.read( - Conversions.apiary().blobId().encode(blobId), - EMPTY_RPC_OPTIONS, - DEFAULT_CHUNK_SIZE, - DEFAULT_CHUNK_SIZE)) - .andReturn(Tuple.of("etag2", secondResult)); - replay(storageRpcMock); - reader.read(firstReadBuffer); - try { - reader.read(secondReadBuffer); - fail("Expected ReadChannel read to throw StorageException"); - } catch (IOException ex) { - StringBuilder messageBuilder = new StringBuilder(); - messageBuilder.append("Blob ").append(blobId).append(" was updated while reading"); - assertEquals(messageBuilder.toString(), ex.getMessage()); - } - } - - @Test - public void testSaveAndRestore() throws IOException { - byte[] firstResult = randomByteArray(DEFAULT_CHUNK_SIZE); - byte[] secondResult = randomByteArray(DEFAULT_CHUNK_SIZE); - ByteBuffer firstReadBuffer = ByteBuffer.allocate(42); - ByteBuffer secondReadBuffer = ByteBuffer.allocate(DEFAULT_CHUNK_SIZE); - expect( - storageRpcMock.read( - Conversions.apiary().blobId().encode(BLOB_ID), - EMPTY_RPC_OPTIONS, - 0, - DEFAULT_CHUNK_SIZE)) - .andReturn(Tuple.of("etag", firstResult)); - expect( - storageRpcMock.read( - Conversions.apiary().blobId().encode(BLOB_ID), - EMPTY_RPC_OPTIONS, - 42, - DEFAULT_CHUNK_SIZE)) - .andReturn(Tuple.of("etag", secondResult)); - replay(storageRpcMock); - reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); - reader.read(firstReadBuffer); - RestorableState readerState = reader.capture(); - ReadChannel restoredReader = readerState.restore(); - 
restoredReader.read(secondReadBuffer); - assertArrayEquals( - Arrays.copyOf(firstResult, firstReadBuffer.capacity()), firstReadBuffer.array()); - assertArrayEquals(secondResult, secondReadBuffer.array()); - } - - @Test - public void testStateEquals() { - replay(storageRpcMock); - reader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); - int limit = 342; - reader.limit(limit); - @SuppressWarnings("resource") // avoid closing when you don't want partial writes to GCS - ReadChannel secondReader = new BlobReadChannel(options, BLOB_ID, EMPTY_RPC_OPTIONS); - secondReader.limit(limit); - RestorableState state = reader.capture(); - RestorableState secondState = secondReader.capture(); - assertEquals(state, secondState); - assertEquals(state.hashCode(), secondState.hashCode()); - assertEquals(state.toString(), secondState.toString()); - - ReadChannel restore = secondState.restore(); - assertEquals(limit, restore.limit()); - } - - private static byte[] randomByteArray(int size) { - byte[] byteArray = new byte[size]; - RANDOM.nextBytes(byteArray); - return byteArray; - } -} diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobTest.java index b9bea7de0..68ac4683b 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/BlobTest.java @@ -42,6 +42,7 @@ import com.google.cloud.storage.BlobInfo.BuilderImpl; import com.google.cloud.storage.Storage.BlobWriteOption; import com.google.cloud.storage.Storage.CopyRequest; +import com.google.cloud.storage.spi.v1.HttpStorageRpc; import com.google.cloud.storage.spi.v1.StorageRpc; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -610,7 +611,7 @@ public void testBuilder() { } private StorageRpc prepareForDownload() { - StorageRpc mockStorageRpc = createNiceMock(StorageRpc.class); + 
HttpStorageRpc mockStorageRpc = createNiceMock(HttpStorageRpc.class); expect(storage.getOptions()).andReturn(mockOptions).anyTimes(); replay(storage); expect(mockOptions.getStorageRpcV1()).andReturn(mockStorageRpc); diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/ByteRangeSpecTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/ByteRangeSpecTest.java new file mode 100644 index 000000000..bdf1ce5ee --- /dev/null +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/ByteRangeSpecTest.java @@ -0,0 +1,651 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Streams; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.util.stream.Stream; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.Test; +import org.junit.experimental.runners.Enclosed; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Enclosed.class) +public final class ByteRangeSpecTest { + + public static final class Behavior { + + @Test + public void beginNonNullZero_endNonNullNonInfinity() throws Exception { + ByteRangeSpec rel = ByteRangeSpec.relativeLength(0L, 52L); + ByteRangeSpec exO = ByteRangeSpec.explicit(0L, 52L); + ByteRangeSpec exC = ByteRangeSpec.explicitClosed(0L, 51L); + + threeWayEqual(exO, exC, rel); + } + + @Test + public void beginNonNullNonZero_endNonNullNonInfinity() throws Exception { + ByteRangeSpec rel = ByteRangeSpec.relativeLength(10L, 10L); + ByteRangeSpec exO = ByteRangeSpec.explicit(10L, 20L); + ByteRangeSpec exC = ByteRangeSpec.explicitClosed(10L, 19L); + + threeWayEqual(exO, exC, rel); + } + + @Test + public void beginNull_endNonNullNonInfinity() throws Exception { + ByteRangeSpec rel = ByteRangeSpec.relativeLength(null, 10L); + ByteRangeSpec exO = ByteRangeSpec.explicit(null, 10L); + ByteRangeSpec exC = ByteRangeSpec.explicitClosed(null, 9L); + + threeWayEqual(exO, exC, rel); + } + + @Test + public void beginNonNullNonZero_endNull() throws Exception { + ByteRangeSpec rel = ByteRangeSpec.relativeLength(10L, null); + ByteRangeSpec exO = ByteRangeSpec.explicit(10L, null); + ByteRangeSpec exC = 
ByteRangeSpec.explicitClosed(10L, null); + + threeWayEqual(exO, exC, rel); + } + + @Test + public void bothNull_relative() { + assertThat(ByteRangeSpec.relativeLength(null, null)) + .isSameInstanceAs(ByteRangeSpec.nullRange()); + } + + @Test + public void bothNull_explicit() { + assertThat(ByteRangeSpec.explicit(null, null)).isSameInstanceAs(ByteRangeSpec.nullRange()); + } + + @Test + public void bothNull_explicitClosed() { + assertThat(ByteRangeSpec.explicitClosed(null, null)) + .isSameInstanceAs(ByteRangeSpec.nullRange()); + } + + @Test + public void httpRangeHeaderIsCached() { + ByteRangeSpec relative = ByteRangeSpec.relativeLength(5L, null); + + String header1 = relative.getHttpRangeHeader(); + String header2 = relative.getHttpRangeHeader(); + + assertThat(header1).isSameInstanceAs(header2); + } + + @Test + public void withNewBeginOffset_sameInstanceIfNotDifferent_relative() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, 10L); + assertThat(spec.withNewBeginOffset(3L)).isSameInstanceAs(spec); + } + + @Test + public void withNewBeginOffset_sameInstanceIfNotDifferent_null() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + assertThat(spec.withNewBeginOffset(0)).isSameInstanceAs(spec); + } + + @Test + public void withNewBeginOffset_sameInstanceIfNotDifferent_leftClosed() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, null); + assertThat(spec.withNewBeginOffset(3L)).isSameInstanceAs(spec); + } + + @Test + public void withNewBeginOffset_sameInstanceIfNotDifferent_leftClosedRightOpen() { + ByteRangeSpec spec = ByteRangeSpec.explicit(3L, 10L); + assertThat(spec.withNewBeginOffset(3L)).isSameInstanceAs(spec); + } + + @Test + public void withNewBeginOffset_sameInstanceIfNotDifferent_leftClosedRightClosed() { + ByteRangeSpec spec = ByteRangeSpec.explicitClosed(3L, 10L); + assertThat(spec.withNewBeginOffset(3L)).isSameInstanceAs(spec); + } + + @Test + public void withShiftBeginOffset_sameInstanceIfNotDifferent_relative() { + 
ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, 10L); + assertThat(spec.withShiftBeginOffset(0)).isSameInstanceAs(spec); + } + + @Test + public void withShiftBeginOffset_sameInstanceIfNotDifferent_null() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + assertThat(spec.withShiftBeginOffset(0)).isSameInstanceAs(spec); + } + + @Test + public void withShiftBeginOffset_sameInstanceIfNotDifferent_leftClosed() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, null); + assertThat(spec.withShiftBeginOffset(0)).isSameInstanceAs(spec); + } + + @Test + public void withShiftBeginOffset_sameInstanceIfNotDifferent_leftClosedRightOpen() { + ByteRangeSpec spec = ByteRangeSpec.explicit(3L, 10L); + assertThat(spec.withShiftBeginOffset(0)).isSameInstanceAs(spec); + } + + @Test + public void withShiftBeginOffset_sameInstanceIfNotDifferent_leftClosedRightClosed() { + ByteRangeSpec spec = ByteRangeSpec.explicitClosed(3L, 10L); + assertThat(spec.withShiftBeginOffset(0)).isSameInstanceAs(spec); + } + + @Test + public void withRelativeLength_sameInstanceIfNotDifferent_relative() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, 10L); + assertThat(spec.withNewRelativeLength(10L)).isSameInstanceAs(spec); + } + + @Test + public void withNewEndOffset_sameInstanceIfNotDifferent_null() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + assertThat(spec.withNewEndOffset(RangeScenarios.INF)).isSameInstanceAs(spec); + } + + @Test + public void withNewEndOffsetClosed_sameInstanceIfNotDifferent_null() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + assertThat(spec.withNewEndOffsetClosed(RangeScenarios.INF)).isSameInstanceAs(spec); + } + + @Test + public void withNewRelativeLength_sameInstanceIfNotDifferent_null() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + assertThat(spec.withNewRelativeLength(RangeScenarios.INF)).isSameInstanceAs(spec); + } + + @Test + public void 
withNewEndOffset_sameInstanceIfNotDifferent_leftClosedRightOpen() { + ByteRangeSpec spec = ByteRangeSpec.explicit(3L, 41L); + assertThat(spec.withNewEndOffset(41L)).isSameInstanceAs(spec); + } + + @Test + public void withNewEndOffsetClosed_sameInstanceIfNotDifferent_leftClosedRightClosed() { + ByteRangeSpec spec = ByteRangeSpec.explicitClosed(3L, 41L); + assertThat(spec.withNewEndOffsetClosed(41L)).isSameInstanceAs(spec); + } + + @Test + public void withNewBeginOffset_relative() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, 10L); + ByteRangeSpec actual = spec.withNewBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(4); + } + + @Test + public void withNewBeginOffset_null() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + ByteRangeSpec actual = spec.withNewBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(4); + } + + @Test + public void withNewBeginOffset_leftClosed() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, null); + ByteRangeSpec actual = spec.withNewBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(4); + } + + @Test + public void withNewBeginOffset_leftClosedRightOpen() { + ByteRangeSpec spec = ByteRangeSpec.explicit(3L, 10L); + ByteRangeSpec actual = spec.withNewBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(4); + } + + @Test + public void withNewBeginOffset_leftClosedRightClosed() { + ByteRangeSpec spec = ByteRangeSpec.explicitClosed(3L, 10L); + ByteRangeSpec actual = spec.withNewBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(4); + } + + @Test + public void withShiftBeginOffset_relative() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, 10L); + ByteRangeSpec actual = spec.withShiftBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(7); + } + + @Test + public void withShiftBeginOffset_null() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + ByteRangeSpec actual = spec.withShiftBeginOffset(4L); + 
assertThat(actual.beginOffset()).isEqualTo(4); + } + + @Test + public void withShiftBeginOffset_leftClosed() { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, null); + ByteRangeSpec actual = spec.withShiftBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(7); + } + + @Test + public void withShiftBeginOffset_leftClosedRightOpen() { + ByteRangeSpec spec = ByteRangeSpec.explicit(3L, 10L); + ByteRangeSpec actual = spec.withShiftBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(7); + } + + @Test + public void withShiftBeginOffset_leftClosedRightClosed() { + ByteRangeSpec spec = ByteRangeSpec.explicitClosed(3L, 10L); + ByteRangeSpec actual = spec.withShiftBeginOffset(4L); + assertThat(actual.beginOffset()).isEqualTo(7); + } + + @Test + public void withNewEndOffset_relative() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, 10L); + ByteRangeSpec actual = spec.withNewEndOffset(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.endOffset()).isEqualTo(4)); + } + + @Test + public void withNewEndOffset_null() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + ByteRangeSpec actual = spec.withNewEndOffset(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(0), + () -> assertThat(actual.endOffset()).isEqualTo(4)); + } + + @Test + public void withNewEndOffset_leftClosed() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, null); + ByteRangeSpec actual = spec.withNewEndOffset(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.endOffset()).isEqualTo(4)); + } + + @Test + public void withNewEndOffset_leftClosedRightOpen() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.explicit(3L, 10L); + ByteRangeSpec actual = spec.withNewEndOffset(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> 
assertThat(actual.endOffset()).isEqualTo(4)); + } + + @Test + public void withNewEndOffset_leftClosedRightClosed() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.explicitClosed(3L, 10L); + ByteRangeSpec actual = spec.withNewEndOffset(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.endOffset()).isEqualTo(4)); + } + + @Test + public void withNewEndOffsetClosed_relative() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, 10L); + ByteRangeSpec actual = spec.withNewEndOffsetClosed(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.endOffset()).isEqualTo(4)); + } + + @Test + public void withNewEndOffsetClosed_null() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + ByteRangeSpec actual = spec.withNewEndOffsetClosed(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(0), + () -> assertThat(actual.endOffset()).isEqualTo(4)); + } + + @Test + public void withNewEndOffsetClosed_leftClosed() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, null); + ByteRangeSpec actual = spec.withNewEndOffsetClosed(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.endOffset()).isEqualTo(4)); + } + + @Test + public void withNewEndOffsetClosed_leftClosedRightOpen() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.explicit(3L, 10L); + ByteRangeSpec actual = spec.withNewEndOffsetClosed(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.endOffset()).isEqualTo(4)); + } + + @Test + public void withNewEndOffsetClosed_leftClosedRightClosed() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.explicitClosed(3L, 10L); + ByteRangeSpec actual = spec.withNewEndOffsetClosed(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> 
assertThat(actual.endOffset()).isEqualTo(4)); + } + + @Test + public void withNewRelativeLength_relative() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, 10L); + ByteRangeSpec actual = spec.withNewRelativeLength(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.length()).isEqualTo(4)); + } + + @Test + public void withNewRelativeLength_null() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(null, null); + ByteRangeSpec actual = spec.withNewRelativeLength(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(0), + () -> assertThat(actual.length()).isEqualTo(4)); + } + + @Test + public void withNewRelativeLength_leftClosed() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.relativeLength(3L, null); + ByteRangeSpec actual = spec.withNewRelativeLength(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.length()).isEqualTo(4)); + } + + @Test + public void withNewRelativeLength_leftClosedRightOpen() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.explicit(3L, 10L); + ByteRangeSpec actual = spec.withNewRelativeLength(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.length()).isEqualTo(4)); + } + + @Test + public void withNewRelativeLength_leftClosedRightClosed() throws Exception { + ByteRangeSpec spec = ByteRangeSpec.explicitClosed(3L, 10L); + ByteRangeSpec actual = spec.withNewRelativeLength(4L); + assertAll( + () -> assertThat(actual.beginOffset()).isEqualTo(3), + () -> assertThat(actual.length()).isEqualTo(4)); + } + + @SuppressWarnings("EqualsBetweenInconvertibleTypes") + @Test + public void negativeEquals() { + assertThat(ByteRangeSpec.nullRange().equals("")).isFalse(); + } + + @Test + public void nullRangeShouldBeASingletonAcrossJavaSerialization() + throws IOException, ClassNotFoundException { + ByteRangeSpec orig = ByteRangeSpec.nullRange(); + 
ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (ObjectOutputStream oos = new ObjectOutputStream(baos)) { + oos.writeObject(orig); + } + + byte[] serializedBytes = baos.toByteArray(); + ByteRangeSpec deserialized; + try (ByteArrayInputStream bais = new ByteArrayInputStream(serializedBytes); + ObjectInputStream ois = new ObjectInputStream(bais)) { + deserialized = (ByteRangeSpec) ois.readObject(); + } + assertThat(deserialized).isSameInstanceAs(orig); + } + } + + private static void threeWayEqual( + ByteRangeSpec explicitO, ByteRangeSpec explicitC, ByteRangeSpec relative) throws Exception { + + assertAll( + () -> assertThat(explicitO).isEqualTo(relative), + () -> assertThat(explicitO).isEqualTo(explicitC), + () -> assertThat(explicitC).isEqualTo(relative)); + } + + @RunWith(Parameterized.class) + public static final class RangeScenarios { + + private static final long INF = Long.MAX_VALUE; + private final RangeScenario rs; + + public RangeScenarios(RangeScenario rs) { + this.rs = rs; + } + + @Test + public void httpRangeHeader() { + assertThat(rs.getSpec().getHttpRangeHeader()).isEqualTo(rs.getExpectedHttpRange()); + } + + @Test + public void beginOffset() { + assertThat(rs.getSpec().beginOffset()).isEqualTo(rs.getExpectedBeginOffset()); + } + + @Test + public void endOffset() { + assertThat(rs.getSpec().endOffset()).isEqualTo(rs.getExpectedEndOffset()); + } + + @Test + public void length() { + assertThat(rs.getSpec().length()).isEqualTo(rs.getExpectedLength()); + } + + @Parameters(name = "{0}") + public static Iterable testCases() { + Stream bothNullOrEmpty = + Stream.of( + ByteRangeSpec.relativeLength(null, null), + ByteRangeSpec.explicit(null, null), + ByteRangeSpec.explicitClosed(null, null), + ByteRangeSpec.relativeLength(0L, null), + ByteRangeSpec.explicit(0L, null), + ByteRangeSpec.explicitClosed(0L, null), + ByteRangeSpec.relativeLength(null, INF), + ByteRangeSpec.explicit(null, INF), + ByteRangeSpec.explicitClosed(null, INF), + 
ByteRangeSpec.relativeLength(0L, INF), + ByteRangeSpec.explicit(0L, INF), + ByteRangeSpec.explicitClosed(0L, INF)) + .map(brs -> RangeScenario.of(brs, 0, INF, INF, null)); + Stream effectivelyOnlyBegin = + Stream.of( + ByteRangeSpec.relativeLength(3L, null), + ByteRangeSpec.explicit(3L, null), + ByteRangeSpec.explicitClosed(3L, null), + // effective infinity means it should not impact things + ByteRangeSpec.relativeLength(3L, INF), + ByteRangeSpec.explicit(3L, INF), + ByteRangeSpec.explicitClosed(3L, INF)) + .map(brs -> RangeScenario.of(brs, 3, INF, INF, rangeOpen(3))); + Stream effectivelyOnlyEnd = + Stream.of( + RangeScenario.of( + ByteRangeSpec.relativeLength(null, 31L), 0L, 30L, 31L, rangeClosed(0, 30)), + RangeScenario.of(ByteRangeSpec.explicit(null, 31L), 0L, 31L, 31L, rangeClosed(0, 30)), + RangeScenario.of( + ByteRangeSpec.explicitClosed(null, 31L), 0L, 31L, 31L, rangeClosed(0, 31)), + RangeScenario.of( + ByteRangeSpec.relativeLength(0L, 31L), 0L, 30L, 31L, rangeClosed(0, 30)), + RangeScenario.of(ByteRangeSpec.explicit(0L, 31L), 0L, 31L, 31L, rangeClosed(0, 30)), + RangeScenario.of( + ByteRangeSpec.explicitClosed(0L, 31L), 0L, 31L, 31L, rangeClosed(0, 31))); + + Stream bothSpecified = + Stream.of( + RangeScenario.of( + ByteRangeSpec.relativeLength(3L, 15L), 3L, 17L, 15L, rangeClosed(3, 17)), + RangeScenario.of(ByteRangeSpec.explicit(3L, 15L), 3L, 15L, 12L, rangeClosed(3, 14)), + RangeScenario.of( + ByteRangeSpec.explicitClosed(3L, 15L), 3L, 15L, 12L, rangeClosed(3, 15))); + + long effectiveMax = INF - 1; + Stream edgeCases = + Stream.of( + // edge cases near default values + RangeScenario.of(ByteRangeSpec.relativeLength(1L, null), 1L, INF, INF, rangeOpen(1)), + RangeScenario.of( + ByteRangeSpec.relativeLength(null, effectiveMax), + 0, + effectiveMax - 1, + effectiveMax, + rangeClosed(0, effectiveMax - 1)), + RangeScenario.of( + ByteRangeSpec.relativeLength(INF, null), INF, INF, INF, rangeOpen(INF)), + RangeScenario.of( + ByteRangeSpec.relativeLength(1L, 
effectiveMax), + 1L, + effectiveMax, + effectiveMax, + rangeClosed(1L, effectiveMax)), + RangeScenario.of(ByteRangeSpec.explicit(1L, null), 1L, INF, INF, rangeOpen(1)), + RangeScenario.of( + ByteRangeSpec.explicit(null, effectiveMax), + 0, + effectiveMax, + effectiveMax, + rangeClosed(0, effectiveMax - 1)), + RangeScenario.of(ByteRangeSpec.explicit(INF, null), INF, INF, INF, rangeOpen(INF)), + RangeScenario.of( + ByteRangeSpec.explicit(1L, effectiveMax), + 1L, + effectiveMax, + effectiveMax - 1, + rangeClosed(1L, effectiveMax - 1)), + RangeScenario.of(ByteRangeSpec.explicitClosed(1L, null), 1L, INF, INF, rangeOpen(1)), + RangeScenario.of( + ByteRangeSpec.explicitClosed(null, effectiveMax), + 0, + effectiveMax, + effectiveMax, + rangeClosed(0, effectiveMax)), + RangeScenario.of( + ByteRangeSpec.explicitClosed(INF, null), INF, INF, INF, rangeOpen(INF)), + RangeScenario.of( + ByteRangeSpec.explicitClosed(1L, effectiveMax), + 1L, + effectiveMax, + effectiveMax - 1, + rangeClosed(1L, effectiveMax))); + + return Streams.concat( + bothNullOrEmpty, effectivelyOnlyBegin, effectivelyOnlyEnd, bothSpecified, edgeCases) + .map(rs -> new Object[] {rs}) + .collect(ImmutableList.toImmutableList()); + } + } + + private static String rangeOpen(long min) { + return String.format("bytes=%d-", min); + } + + private static String rangeClosed(long min, long max) { + return String.format("bytes=%d-%d", min, max); + } + + private static final class RangeScenario { + private final ByteRangeSpec spec; + private final long expectedBeginOffset; + private final long expectedEndOffset; + private final long expectedLength; + @Nullable private final String expectedHttpRange; + + private RangeScenario( + ByteRangeSpec spec, + long expectedBeginOffset, + long expectedEndOffset, + long expectedLength, + @Nullable String expectedHttpRange) { + this.spec = spec; + this.expectedBeginOffset = expectedBeginOffset; + this.expectedEndOffset = expectedEndOffset; + this.expectedLength = expectedLength; + 
this.expectedHttpRange = expectedHttpRange; + } + + public ByteRangeSpec getSpec() { + return spec; + } + + public long getExpectedBeginOffset() { + return expectedBeginOffset; + } + + public long getExpectedEndOffset() { + return expectedEndOffset; + } + + public long getExpectedLength() { + return expectedLength; + } + + public @Nullable String getExpectedHttpRange() { + return expectedHttpRange; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("spec", spec) + .add("expectedBeginOffset", fmt(expectedBeginOffset)) + .add("expectedEndOffset", fmt(expectedEndOffset)) + .add("expectedLength", fmt(expectedLength)) + .add("expectedHttpRange", expectedHttpRange) + .toString(); + } + + static RangeScenario of( + ByteRangeSpec spec, + long expectedBeginOffset, + long expectedEndOffset, + long expectedLength, + @Nullable String expectedHttpRange) { + return new RangeScenario( + spec, expectedBeginOffset, expectedEndOffset, expectedLength, expectedHttpRange); + } + + private static String fmt(@Nullable Long l) { + if (l == null) { + return null; + } + return l == Long.MAX_VALUE ? 
"Long.MAX_VALUE" : l.toString(); + } + } +} diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultRetryHandlingBehaviorTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultRetryHandlingBehaviorTest.java index 587577a5b..477594ddf 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultRetryHandlingBehaviorTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultRetryHandlingBehaviorTest.java @@ -62,7 +62,7 @@ public final class DefaultRetryHandlingBehaviorTest { @SuppressWarnings("deprecation") public DefaultRetryHandlingBehaviorTest(Case c) { this.c = c; - defaultStrategy = new DefaultStorageRetryStrategy(); + defaultStrategy = DefaultStorageRetryStrategy.INSTANCE; legacyStrategy = StorageRetryStrategy.getLegacyStorageRetryStrategy(); } diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/MathsTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/MathsTest.java new file mode 100644 index 000000000..77128f812 --- /dev/null +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/MathsTest.java @@ -0,0 +1,46 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.Maths.sub; +import static com.google.common.truth.Truth.assertThat; + +import org.junit.Test; + +@SuppressWarnings("ConstantConditions") +public final class MathsTest { + + @Test + public void sub_bothNull() { + assertThat(sub((Long) null, null)).isNull(); + } + + @Test + public void sub_lNull_rNonNull() { + assertThat(sub(null, 3L)).isNull(); + } + + @Test + public void sub_lNonNull_rNull() { + assertThat(sub(3L, null)).isEqualTo(3L); + } + + @Test + public void sub_bothNonNull() { + assertThat(sub(5L, 3L)).isEqualTo(2L); + } +} diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/ScatteringByteChannelFacadeTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/ScatteringByteChannelFacadeTest.java new file mode 100644 index 000000000..a5dd37781 --- /dev/null +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/ScatteringByteChannelFacadeTest.java @@ -0,0 +1,180 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.StorageByteChannels.readable; +import static com.google.cloud.storage.TestUtils.snapshotData; +import static com.google.common.truth.Truth.assertThat; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.SequenceInputStream; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.ScatteringByteChannel; +import java.util.concurrent.atomic.AtomicBoolean; +import org.junit.Test; + +public final class ScatteringByteChannelFacadeTest { + + @Test + public void lackOfAvailabilityDoesNotBlock() throws IOException { + ByteArrayInputStream bais1 = new ByteArrayInputStream(new byte[] {0, 1, 2, 3, 4}); + ByteArrayInputStream bais2 = new ByteArrayInputStream(new byte[] {5, 6, 7, 8, 9}); + SequenceInputStream all = new SequenceInputStream(bais1, bais2); + ReadableByteChannel rbc = Channels.newChannel(all); + ScatteringByteChannel sbc = newSbc(rbc); + + ByteBuffer[] bufs = {ByteBuffer.allocate(6), ByteBuffer.allocate(6)}; + sbc.read(bufs); + assertThat(snapshotData(bufs[0])).isEqualTo(new byte[] {0, 1, 2, 3, 4}); + assertThat(snapshotData(bufs[1])).isEqualTo(new byte[] {}); + sbc.read(bufs); + assertThat(snapshotData(bufs[0])).isEqualTo(new byte[] {0, 1, 2, 3, 4, 5}); + assertThat(snapshotData(bufs[1])).isEqualTo(new byte[] {6, 7, 8, 9}); + } + + @Test + public void lackOfCapacityReturnsFast() throws IOException { + ByteArrayInputStream bais = new ByteArrayInputStream(new byte[] {0, 1, 2, 3, 4}); + ReadableByteChannel rbc = Channels.newChannel(bais); + ScatteringByteChannel sbc = newSbc(rbc); + + ByteBuffer[] bufs = {ByteBuffer.allocate(0)}; + long read = sbc.read(bufs); + assertThat(read).isEqualTo(0); + } + + @Test + public void readNegativeOneReturnIfPreviouslyReadBytes() throws IOException { + ByteArrayInputStream bais = new 
ByteArrayInputStream(new byte[] {0}); + ReadableByteChannel rbc = Channels.newChannel(bais); + ScatteringByteChannel sbc = newSbc(rbc); + + ByteBuffer[] bufs = {ByteBuffer.allocate(1), ByteBuffer.allocate(1)}; + long read = sbc.read(bufs); + assertThat(read).isEqualTo(1); + assertThat(snapshotData(bufs[0])).isEqualTo(new byte[] {0}); + assertThat(snapshotData(bufs[1])).isEqualTo(new byte[] {}); + } + + @Test + public void readNegativeOneReturnsNegativeOneIfPreviouslyReadZeroBytes() throws IOException { + AtomicBoolean closeCalled = new AtomicBoolean(false); + ScatteringByteChannel sbc = + newSbc( + new ReadableByteChannelStub() { + @Override + public int read(ByteBuffer dst) { + return -1; + } + + @Override + public boolean isOpen() { + return true; + } + + @Override + public void close() { + closeCalled.compareAndSet(false, true); + } + }); + + ByteBuffer buf = ByteBuffer.allocate(1); + int read = sbc.read(buf); + assertThat(read).isEqualTo(-1); + assertThat(snapshotData(buf)).isEqualTo(new byte[] {}); + assertThat(closeCalled.get()).isTrue(); + } + + @Test(expected = ClosedChannelException.class) + public void closeChannelExceptionIfUnderlyingIsNotOpen() throws IOException { + ScatteringByteChannel sbc = newSbc(new ClosedReadableByteChannel()); + sbc.read(null, 0, 0); + } + + @Test + public void openDelegates() { + AtomicBoolean open = new AtomicBoolean(false); + ScatteringByteChannel sbc = + newSbc( + new ReadableByteChannelStub() { + @Override + public boolean isOpen() { + return open.get(); + } + }); + + assertThat(sbc.isOpen()).isFalse(); + open.set(true); + assertThat(sbc.isOpen()).isTrue(); + } + + @Test + public void closeDelegates() throws IOException { + AtomicBoolean closeCalled = new AtomicBoolean(false); + ScatteringByteChannel sbc = + newSbc( + new ReadableByteChannelStub() { + @Override + public void close() { + closeCalled.compareAndSet(false, true); + } + }); + + sbc.close(); + assertThat(closeCalled.get()).isTrue(); + } + + private static 
ScatteringByteChannel newSbc(ReadableByteChannel c) { + return readable().asScatteringByteChannel(c); + } + + private static final class ClosedReadableByteChannel implements ReadableByteChannel { + + @Override + public int read(ByteBuffer dst) throws IOException { + throw new ClosedChannelException(); + } + + @Override + public boolean isOpen() { + return false; + } + + @Override + public void close() {} + } + + private abstract static class ReadableByteChannelStub implements ReadableByteChannel { + + @Override + public int read(ByteBuffer dst) throws IOException { + return 0; + } + + @Override + public boolean isOpen() { + return false; + } + + @Override + public void close() throws IOException {} + } +} diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/SerializationTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/SerializationTest.java index e5ce50ba9..9fc4b16f7 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/SerializationTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/SerializationTest.java @@ -17,15 +17,21 @@ package com.google.cloud.storage; import static com.google.common.base.MoreObjects.firstNonNull; +import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.assertEquals; import com.google.api.gax.retrying.ResultRetryAlgorithm; +import com.google.api.services.storage.model.StorageObject; import com.google.cloud.BaseSerializationTest; import com.google.cloud.NoCredentials; import com.google.cloud.PageImpl; import com.google.cloud.ReadChannel; import com.google.cloud.Restorable; +import com.google.cloud.RestorableState; import com.google.cloud.storage.Acl.Project.ProjectRole; +import com.google.cloud.storage.BlobReadChannel.StateImpl; +import com.google.cloud.storage.BlobReadChannelV2.BlobReadChannelContext; +import com.google.cloud.storage.BlobReadChannelV2.BlobReadChannelV2State; import com.google.cloud.storage.Storage.BucketField; 
import com.google.cloud.storage.Storage.PredefinedAcl; import com.google.cloud.storage.UnifiedOpts.Opt; @@ -33,9 +39,15 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.ObjectInputStream; import java.io.Serializable; +import java.util.Base64; import java.util.Collections; import java.util.Map; +import java.util.Properties; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -190,17 +202,50 @@ protected Serializable[] serializableObjects() { } @Override + @SuppressWarnings("resource") protected Restorable[] restorableObjects() { HttpStorageOptions options = HttpStorageOptions.newBuilder().setProjectId("p2").build(); ResultRetryAlgorithm algorithm = options.getRetryAlgorithmManager().getForResumableUploadSessionWrite(EMPTY_RPC_OPTIONS); - ReadChannel reader = new BlobReadChannel(options, BlobId.of("b", "n"), EMPTY_RPC_OPTIONS); - // avoid closing when you don't want partial writes to GCS upon failure - @SuppressWarnings("resource") + ReadChannel readerV2 = + new BlobReadChannelV2( + new StorageObject().setBucket("b").setName("n"), + EMPTY_RPC_OPTIONS, + BlobReadChannelContext.from(options)); BlobWriteChannel writer = new BlobWriteChannel( options, BlobInfo.newBuilder(BlobId.of("b", "n")).build(), "upload-id", algorithm); - return new Restorable[] {reader, writer}; + return new Restorable[] {readerV2, writer}; + } + + @SuppressWarnings({"deprecation", "rawtypes"}) + @Test + public void restoreOfV1BlobReadChannelShouldReturnV2Channel() + throws IOException, ClassNotFoundException { + + Properties properties = new Properties(); + try (InputStream is = + SerializationTest.class + .getClassLoader() + .getResourceAsStream("com/google/cloud/storage/blobWriteChannel.ser.properties")) { + properties.load(is); + } + String b64bytes = 
properties.getProperty("b64bytes"); + assertThat(b64bytes).isNotEmpty(); + + byte[] decode = Base64.getDecoder().decode(b64bytes); + try (ByteArrayInputStream bais = new ByteArrayInputStream(decode); + ObjectInputStream ois = new ObjectInputStream(bais)) { + Object o = ois.readObject(); + assertThat(o).isInstanceOf(RestorableState.class); + RestorableState restorableState = (RestorableState) o; + assertThat(o).isInstanceOf(StateImpl.class); + StateImpl state = (StateImpl) restorableState; + ReadChannel restore = state.restore(); + assertThat(restore).isInstanceOf(BlobReadChannelV2.class); + RestorableState capture = restore.capture(); + assertThat(capture).isInstanceOf(BlobReadChannelV2State.class); + } } /** diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageBatchTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageBatchTest.java index 33c273db3..d91b1e740 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageBatchTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageBatchTest.java @@ -28,6 +28,7 @@ import com.google.cloud.storage.Storage.BlobGetOption; import com.google.cloud.storage.Storage.BlobSourceOption; import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.spi.v1.HttpStorageRpc; import com.google.cloud.storage.spi.v1.RpcBatch; import com.google.cloud.storage.spi.v1.StorageRpc; import com.google.common.collect.ImmutableMap; @@ -56,7 +57,7 @@ public class StorageBatchTest { HttpStorageOptions.getDefaultInstance().getRetryAlgorithmManager(); private HttpStorageOptions optionsMock; - private StorageRpc storageRpcMock; + private HttpStorageRpc storageRpcMock; private RpcBatch batchMock; private StorageBatch storageBatch; private final Storage storage = EasyMock.createStrictMock(Storage.class); @@ -64,7 +65,7 @@ public class StorageBatchTest { @Before public void setUp() { optionsMock = 
EasyMock.createMock(HttpStorageOptions.class); - storageRpcMock = EasyMock.createMock(StorageRpc.class); + storageRpcMock = EasyMock.createMock(HttpStorageRpc.class); batchMock = EasyMock.createMock(RpcBatch.class); EasyMock.expect(optionsMock.getStorageRpcV1()).andReturn(storageRpcMock); EasyMock.expect(optionsMock.getRetryAlgorithmManager()) diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageImplMockitoTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageImplMockitoTest.java index a11453fb2..1e162f982 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageImplMockitoTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageImplMockitoTest.java @@ -30,7 +30,6 @@ import com.google.api.core.ApiClock; import com.google.api.gax.paging.Page; import com.google.api.services.storage.model.StorageObject; -import com.google.cloud.ReadChannel; import com.google.cloud.ServiceOptions; import com.google.cloud.Tuple; import com.google.cloud.WriteChannel; @@ -45,7 +44,6 @@ import java.io.IOException; import java.io.InputStream; import java.math.BigInteger; -import java.nio.ByteBuffer; import java.nio.file.Files; import java.nio.file.Path; import java.security.Key; @@ -305,6 +303,7 @@ public long millisTime() { private StorageRpcFactory rpcFactoryMock; private StorageRpc storageRpcMock; private Storage storage; + private com.google.api.services.storage.Storage apiary; private Blob expectedBlob1, expectedBlob2, expectedBlob3, expectedUpdated; private Bucket expectedBucket1, expectedBucket2, expectedBucket3; @@ -338,7 +337,9 @@ public static void beforeClass() throws NoSuchAlgorithmException, InvalidKeySpec public void setUp() { rpcFactoryMock = mock(StorageRpcFactory.class, UNEXPECTED_CALL_ANSWER); storageRpcMock = mock(StorageRpc.class, UNEXPECTED_CALL_ANSWER); + apiary = mock(com.google.api.services.storage.Storage.class, UNEXPECTED_CALL_ANSWER); 
doReturn(storageRpcMock).when(rpcFactoryMock).create(Mockito.any(StorageOptions.class)); + doReturn(apiary).when(storageRpcMock).getStorage(); options = StorageOptions.http() .setProjectId("projectId") @@ -1319,113 +1320,6 @@ public void testListBlobsWithException() { } } - private void verifyChannelRead(ReadChannel channel, byte[] bytes) throws IOException { - assertNotNull(channel); - assertTrue(channel.isOpen()); - - ByteBuffer buffer = ByteBuffer.allocate(42); - byte[] expectedBytes = new byte[buffer.capacity()]; - System.arraycopy(bytes, 0, expectedBytes, 0, bytes.length); - - int size = channel.read(buffer); - assertEquals(bytes.length, size); - assertEquals(bytes.length, buffer.position()); - assertArrayEquals(expectedBytes, buffer.array()); - } - - @Test - public void testReader() { - initializeService(); - ReadChannel channel = storage.reader(BUCKET_NAME1, BLOB_NAME1); - assertNotNull(channel); - assertTrue(channel.isOpen()); - // Storage.reader() does not issue any RPC, channel.read() does - try { - channel.read(ByteBuffer.allocate(100)); - fail(); - } catch (IOException e) { - assertTrue(e.getMessage().contains("java.lang.IllegalArgumentException: Unexpected call")); - } - } - - @Test - public void testReaderWithOptions() throws IOException { - doReturn(Tuple.of("etag", BLOB_CONTENT)) - .doThrow(UNEXPECTED_CALL_EXCEPTION) - .when(storageRpcMock) - .read( - Conversions.apiary().blobInfo().encode(BLOB_INFO2), - BLOB_SOURCE_OPTIONS, - 0, - DEFAULT_CHUNK_SIZE); - initializeService(); - ReadChannel channel = - storage.reader( - BUCKET_NAME1, BLOB_NAME2, BLOB_SOURCE_GENERATION, BLOB_SOURCE_METAGENERATION); - verifyChannelRead(channel, BLOB_CONTENT); - } - - @Test - public void testReaderWithDecryptionKey() throws IOException { - doReturn(Tuple.of("a", BLOB_CONTENT), Tuple.of("b", BLOB_SUB_CONTENT)) - .doThrow(UNEXPECTED_CALL_EXCEPTION) - .when(storageRpcMock) - .read( - Conversions.apiary().blobInfo().encode(BLOB_INFO2), - ENCRYPTION_KEY_OPTIONS, - 0, - 
DEFAULT_CHUNK_SIZE); - initializeService(); - ReadChannel channel = - storage.reader(BUCKET_NAME1, BLOB_NAME2, Storage.BlobSourceOption.decryptionKey(KEY)); - - verifyChannelRead(channel, BLOB_CONTENT); - channel = - storage.reader( - BUCKET_NAME1, BLOB_NAME2, Storage.BlobSourceOption.decryptionKey(BASE64_KEY)); - verifyChannelRead(channel, BLOB_SUB_CONTENT); - } - - @Test - public void testReaderWithOptionsFromBlobId() throws IOException { - doReturn(Tuple.of("etag", BLOB_CONTENT)) - .doThrow(UNEXPECTED_CALL_EXCEPTION) - .when(storageRpcMock) - .read( - Conversions.apiary().blobId().encode(BLOB_INFO1.getBlobId()), - BLOB_SOURCE_OPTIONS, - 0, - DEFAULT_CHUNK_SIZE); - initializeService(); - ReadChannel channel = - storage.reader( - BLOB_INFO1.getBlobId(), - BLOB_SOURCE_GENERATION_FROM_BLOB_ID, - BLOB_SOURCE_METAGENERATION); - verifyChannelRead(channel, BLOB_CONTENT); - } - - @Test - public void testReaderFailure() throws IOException { - doThrow(STORAGE_FAILURE) - .when(storageRpcMock) - .read( - Conversions.apiary().blobId().encode(BLOB_INFO2.getBlobId()), - EMPTY_RPC_OPTIONS, - 0, - DEFAULT_CHUNK_SIZE); - initializeService(); - ReadChannel channel = storage.reader(BUCKET_NAME1, BLOB_NAME2); - assertNotNull(channel); - assertTrue(channel.isOpen()); - try { - channel.read(ByteBuffer.allocate(42)); - fail(); - } catch (IOException e) { - assertTrue(e.getMessage().contains(STORAGE_FAILURE.toString())); - } - } - @Test public void testWriter() { // verify that md5 and crc32c are cleared if present when calling create diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/TestUtils.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/TestUtils.java index 7e5ac8c77..03b287b36 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/TestUtils.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/TestUtils.java @@ -40,11 +40,16 @@ import com.google.storage.v2.WriteObjectRequest; import io.grpc.Status.Code; 
import io.grpc.StatusRuntimeException; +import io.grpc.netty.shaded.io.netty.buffer.ByteBufUtil; +import io.grpc.netty.shaded.io.netty.buffer.Unpooled; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStream; import java.nio.Buffer; import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; import java.util.concurrent.Callable; import java.util.function.Function; import java.util.stream.IntStream; @@ -52,6 +57,8 @@ import java.util.zip.GZIPOutputStream; import org.checkerframework.checker.nullness.qual.NonNull; import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.function.ThrowingRunnable; +import org.junit.runners.model.MultipleFailureException; public final class TestUtils { @@ -190,4 +197,46 @@ public boolean shouldRetry(Throwable previousThrowable, Object previousResponse) public static Function onlyUploadId() { return uId -> WriteObjectRequest.newBuilder().setUploadId(uId).build(); } + + public static byte[] snapshotData(ByteBuffer buf) { + ByteBuffer dup = buf.duplicate(); + dup.flip(); + byte[] bytes = new byte[dup.remaining()]; + dup.get(bytes); + return bytes; + } + + public static byte[] slice(byte[] bs, int begin, int end) { + int len = end - begin; + byte[] dst = new byte[len]; + System.arraycopy(bs, begin, dst, 0, len); + return dst; + } + + public static String xxd(byte[] bytes) { + return ByteBufUtil.prettyHexDump(Unpooled.wrappedBuffer(bytes)); + } + + public static String xxd(ByteBuffer bytes) { + ByteBuffer dup = bytes.duplicate(); + dup.flip(); + return ByteBufUtil.prettyHexDump(Unpooled.wrappedBuffer(dup)); + } + + public static void assertAll(ThrowingRunnable... 
trs) throws Exception { + List x = + Arrays.stream(trs) + .map( + tr -> { + try { + tr.run(); + return null; + } catch (Throwable e) { + return e; + } + }) + .filter(Objects::nonNull) + .collect(ImmutableList.toImmutableList()); + MultipleFailureException.assertEmpty(x); + } } diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ChecksummedTestContent.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ChecksummedTestContent.java index 037bdeb28..7b8e12443 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ChecksummedTestContent.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ChecksummedTestContent.java @@ -26,7 +26,7 @@ import java.util.Arrays; import java.util.Base64; -final class ChecksummedTestContent { +public final class ChecksummedTestContent { private final byte[] bytes; private final int crc32c; diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadChannelTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadChannelTest.java index d08c6494f..ae37b3c9f 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadChannelTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadChannelTest.java @@ -16,27 +16,33 @@ package com.google.cloud.storage.it; +import static com.google.cloud.storage.TestUtils.xxd; import static com.google.common.truth.Truth.assertThat; import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.fail; import com.google.cloud.ReadChannel; +import com.google.cloud.RestorableState; import com.google.cloud.WriteChannel; import com.google.cloud.storage.Blob; import com.google.cloud.storage.BlobId; import com.google.cloud.storage.BlobInfo; import 
com.google.cloud.storage.BucketInfo; import com.google.cloud.storage.DataGeneration; +import com.google.cloud.storage.DataGenerator; import com.google.cloud.storage.Storage; import com.google.cloud.storage.Storage.BlobSourceOption; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.StorageException; import com.google.cloud.storage.TransportCompatibility.Transport; import com.google.cloud.storage.it.runner.StorageITRunner; import com.google.cloud.storage.it.runner.annotations.Backend; import com.google.cloud.storage.it.runner.annotations.CrossRun; import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.ObjectsFixture; +import com.google.cloud.storage.it.runner.registry.ObjectsFixture.ObjectAndContent; import com.google.common.io.BaseEncoding; import com.google.common.io.ByteStreams; import java.io.ByteArrayInputStream; @@ -46,12 +52,15 @@ import java.nio.ByteBuffer; import java.nio.channels.Channels; import java.nio.channels.FileChannel; +import java.nio.channels.WritableByteChannel; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import java.nio.file.StandardOpenOption; import java.util.Arrays; import java.util.Random; import java.util.zip.GZIPInputStream; +import org.checkerframework.checker.nullness.qual.Nullable; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -79,6 +88,7 @@ public final class ITBlobReadChannelTest { @Inject public Storage storage; @Inject public BucketInfo bucket; + @Inject public ObjectsFixture objectsFixture; @Test public void testLimit_smallerThanOneChunk() throws IOException { @@ -89,6 +99,15 @@ public void testLimit_smallerThanOneChunk() throws IOException { doLimitTest(srcContentSize, rangeBegin, rangeEnd, chunkSize); } + @Test + public void testLimit_noSeek() throws IOException { + int srcContentSize = 16; + int rangeBegin = 0; + int rangeEnd = 10; + 
int chunkSize = _256KiB; + doLimitTest(srcContentSize, rangeBegin, rangeEnd, chunkSize); + } + @Test public void testLimit_pastEndOfBlob() throws IOException { int srcContentSize = _256KiB; @@ -197,43 +216,6 @@ public void testReadChannel_preconditionFailureResultsInIOException_generationMa } } - @Test - @CrossRun.Exclude(transports = Transport.GRPC) - public void testReadChannelFailUpdatedGeneration() throws IOException { - // this test scenario is valid for both grpc and json, however the current semantics of actual - // request interleaving are very different, so this specific test is only applicable to json. - String blobName = "test-read-blob-fail-updated-generation"; - BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); - Random random = new Random(); - int chunkSize = 1024; - int blobSize = 2 * chunkSize; - byte[] content = new byte[blobSize]; - random.nextBytes(content); - Blob remoteBlob = storage.create(blob, content); - assertNotNull(remoteBlob); - assertEquals(blobSize, (long) remoteBlob.getSize()); - try (ReadChannel reader = storage.reader(blob.getBlobId())) { - reader.setChunkSize(chunkSize); - ByteBuffer readBytes = ByteBuffer.allocate(chunkSize); - int numReadBytes = reader.read(readBytes); - assertEquals(chunkSize, numReadBytes); - assertArrayEquals(Arrays.copyOf(content, chunkSize), readBytes.array()); - try (WriteChannel writer = storage.writer(blob)) { - byte[] newContent = new byte[blobSize]; - random.nextBytes(newContent); - int numWrittenBytes = writer.write(ByteBuffer.wrap(newContent)); - assertEquals(blobSize, numWrittenBytes); - } - readBytes = ByteBuffer.allocate(chunkSize); - reader.read(readBytes); - fail("StorageException was expected"); - } catch (IOException ex) { - StringBuilder messageBuilder = new StringBuilder(); - messageBuilder.append("Blob ").append(blob.getBlobId()).append(" was updated while reading"); - assertEquals(messageBuilder.toString(), ex.getMessage()); - } - } - @Test public void 
ensureReaderReturnsCompressedBytesByDefault() throws IOException { String blobName = testName.getMethodName(); @@ -310,6 +292,232 @@ public void returnRawInputStream_true() throws IOException { } } + @Test + @CrossRun.Exclude(transports = Transport.GRPC) + public void channelIsConsideredOpenUponConstruction() { + ReadChannel reader = storage.reader(objectsFixture.getInfo1().getBlobId()); + assertThat(reader.isOpen()).isTrue(); + reader.close(); + } + + @Test + public void optionsWork() { + byte[] bytes1 = "A".getBytes(StandardCharsets.UTF_8); + + BlobInfo info = BlobInfo.newBuilder(bucket, testName.getMethodName()).build(); + Blob gen1 = storage.create(info, bytes1, BlobTargetOption.doesNotExist()); + + // attempt to read generation=1 && ifGenerationNotMatch=1 + try (ReadChannel r = + storage.reader( + gen1.getBlobId(), BlobSourceOption.generationNotMatch(gen1.getGeneration()))) { + r.read(ByteBuffer.allocate(1)); + } catch (IOException e) { + assertThat(e).hasCauseThat().isInstanceOf(StorageException.class); + StorageException se = (StorageException) e.getCause(); + // b/261214971 for differing response code + assertThat(se.getCode()).isAnyOf(/*json*/ 304, /*grpc*/ 409); + } + } + + @Test + @CrossRun.Exclude(transports = Transport.GRPC) + public void captureAndRestore_position_Limit() throws IOException { + captureAndRestoreTest(26, 51); + } + + @Test + @CrossRun.Exclude(transports = Transport.GRPC) + public void captureAndRestore_position_noLimit() throws IOException { + captureAndRestoreTest(26, null); + } + + @Test + @CrossRun.Exclude(transports = Transport.GRPC) + public void captureAndRestore_noPosition_limit() throws IOException { + captureAndRestoreTest(null, 51); + } + + @Test + @CrossRun.Exclude(transports = Transport.GRPC) + public void captureAndRestore_noPosition_noLimit() throws IOException { + captureAndRestoreTest(null, null); + } + + @Test + @CrossRun.Exclude(transports = Transport.GRPC) + public void seekAfterReadWorks() throws IOException { + 
ObjectAndContent obj512KiB = objectsFixture.getObj512KiB(); + BlobInfo gen1 = obj512KiB.getInfo(); + byte[] bytes = obj512KiB.getContent().getBytes(); + + byte[] expected1 = Arrays.copyOfRange(bytes, 0, 4); + byte[] expected2 = Arrays.copyOfRange(bytes, 8, 13); + + String xxdExpected1 = xxd(expected1); + String xxdExpected2 = xxd(expected2); + try (ReadChannel reader = storage.reader(gen1.getBlobId())) { + // read some bytes + byte[] bytes1 = new byte[expected1.length]; + reader.read(ByteBuffer.wrap(bytes1)); + String xxd1 = xxd(bytes1); + assertThat(xxd1).isEqualTo(xxdExpected1); + + // seek forward to a new offset + reader.seek(8); + + // read again + byte[] bytes2 = new byte[expected2.length]; + reader.read(ByteBuffer.wrap(bytes2)); + String xxd2 = xxd(bytes2); + assertThat(xxd2).isEqualTo(xxdExpected2); + } + } + + @Test + @CrossRun.Exclude(transports = Transport.GRPC) + public void limitAfterReadWorks() throws IOException { + ObjectAndContent obj512KiB = objectsFixture.getObj512KiB(); + BlobInfo gen1 = obj512KiB.getInfo(); + byte[] bytes = obj512KiB.getContent().getBytes(); + + byte[] expected1 = Arrays.copyOfRange(bytes, 0, 4); + byte[] expected2 = Arrays.copyOfRange(bytes, 4, 10); + + String xxdExpected1 = xxd(expected1); + String xxdExpected2 = xxd(expected2); + try (ReadChannel reader = storage.reader(gen1.getBlobId())) { + // read some bytes + byte[] bytes1 = new byte[expected1.length]; + reader.read(ByteBuffer.wrap(bytes1)); + String xxd1 = xxd(bytes1); + assertThat(xxd1).isEqualTo(xxdExpected1); + + // seek forward to a new offset + reader.limit(10); + + // read again + byte[] bytes2 = new byte[expected2.length]; + reader.read(ByteBuffer.wrap(bytes2)); + String xxd2 = xxd(bytes2); + assertThat(xxd2).isEqualTo(xxdExpected2); + } + } + + @Test + @CrossRun.Exclude(transports = Transport.GRPC) + public void readingLastByteReturnsOneByte_seekOnly() throws IOException { + int length = 10; + byte[] bytes = DataGenerator.base64Characters().genBytes(length); + + 
BlobInfo info1 = BlobInfo.newBuilder(bucket, testName.getMethodName()).build(); + Blob gen1 = storage.create(info1, bytes, BlobTargetOption.doesNotExist()); + + byte[] expected1 = Arrays.copyOfRange(bytes, 9, 10); + String xxdExpected1 = xxd(expected1); + try (ReadChannel reader = storage.reader(gen1.getBlobId()); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + WritableByteChannel writer = Channels.newChannel(baos)) { + reader.seek(length - 1); + ByteStreams.copy(reader, writer); + byte[] bytes1 = baos.toByteArray(); + String xxd1 = xxd(bytes1); + assertThat(xxd1).isEqualTo(xxdExpected1); + } + } + + @Test + @CrossRun.Exclude(transports = Transport.GRPC) + public void readingLastByteReturnsOneByte_seekAndLimit() throws IOException { + int length = 10; + byte[] bytes = DataGenerator.base64Characters().genBytes(length); + + BlobInfo info1 = BlobInfo.newBuilder(bucket, testName.getMethodName()).build(); + Blob gen1 = storage.create(info1, bytes, BlobTargetOption.doesNotExist()); + + byte[] expected1 = Arrays.copyOfRange(bytes, 9, 10); + String xxdExpected1 = xxd(expected1); + try (ReadChannel reader = storage.reader(gen1.getBlobId()); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + WritableByteChannel writer = Channels.newChannel(baos)) { + reader.seek(length - 1); + reader.limit(length); + ByteStreams.copy(reader, writer); + byte[] bytes1 = baos.toByteArray(); + String xxd1 = xxd(bytes1); + assertThat(xxd1).isEqualTo(xxdExpected1); + } + } + + /** + * This is specifically in place for compatibility with BlobReadChannelV1. + * + *

This behavior is a bug, and should be fixed at the next major version + */ + @Test + @CrossRun.Exclude(transports = Transport.GRPC) + public void responseWith416ReturnsZeroAndLeavesTheChannelOpen() throws IOException { + int length = 10; + byte[] bytes = DataGenerator.base64Characters().genBytes(length); + + BlobInfo info1 = BlobInfo.newBuilder(bucket, testName.getMethodName()).build(); + Blob gen1 = storage.create(info1, bytes, BlobTargetOption.doesNotExist()); + + try (ReadChannel reader = storage.reader(gen1.getBlobId())) { + reader.seek(length); + ByteBuffer buf = ByteBuffer.allocate(1); + int read = reader.read(buf); + assertThat(read).isEqualTo(-1); + assertThat(reader.isOpen()).isTrue(); + int read2 = reader.read(buf); + assertThat(read2).isEqualTo(-1); + } + } + + private void captureAndRestoreTest(@Nullable Integer position, @Nullable Integer endOffset) + throws IOException { + ObjectAndContent obj512KiB = objectsFixture.getObj512KiB(); + BlobInfo gen1 = obj512KiB.getInfo(); + byte[] bytes = obj512KiB.getContent().getBytes(); + + String xxdExpected1; + String xxdExpected2; + { + int begin = position != null ? position : 0; + int end = endOffset != null ?
endOffset : bytes.length; + byte[] expected1 = Arrays.copyOfRange(bytes, begin, begin + 10); + byte[] expected2 = Arrays.copyOfRange(bytes, begin, end); + xxdExpected1 = xxd(expected1); + xxdExpected2 = xxd(expected2); + } + + ReadChannel reader = storage.reader(gen1.getBlobId()); + if (position != null) { + reader.seek(position); + } + if (endOffset != null) { + reader.limit(endOffset); + } + + ByteBuffer buf = ByteBuffer.allocate(bytes.length); + buf.limit(10); + + int read1 = reader.read(buf); + assertThat(read1).isEqualTo(10); + String xxd1 = xxd(buf); + assertThat(xxd1).isEqualTo(xxdExpected1); + buf.limit(buf.capacity()); + + RestorableState capture = reader.capture(); + reader.close(); + + try (ReadChannel restore = capture.restore()) { + restore.read(buf); + String xxd2 = xxd(buf); + assertThat(xxd2).isEqualTo(xxdExpected2); + } + } + private void doLimitTest(int srcContentSize, int rangeBegin, int rangeEnd, int chunkSize) throws IOException { String blobName = String.format("%s/src", testName.getMethodName()); @@ -317,7 +525,8 @@ private void doLimitTest(int srcContentSize, int rangeBegin, int rangeEnd, int c ByteBuffer content = dataGeneration.randByteBuffer(srcContentSize); ByteBuffer dup = content.duplicate(); dup.position(rangeBegin); - dup.limit(Math.min(dup.capacity(), rangeEnd)); + int newLimit = Math.min(dup.capacity(), rangeEnd); + dup.limit(newLimit); byte[] expectedSubContent = new byte[dup.remaining()]; dup.get(expectedSubContent); @@ -325,19 +534,18 @@ private void doLimitTest(int srcContentSize, int rangeBegin, int rangeEnd, int c writer.write(content); } - ByteBuffer buffer = ByteBuffer.allocate(srcContentSize); - - try (ReadChannel reader = storage.reader(src.getBlobId())) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + try (ReadChannel reader = storage.reader(src.getBlobId()); + WritableByteChannel writer = Channels.newChannel(baos)) { reader.setChunkSize(chunkSize); - reader.seek(rangeBegin); + if (rangeBegin > 0) { + 
reader.seek(rangeBegin); + } reader.limit(rangeEnd); - reader.read(buffer); - buffer.flip(); + ByteStreams.copy(reader, writer); } - byte[] actual = new byte[buffer.limit()]; - buffer.get(actual); - - assertThat(actual).isEqualTo(expectedSubContent); + byte[] actual = baos.toByteArray(); + assertThat(xxd(actual)).isEqualTo(xxd(expectedSubContent)); } } diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadChannelV2RetryTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadChannelV2RetryTest.java new file mode 100644 index 000000000..d30943581 --- /dev/null +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITBlobReadChannelV2RetryTest.java @@ -0,0 +1,132 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage.it; + +import static com.google.cloud.storage.TestUtils.slice; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.rpc.FixedHeaderProvider; +import com.google.cloud.ReadChannel; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.Storage.BlobTargetOption; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.SingleBackend; +import com.google.cloud.storage.it.runner.annotations.StorageFixture; +import com.google.cloud.storage.it.runner.registry.TestBench; +import com.google.cloud.storage.it.runner.registry.TestBench.RetryTestResource; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.gson.JsonArray; +import com.google.gson.JsonObject; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@SingleBackend(Backend.TEST_BENCH) +public final class ITBlobReadChannelV2RetryTest { + + private static final int _512KiB = 512 * 1024; + + @Rule public TestName testName = new TestName(); + + @Inject public TestBench testBench; + + @Inject + @StorageFixture(Transport.HTTP) + public Storage storage; + + @Inject public BucketInfo bucket; + + @Test + public void generationIsLockedForRetries() 
throws Exception { + + StorageOptions baseOptions = storage.getOptions(); + byte[] bytes = DataGenerator.base64Characters().genBytes(_512KiB); + + BlobId id = BlobId.of(bucket.getName(), testName.getMethodName()); + Blob gen1 = + storage.create(BlobInfo.newBuilder(id).build(), bytes, BlobTargetOption.doesNotExist()); + + byte[] slice1 = slice(bytes, 0, 10); + byte[] slice2 = slice(bytes, 10, bytes.length); + + String xxdExpected1 = xxd(slice1); + String xxdExpected2 = xxd(slice2); + + JsonObject instructions = new JsonObject(); + JsonArray value = new JsonArray(); + value.add("return-broken-stream-after-256K"); + instructions.add("storage.objects.get", value); + RetryTestResource retryTestResource = new RetryTestResource(instructions); + RetryTestResource retryTest = testBench.createRetryTest(retryTestResource); + + ImmutableMap headers = ImmutableMap.of("x-retry-test-id", retryTest.id); + + RequestAuditing requestAuditing = new RequestAuditing(); + StorageOptions testStorageOptions = + baseOptions + .toBuilder() + .setTransportOptions(requestAuditing) + .setHeaderProvider(FixedHeaderProvider.create(headers)) + .build(); + + ByteBuffer buf1 = ByteBuffer.allocate(10); + ByteBuffer buf2 = ByteBuffer.allocate(_512KiB); + try (Storage testStorage = testStorageOptions.getService(); + // explicitly use id rather than gen1, we want to start the read without the generation + // present + ReadChannel r = testStorage.reader(id)) { + r.setChunkSize(16); + // perform a read to open the first socket against gen1 + // This should leave the socket open with bytes left to read since we've set our 'chunkSize' + // to 16, we won't read far enough into the object yet to trigger the + // 'broken-stream-after-256K'. 
+ r.read(buf1); + String xxd1 = xxd(buf1); + assertThat(xxd1).isEqualTo(xxdExpected1); + // verify no generation was passed + requestAuditing.assertQueryParam("generation", ImmutableList.of()); + + // now that the socket is open, modify the object so that it will get a new generation + Blob gen2 = + storage.create( + gen1, "A".getBytes(StandardCharsets.UTF_8), BlobTargetOption.generationMatch()); + + // Now try and read the rest of the object. + // after reaching the 256Kth byte the stream should break causing the ReadChannel to try and + // resume the download + r.read(buf2); + String xxd2 = xxd(buf2); + assertThat(xxd2).isEqualTo(xxdExpected2); + requestAuditing.assertQueryParam("generation", gen1.getGeneration(), Long::new); + } + } +} diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITObjectTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITObjectTest.java index 458fc1b92..56f0a37ce 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITObjectTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ITObjectTest.java @@ -61,12 +61,14 @@ import com.google.common.collect.Iterators; import com.google.common.hash.Hashing; import com.google.common.io.BaseEncoding; +import com.google.common.io.ByteStreams; import com.google.common.primitives.Ints; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; +import java.nio.channels.Channels; import java.security.Key; import java.util.Arrays; import java.util.HashMap; @@ -1057,30 +1059,56 @@ public void testReadAndWriteChannelWithEncryptionKey() throws IOException { } @Test - public void testReadAndWriteChannelsWithDifferentFileSize() throws IOException { - String blobNamePrefix = "test-read-and-write-channels-blob-"; - int[] blobSizes = {0, 700, 1024 * 256, 2 * 1024 * 1024, 4 * 1024 * 1024, 4 * 1024 * 1024 + 1}; + public 
void testReadAndWriteChannelsWithDifferentFileSize_0B() throws IOException { + doTestReadAndWriteChannelsWithSize(0); + } + + @Test + public void testReadAndWriteChannelsWithDifferentFileSize_700B() throws IOException { + doTestReadAndWriteChannelsWithSize(700); + } + + @Test + public void testReadAndWriteChannelsWithDifferentFileSize_8193B() throws IOException { + doTestReadAndWriteChannelsWithSize(4 * 1024); + } + + @Test + public void testReadAndWriteChannelsWithDifferentFileSize_256KiB() throws IOException { + doTestReadAndWriteChannelsWithSize(256 * 1024); + } + + @Test + public void testReadAndWriteChannelsWithDifferentFileSize_2MiB() throws IOException { + doTestReadAndWriteChannelsWithSize(2 * 1024 * 1024); + } + + @Test + public void testReadAndWriteChannelsWithDifferentFileSize_4MiB() throws IOException { + doTestReadAndWriteChannelsWithSize(4 * 1024 * 1024); + } + + @Test + public void testReadAndWriteChannelsWithDifferentFileSize_4MiB_plus1() throws IOException { + doTestReadAndWriteChannelsWithSize((4 * 1024 * 1024) + 1); + } + + private void doTestReadAndWriteChannelsWithSize(int blobSize) throws IOException { + String blobName = String.format("test-read-and-write-channels-blob-%d", blobSize); + BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); Random rnd = new Random(); - for (int blobSize : blobSizes) { - String blobName = blobNamePrefix + blobSize; - BlobInfo blob = BlobInfo.newBuilder(bucket, blobName).build(); - byte[] bytes = new byte[blobSize]; - rnd.nextBytes(bytes); - try (WriteChannel writer = storage.writer(blob)) { - writer.write(ByteBuffer.wrap(bytes)); - } - ByteArrayOutputStream output = new ByteArrayOutputStream(); - try (ReadChannel reader = storage.reader(blob.getBlobId())) { - ByteBuffer buffer = ByteBuffer.allocate(64 * 1024); - while (reader.read(buffer) > 0) { - buffer.flip(); - output.write(buffer.array(), 0, buffer.limit()); - buffer.clear(); - } - } - assertArrayEquals(bytes, output.toByteArray()); - 
assertTrue(storage.delete(bucket.getName(), blobName)); + byte[] bytes = new byte[blobSize]; + rnd.nextBytes(bytes); + try (WriteChannel writer = storage.writer(blob)) { + writer.write(ByteBuffer.wrap(bytes)); + } + ByteArrayOutputStream output = new ByteArrayOutputStream(); + try (ReadChannel reader = storage.reader(blob.getBlobId())) { + ByteStreams.copy(reader, Channels.newChannel(output)); } + byte[] actual = output.toByteArray(); + assertThat(actual).isEqualTo(bytes); + assertTrue(storage.delete(bucket.getName(), blobName)); } @Test diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/RequestAuditing.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/RequestAuditing.java index a4c215a36..1087ad8b2 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/RequestAuditing.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/RequestAuditing.java @@ -16,7 +16,7 @@ package com.google.cloud.storage.it; -import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; import com.google.api.client.http.GenericUrl; import com.google.api.client.http.HttpContent; @@ -65,21 +65,33 @@ void clear() { } void assertQueryParam(String paramName, String expectedValue) { + assertQueryParam(paramName, ImmutableList.of(expectedValue), Function.identity()); + } + + void assertQueryParam(String paramName, ImmutableList expectedValue) { assertQueryParam(paramName, expectedValue, Function.identity()); } - void assertQueryParam(String paramName, T expectedValue, Function transform) { + void assertQueryParam(String paramName, T expected, Function transform) { + assertQueryParam(paramName, ImmutableList.of(expected), transform); + } + + private void assertQueryParam( + String paramName, ImmutableList expected, Function transform) { ImmutableList requests = getRequests(); List actual = requests.stream() .map(HttpRequest::getUrl) - .distinct() // todo: 
figure out why requests seem to be recorded twice for blob create + // When a multipart (http, not MPU) request is sent it will show up as multiple requests + // de-dupe before processing + .distinct() .map(u -> (String) u.getFirst(paramName)) + .filter(Objects::nonNull) .map(transform) .collect(Collectors.toList()); - assertThat(actual).isEqualTo(ImmutableList.of(expectedValue)); + assertWithMessage("Query Param " + paramName).that(actual).isEqualTo(expected); } void assertPathParam(String resourceName, String expectedValue) { @@ -88,7 +100,9 @@ void assertPathParam(String resourceName, String expectedValue) { List actual = requests.stream() .map(HttpRequest::getUrl) - .distinct() // todo: figure out why requests seem to be recorded twice for blob create + // When a multipart (http, not MPU) request is sent it will show up as multiple requests + // de-dupe before processing + .distinct() .map(GenericUrl::getRawPath) .map( s -> { @@ -109,7 +123,9 @@ void assertPathParam(String resourceName, String expectedValue) { .filter(Objects::nonNull) .collect(Collectors.toList()); - assertThat(actual).isEqualTo(ImmutableList.of(expectedValue)); + assertWithMessage("Path Param " + resourceName) + .that(actual) + .isEqualTo(ImmutableList.of(expectedValue)); } void assertNoContentEncoding() { @@ -122,7 +138,7 @@ void assertNoContentEncoding() { .filter(Objects::nonNull) .collect(Collectors.toList()); - assertThat(actual).isEmpty(); + assertWithMessage("Header Content-Encoding").that(actual).isEmpty(); } void assertEncryptionKeyHeaders(EncryptionKeyTuple tuple) { @@ -139,8 +155,11 @@ void assertEncryptionKeyHeaders(EncryptionKeyTuple tuple) { (String) h.get("x-goog-encryption-key-sha256"))) .collect(Collectors.toList()); - // todo: figure out why requests seem to be recorded twice for blob create - assertThat(actual).containsAtLeastElementsIn(ImmutableList.of(tuple)); + // When a multipart (http, not MPU) request is sent it will show up as multiple requests, + // constrain our 
assertion to contains rather than exact matching + assertWithMessage("Headers x-goog-encryption-*") + .that(actual) + .containsAtLeastElementsIn(ImmutableList.of(tuple)); } void assertMultipartContentJsonAndText() { @@ -153,7 +172,9 @@ void assertMultipartContentJsonAndText() { .map(HttpContent::getType) .collect(Collectors.toList()); - assertThat(actual).isEqualTo(ImmutableList.of("application/json; charset=UTF-8", "text/plain")); + assertWithMessage("Multipart Content-Type") + .that(actual) + .isEqualTo(ImmutableList.of("application/json; charset=UTF-8", "text/plain")); } void assertMultipartJsonField(String jsonField, Object expectedValue) { @@ -169,6 +190,8 @@ void assertMultipartJsonField(String jsonField, Object expectedValue) { .map(c -> (GenericJson) c.getData()) .map(json -> json.get(jsonField)) .collect(Collectors.toList()); - assertThat(collect).isEqualTo(ImmutableList.of(expectedValue)); + assertWithMessage("Multipart json field " + jsonField) + .that(collect) + .isEqualTo(ImmutableList.of(expectedValue)); } } diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/ObjectsFixture.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/ObjectsFixture.java index af85d82eb..6abdee6ce 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/ObjectsFixture.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/ObjectsFixture.java @@ -16,12 +16,15 @@ package com.google.cloud.storage.it.runner.registry; +import com.google.cloud.storage.Blob; import com.google.cloud.storage.BlobId; import com.google.cloud.storage.BlobInfo; import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.DataGenerator; import com.google.cloud.storage.Storage; import com.google.cloud.storage.Storage.BlobTargetOption; import com.google.cloud.storage.Storage.ComposeRequest; +import com.google.cloud.storage.it.ChecksummedTestContent; 
import com.google.common.collect.ImmutableMap; import java.nio.charset.StandardCharsets; @@ -35,6 +38,7 @@ public final class ObjectsFixture implements ManagedLifecycle { private BlobInfo info2; private BlobInfo info3; private BlobInfo info4; + private ObjectAndContent obj512KiB; ObjectsFixture(Storage s, BucketInfo bucket) { this.s = s; @@ -62,6 +66,10 @@ public BlobInfo getInfo4() { return info4; } + public ObjectAndContent getObj512KiB() { + return obj512KiB; + } + @Override public void start() { String bucketName = bucket.getName(); @@ -103,6 +111,14 @@ public void start() { this.info2 = s.get(blobId2).asBlobInfo(); this.info3 = s.get(blobId3).asBlobInfo(); this.info4 = s.get(blobId4).asBlobInfo(); + + byte[] bytes = DataGenerator.base64Characters().genBytes(512); + Blob obj512KiB = + s.create( + BlobInfo.newBuilder(bucket, "obj512KiB").build(), + bytes, + BlobTargetOption.doesNotExist()); + this.obj512KiB = new ObjectAndContent(obj512KiB.asBlobInfo(), ChecksummedTestContent.of(bytes)); } @Override @@ -111,4 +127,22 @@ public void stop() {} private static String objName(String name) { return String.format("%s/%s", ObjectsFixture.class.getSimpleName(), name); } + + public static final class ObjectAndContent { + private final BlobInfo info; + private final ChecksummedTestContent content; + + private ObjectAndContent(BlobInfo info, ChecksummedTestContent content) { + this.info = info; + this.content = content; + } + + public BlobInfo getInfo() { + return info; + } + + public ChecksummedTestContent getContent() { + return content; + } + } } diff --git a/google-cloud-storage/src/test/resources/com/google/cloud/storage/blobWriteChannel.ser.properties b/google-cloud-storage/src/test/resources/com/google/cloud/storage/blobWriteChannel.ser.properties new file mode 100644 index 000000000..c9d3dc5ff --- /dev/null +++ b/google-cloud-storage/src/test/resources/com/google/cloud/storage/blobWriteChannel.ser.properties @@ -0,0 +1,70 @@ +# +# Copyright 2022 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Base 64 Encoded bytes of a BlobReadChannel circa v2.16.0 +# Generated using the following snippet: +# +# Storage s = StorageOptions.http() +# .setProjectId("proj") +# .setCredentials(NoCredentials.getInstance()) +# .build() +# .getService(); +# +# ReadChannel reader = s.reader(BlobId.of("buck", "obj", 1L)); +# RestorableState capture = reader.capture(); +# +# ByteArrayOutputStream baos = new ByteArrayOutputStream(); +# try (ObjectOutputStream oos = new ObjectOutputStream(baos)) { +# oos.writeObject(capture); +# } +# +# byte[] bytes = baos.toByteArray(); +# String b64Ser = Base64.getEncoder().encodeToString(bytes); +# +# System.out.println("b64Ser = " + b64Ser); +# +b64bytes=\ + rO0ABXNyADJjb20uZ29vZ2xlLmNsb3VkLnN0b3JhZ2UuQmxvYlJlYWRDaGFubmVsJFN0YXRlSW1wbGwJWjOFWbi1AgAJSQAJY2h1bmtTaXplWgALZW5kT2ZTdHJlYW1a\ + AAZpc09wZW5KAAVsaW1pdEoACHBvc2l0aW9uTAAEYmxvYnQAIUxjb20vZ29vZ2xlL2Nsb3VkL3N0b3JhZ2UvQmxvYklkO0wACGxhc3RFdGFndAASTGphdmEvbGFuZy9T\ + dHJpbmc7TAAOcmVxdWVzdE9wdGlvbnN0AA9MamF2YS91dGlsL01hcDtMAA5zZXJ2aWNlT3B0aW9uc3QALUxjb20vZ29vZ2xlL2Nsb3VkL3N0b3JhZ2UvSHR0cFN0b3Jh\ + Z2VPcHRpb25zO3hwACAAAAABf/////////8AAAAAAAAAAHNyAB9jb20uZ29vZ2xlLmNsb3VkLnN0b3JhZ2UuQmxvYklkcdHeVjWP2d0CAANMAAZidWNrZXRxAH4AAkwA\ + CmdlbmVyYXRpb250ABBMamF2YS9sYW5nL0xvbmc7TAAEbmFtZXEAfgACeHB0AARidWNrc3IADmphdmEubGFuZy5Mb25nO4vkkMyPI98CAAFKAAV2YWx1ZXhyABBqYXZh\ + 
LmxhbmcuTnVtYmVyhqyVHQuU4IsCAAB4cAAAAAAAAAABdAADb2JqcHNyADVjb20uZ29vZ2xlLmNvbW1vbi5jb2xsZWN0LkltbXV0YWJsZU1hcCRTZXJpYWxpemVkRm9y\ + bQAAAAAAAAAAAgACTAAEa2V5c3QAEkxqYXZhL2xhbmcvT2JqZWN0O0wABnZhbHVlc3EAfgAPeHB1cgATW0xqYXZhLmxhbmcuT2JqZWN0O5DOWJ8QcylsAgAAeHAAAAAA\ + dXEAfgARAAAAAHNyACtjb20uZ29vZ2xlLmNsb3VkLnN0b3JhZ2UuSHR0cFN0b3JhZ2VPcHRpb25ztmk+4Fw7cvMCAAFMABVyZXRyeUFsZ29yaXRobU1hbmFnZXJ0ADRM\ + Y29tL2dvb2dsZS9jbG91ZC9zdG9yYWdlL0h0dHBSZXRyeUFsZ29yaXRobU1hbmFnZXI7eHIAJ2NvbS5nb29nbGUuY2xvdWQuc3RvcmFnZS5TdG9yYWdlT3B0aW9uc5q/\ + 8jOW5d5PAgAAeHIAH2NvbS5nb29nbGUuY2xvdWQuU2VydmljZU9wdGlvbnN/qQsz9VFyfgIAC0wADmNsaWVudExpYlRva2VucQB+AAJMAAVjbG9ja3QAHkxjb20vZ29v\ + Z2xlL2FwaS9jb3JlL0FwaUNsb2NrO0wAC2NyZWRlbnRpYWxzdAAdTGNvbS9nb29nbGUvYXV0aC9DcmVkZW50aWFscztMAA5oZWFkZXJQcm92aWRlcnQAJ0xjb20vZ29v\ + Z2xlL2FwaS9nYXgvcnBjL0hlYWRlclByb3ZpZGVyO0wABGhvc3RxAH4AAkwACXByb2plY3RJZHEAfgACTAAOcXVvdGFQcm9qZWN0SWRxAH4AAkwADXJldHJ5U2V0dGlu\ + Z3N0ACtMY29tL2dvb2dsZS9hcGkvZ2F4L3JldHJ5aW5nL1JldHJ5U2V0dGluZ3M7TAAXc2VydmljZUZhY3RvcnlDbGFzc05hbWVxAH4AAkwAGnNlcnZpY2VScGNGYWN0\ + b3J5Q2xhc3NOYW1lcQB+AAJMABB0cmFuc3BvcnRPcHRpb25zdAAjTGNvbS9nb29nbGUvY2xvdWQvVHJhbnNwb3J0T3B0aW9uczt4cHQABGdjY2xzcgAmY29tLmdvb2ds\ + ZS5hcGkuY29yZS5DdXJyZW50TWlsbGlzQ2xvY2usd0sHJ9YTCwIAAHhwc3IAHmNvbS5nb29nbGUuY2xvdWQuTm9DcmVkZW50aWFsc6kR5wOeLAxAAgAAeHIAKGNvbS5n\ + b29nbGUuYXV0aC5vYXV0aDIuT0F1dGgyQ3JlZGVudGlhbHM/PX166aVRVwIABEwAEGV4cGlyYXRpb25NYXJnaW50ABRMamF2YS90aW1lL0R1cmF0aW9uO0wABGxvY2tx\ + AH4AD0wADXJlZnJlc2hNYXJnaW5xAH4AI0wABXZhbHVldAA1TGNvbS9nb29nbGUvYXV0aC9vYXV0aDIvT0F1dGgyQ3JlZGVudGlhbHMkT0F1dGhWYWx1ZTt4cgAbY29t\ + Lmdvb2dsZS5hdXRoLkNyZWRlbnRpYWxzCzii14w9kIECAAB4cHNyAA1qYXZhLnRpbWUuU2VylV2EuhsiSLIMAAB4cHcNAQAAAAAAAAEsAAAAAHh1cgACW0Ks8xf4BghU\ + 4AIAAHhwAAAAAHNxAH4AJ3cNAQAAAAAAAAFoAAAAAHhwc3IAJ2NvbS5nb29nbGUuYXBpLmdheC5ycGMuTm9IZWFkZXJQcm92aWRlcmWjEqhqxXthAgAAeHB0AB5odHRw\ + czovL3N0b3JhZ2UuZ29vZ2xlYXBpcy5jb210AARwcm9qcHNyADNjb20uZ29vZ2xlLmFwaS5nYXgucmV0cnlpbmcuQXV0b1ZhbHVlX1JldHJ5U2V0dGluZ3Nym/9/a0d0\ + 
swIACVoACGppdHRlcmVkSQALbWF4QXR0ZW1wdHNEABRyZXRyeURlbGF5TXVsdGlwbGllckQAFHJwY1RpbWVvdXRNdWx0aXBsaWVyTAARaW5pdGlhbFJldHJ5RGVsYXl0\ + ABpMb3JnL3RocmVldGVuL2JwL0R1cmF0aW9uO0wAEWluaXRpYWxScGNUaW1lb3V0cQB+ADFMAA1tYXhSZXRyeURlbGF5cQB+ADFMAA1tYXhScGNUaW1lb3V0cQB+ADFM\ + AAx0b3RhbFRpbWVvdXRxAH4AMXhyACljb20uZ29vZ2xlLmFwaS5nYXgucmV0cnlpbmcuUmV0cnlTZXR0aW5nc3Kb/39rR3SzAgAAeHABAAAABkAAAAAAAAAAP/AAAAAA\ + AABzcgATb3JnLnRocmVldGVuLmJwLlNlcpVdhLobIkiyDAAAeHB3DQEAAAAAAAAAAQAAAAB4c3EAfgA0dw0BAAAAAAAAADIAAAAAeHNxAH4ANHcNAQAAAAAAAAAgAAAA\ + AHhzcQB+ADR3DQEAAAAAAAAAMgAAAAB4c3EAfgA0dw0BAAAAAAAAADIAAAAAeHQAPmNvbS5nb29nbGUuY2xvdWQuc3RvcmFnZS5IdHRwU3RvcmFnZU9wdGlvbnMkSHR0\ + cFN0b3JhZ2VGYWN0b3J5dABBY29tLmdvb2dsZS5jbG91ZC5zdG9yYWdlLkh0dHBTdG9yYWdlT3B0aW9ucyRIdHRwU3RvcmFnZVJwY0ZhY3RvcnlzcgAqY29tLmdvb2ds\ + ZS5jbG91ZC5odHRwLkh0dHBUcmFuc3BvcnRPcHRpb25zbX9UTb2H/yICAANJAA5jb25uZWN0VGltZW91dEkAC3JlYWRUaW1lb3V0TAAdaHR0cFRyYW5zcG9ydEZhY3Rv\ + cnlDbGFzc05hbWVxAH4AAnhw//////////90AEZjb20uZ29vZ2xlLmNsb3VkLmh0dHAuSHR0cFRyYW5zcG9ydE9wdGlvbnMkRGVmYXVsdEh0dHBUcmFuc3BvcnRGYWN0\ + b3J5c3IAMmNvbS5nb29nbGUuY2xvdWQuc3RvcmFnZS5IdHRwUmV0cnlBbGdvcml0aG1NYW5hZ2Vy0i1ymVA0mEUCAAFMAA1yZXRyeVN0cmF0ZWd5dAAvTGNvbS9nb29n\ + bGUvY2xvdWQvc3RvcmFnZS9TdG9yYWdlUmV0cnlTdHJhdGVneTt4cHNyADRjb20uZ29vZ2xlLmNsb3VkLnN0b3JhZ2UuRGVmYXVsdFN0b3JhZ2VSZXRyeVN0cmF0ZWd5\ + bgaLnarjlYkCAAB4cA==