From aac69da6393fbe67281e78a8afa1e77707ead71a Mon Sep 17 00:00:00 2001 From: Sumeet Attree Date: Mon, 4 Apr 2022 18:38:19 +0530 Subject: [PATCH 01/26] Support single-request multipart upload for setting metadata --- Cargo.toml | 2 +- src/client/object.rs | 62 ++++++++++++++++++++++++++++++++++ src/resources/object.rs | 75 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 138 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 2724d8c..8b39fad 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,7 +23,7 @@ rustls-tls = ["reqwest/rustls-tls", "ring", "pem"] trust-dns = ["reqwest/trust-dns"] [dependencies] -reqwest = { version = "0.11", default-features = false, features = ["json", "stream"] } +reqwest = { version = "0.11", default-features = false, features = ["json", "stream", "multipart"] } percent-encoding = { version = "2", default-features = false } jsonwebtoken = { version = "7", default-features = false } serde = { version = "1", default-features = false, features = ["derive"] } diff --git a/src/client/object.rs b/src/client/object.rs index e6b8b60..c33d18b 100644 --- a/src/client/object.rs +++ b/src/client/object.rs @@ -65,6 +65,68 @@ impl<'a> ObjectClient<'a> { } } + /// Create a new object. This works in the same way as `ObjectClient::create` but allows setting of metadata for this object. + /// Upload a file as that is loaded in memory to google cloud storage, where it will be + /// interpreted according to the mime type you specified. The metadata will be set at the time of creation. 
+ /// ## Example + /// ```rust,no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # fn read_cute_cat(_in: &str) -> Vec { vec![0, 1] } + /// use cloud_storage::Client; + /// use cloud_storage::Object; + /// + /// let file: Vec = read_cute_cat("cat.png"); + /// let client = Client::default(); + /// let metadata = serde_json::json!({ + /// "metadata": { + /// "custom_id": "1234" + /// } + /// }); + /// client.object().create_with("cat-photos", file, "recently read cat.png", "image/png", &metadata).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn create_with( + &self, + bucket: &str, + file: Vec, + filename: &str, + mime_type: &str, + metadata: &serde_json::Value, + ) -> crate::Result { + let url = &format!( + "{}/{}/o?uploadType=multipart&name={}", + BASE_URL, + percent_encode(bucket), + percent_encode(filename), + ); + + // single-request upload that includes metadata require a mutlipart request where + // part 1 is metadata, and part2 is the file to upload + let metadata_part = reqwest::multipart::Part::text(metadata.to_string()) + .mime_str("application/json")?; + let file_part = reqwest::multipart::Part::bytes(file).mime_str(mime_type)?; + let form = reqwest::multipart::Form::new() + .part("metadata", metadata_part) + .part("file", file_part); + let headers = self.0.get_headers().await?; + let response = self + .0 + .client + .post(url) + .headers(headers) + .multipart(form) + .send() + .await?; + + if response.status() == 200 { + Ok(serde_json::from_str(&response.text().await?)?) + } else { + Err(crate::Error::new(&response.text().await?)) + } + } + /// Create a new object. This works in the same way as `ObjectClient::create`, except it does not need /// to load the entire file in ram. 
/// ## Example diff --git a/src/resources/object.rs b/src/resources/object.rs index 0f247bc..1f17634 100644 --- a/src/resources/object.rs +++ b/src/resources/object.rs @@ -279,6 +279,55 @@ impl Object { crate::runtime()?.block_on(Self::create(bucket, file, filename, mime_type)) } + /// Create a new object with metadata. + /// Upload a file as that is loaded in memory to google cloud storage, where it will be + /// interpreted according to the mime type you specified. + /// ## Example + /// ```rust,no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # fn read_cute_cat(_in: &str) -> Vec { vec![0, 1] } + /// use cloud_storage::Object; + /// + /// let file: Vec = read_cute_cat("cat.png"); + /// let metadata = serde_json::json!({ + /// "metadata": { + /// "custom_id": "1234" + /// } + /// }); + /// Object::create("cat-photos", file, "recently read cat.png", "image/png", &metadata).await?; + /// # Ok(()) + /// # } + /// ``` + #[cfg(feature = "global-client")] + pub async fn create_with( + bucket: &str, + file: Vec, + filename: &str, + mime_type: &str, + metadata: &serde_json::Value, + ) -> crate::Result { + crate::CLOUD_CLIENT + .object() + .create_with(bucket, file, filename, mime_type, metadata) + .await + } + + /// Synchronous equivalent of `Object::create_with` + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(all(feature = "global-client", feature = "sync"))] + pub fn create_with_sync( + bucket: &str, + file: Vec, + filename: &str, + mime_type: &str, + metadata: &serde_json::Value, + ) -> crate::Result { + crate::runtime()?.block_on(Self::create_with(bucket, file, filename, mime_type, metadata)) + } + /// Create a new object. This works in the same way as `Object::create`, except it does not need /// to load the entire file in ram. 
/// ## Example @@ -980,6 +1029,19 @@ mod tests { Ok(()) } + #[tokio::test] + async fn create_with() -> Result<(), Box> { + let bucket = crate::read_test_bucket().await; + let metadata = serde_json::json!({ + "metadata": { + "object_id": "1234" + } + }); + let obj = Object::create_with(&bucket.name, vec![0, 1], "test-create-meta", "text/plain", &metadata).await?; + assert_eq!(obj.metadata.unwrap().get("object_id"), Some(&String::from("1234"))); + Ok(()) + } + #[tokio::test] async fn create_streamed() -> Result<(), Box> { let bucket = crate::read_test_bucket().await; @@ -1305,6 +1367,19 @@ mod tests { Ok(()) } + #[test] + fn create_with() -> Result<(), Box> { + let bucket = crate::read_test_bucket_sync(); + let metadata = serde_json::json!({ + "metadata": { + "object_id": "1234" + } + }); + let obj = Object::create_with_sync(&bucket.name, vec![0, 1], "test-create-meta", "text/plain", &metadata)?; + assert_eq!(obj.metadata.unwrap().get("object_id"), Some(&String::from("1234"))); + Ok(()) + } + #[test] fn create_streamed() -> Result<(), Box> { let bucket = crate::read_test_bucket_sync(); From 953251c78c333e620700f2e86ee1c5cc6eadb978 Mon Sep 17 00:00:00 2001 From: Elykz Date: Wed, 6 Apr 2022 15:44:31 +0200 Subject: [PATCH 02/26] Support for create_streamed_with (sync and async) --- src/client/object.rs | 69 ++++++++++++++++++++++++++++++++++++++++++-- src/sync/object.rs | 60 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 127 insertions(+), 2 deletions(-) diff --git a/src/client/object.rs b/src/client/object.rs index c33d18b..e71b4d9 100644 --- a/src/client/object.rs +++ b/src/client/object.rs @@ -104,8 +104,8 @@ impl<'a> ObjectClient<'a> { // single-request upload that includes metadata require a mutlipart request where // part 1 is metadata, and part2 is the file to upload - let metadata_part = reqwest::multipart::Part::text(metadata.to_string()) - .mime_str("application/json")?; + let metadata_part = + 
reqwest::multipart::Part::text(metadata.to_string()).mime_str("application/json")?; let file_part = reqwest::multipart::Part::bytes(file).mime_str(mime_type)?; let form = reqwest::multipart::Form::new() .part("metadata", metadata_part) @@ -127,6 +127,71 @@ impl<'a> ObjectClient<'a> { } } + /// Create a new object. This works in the same way as `ObjectClient::create`, except it does not need + /// to load the entire file in ram. + /// ## Example + /// ```rust,no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::Client; + /// use cloud_storage::Object; + /// + /// let client = Client::default(); + /// let file = reqwest::Client::new() + /// .get("https://my_domain.rs/nice_cat_photo.png") + /// .send() + /// .await? + /// .bytes_stream(); + /// client.object().create_streamed("cat-photos", file, 10, "recently read cat.png", "image/png").await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn create_streamed_with( + &self, + bucket: &str, + stream: S, + filename: &str, + mime_type: &str, + metadata: &serde_json::Value, + ) -> crate::Result + where + S: TryStream + Send + Sync + 'static, + S::Error: Into>, + bytes::Bytes: From, + { + let url = &format!( + "{}/{}/o?uploadType=multipart&name={}", + BASE_URL, + percent_encode(bucket), + percent_encode(filename), + ); + let headers = self.0.get_headers().await?; + + // single-request upload that includes metadata require a mutlipart request where + // part 1 is metadata, and part2 is the file to upload + let body = reqwest::Body::wrap_stream(stream); + let metadata_part = + reqwest::multipart::Part::text(metadata.to_string()).mime_str("application/json")?; + let file_part = reqwest::multipart::Part::stream(body).mime_str(mime_type)?; + let form = reqwest::multipart::Form::new() + .part("metadata", metadata_part) + .part("file", file_part); + + let response = self + .0 + .client + .post(url) + .headers(headers) + .multipart(form) + .send() + .await?; + if response.status() == 
200 { + Ok(serde_json::from_str(&response.text().await?)?) + } else { + Err(crate::Error::new(&response.text().await?)) + } + } + /// Create a new object. This works in the same way as `ObjectClient::create`, except it does not need /// to load the entire file in ram. /// ## Example diff --git a/src/sync/object.rs b/src/sync/object.rs index 65a94d6..8b589e8 100644 --- a/src/sync/object.rs +++ b/src/sync/object.rs @@ -40,6 +40,43 @@ impl<'a> ObjectClient<'a> { ) } + /// Create a new object. + /// Upload a file as that is loaded in memory to google cloud storage, where it will be + /// interpreted according to the mime type you specified. + /// ## Example + /// ```rust,no_run + /// # fn main() -> Result<(), Box> { + /// # fn read_cute_cat(_in: &str) -> Vec { vec![0, 1] } + /// use cloud_storage::sync::Client; + /// use cloud_storage::Object; + /// + /// let file: Vec = read_cute_cat("cat.png"); + /// let client = Client::new()?; + /// let metadata = serde_json::json!({ + /// "metadata": { + /// "custom_id": "1234" + /// } + /// }); + /// client.object().create_with("cat-photos", file, "recently read cat.png", "image/png", &metadata)?; + /// # Ok(()) + /// # } + /// ``` + pub fn create_with( + &self, + bucket: &str, + file: Vec, + filename: &str, + mime_type: &str, + metadata: &serde_json::Value, + ) -> crate::Result { + self.0.runtime.block_on( + self.0 + .client + .object() + .create_with(bucket, file, filename, mime_type, metadata), + ) + } + /// Create a new object. This works in the same way as `ObjectClient::create`, except it does not need /// to load the entire file in ram. pub fn create_streamed( @@ -63,6 +100,29 @@ impl<'a> ObjectClient<'a> { ) } + /// Create a new object with metadata. This works in the same way as `ObjectClient::create`, except it does not need + /// to load the entire file in ram. 
+ pub fn create_streamed_with( + &self, + bucket: &str, + file: R, + filename: &str, + mime_type: &str, + metadata: &serde_json::Value, + ) -> crate::Result + where + R: std::io::Read + Send + Sync + Unpin + 'static, + { + let stream = super::helpers::ReaderStream::new(file); + + self.0.runtime.block_on( + self.0 + .client + .object() + .create_streamed_with(bucket, stream, filename, mime_type, metadata), + ) + } + /// Obtain a list of objects within this Bucket. /// ### Example /// ```no_run From 20cf311219032e60ade9ee7d95f7aee449ed5ef8 Mon Sep 17 00:00:00 2001 From: elykz Date: Tue, 19 Apr 2022 17:56:10 +0200 Subject: [PATCH 03/26] Adding download_streamed in sync api --- Cargo.toml | 1 + src/sync/object.rs | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 8b39fad..e7753d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,6 +37,7 @@ pem = { version = "0.8", default-features = false, optional = true chrono = { version = "0.4", default-features = false, features = ["serde"] } hex = { version = "0.4", default-features = false, features = ["alloc"] } tokio = { version = "1.0", default-features = false, features = ["macros", "rt"] } +tokio-util = { version = "0.7", default-features = false, features = ["compat"] } futures-util = { version = "0.3", default_features = false, features = ["alloc"] } bytes = { version = "1.0", default-features = false } async-trait = { version = "0.1.48", default-features = false } diff --git a/src/sync/object.rs b/src/sync/object.rs index 8b589e8..3f0fb6e 100644 --- a/src/sync/object.rs +++ b/src/sync/object.rs @@ -2,7 +2,12 @@ use crate::{ object::{ComposeRequest, ObjectList}, ListRequest, Object, }; + +use futures_util::io::AllowStdIo; +use futures_util::StreamExt; use futures_util::TryStreamExt; +use tokio::io::AsyncWriteExt; +use tokio_util::compat::FuturesAsyncWriteCompatExt; /// Operations on [`Object`](Object)s. 
#[derive(Debug)] @@ -181,6 +186,43 @@ impl<'a> ObjectClient<'a> { .block_on(self.0.client.object().download(bucket, file_name)) } + /// Download the content of the object with the specified name in the specified bucket. + /// This works in the same way as `ObjectClient::download_streamed`, except it does not + /// need to load the entire result in ram. + /// + /// ### Example + /// ```no_run + /// # fn main() -> Result<(), Box> { + /// use cloud_storage::sync::Client; + /// use cloud_storage::Object; + /// + /// let client = Client::new()?; + /// let file = File::create("somefile")?; + /// let bytes = client.object().download("my_bucket", "path/to/my/file.png", file)?; + /// # Ok(()) + /// # } + /// ``` + pub fn download_streamed(&self, bucket: &str, file_name: &str, file: W) -> crate::Result<()> + where + W: std::io::Write, // + Send + Sync + Unpin + 'static, + { + self.0.runtime.block_on(async { + let mut stream = self + .0 + .client + .object() + .download_streamed(bucket, file_name) + .await?; + + let mut writer = tokio::io::BufWriter::new(AllowStdIo::new(file).compat_write()); + while let Some(byte) = stream.next().await { + writer.write_all(&[byte?]).await?; + } + writer.flush().await?; + Ok(()) + }) + } + /// Obtains a single object with the specified name in the specified bucket. 
/// ### Example /// ```no_run From ce54bbc1afe4a378b6b37fdc5e0015906bfae1b8 Mon Sep 17 00:00:00 2001 From: Shell Turner Date: Thu, 21 Apr 2022 15:52:13 +0200 Subject: [PATCH 04/26] Remove chrono dependency --- Cargo.toml | 4 ++-- src/client/bucket.rs | 2 +- src/lib.rs | 3 +++ src/resources/bucket.rs | 21 +++++++++++++-------- src/resources/hmac_key.rs | 6 ++++-- src/resources/object.rs | 27 ++++++++++++++++----------- src/sync/bucket.rs | 2 +- 7 files changed, 40 insertions(+), 25 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2724d8c..5ec9141 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,7 @@ trust-dns = ["reqwest/trust-dns"] [dependencies] reqwest = { version = "0.11", default-features = false, features = ["json", "stream"] } percent-encoding = { version = "2", default-features = false } -jsonwebtoken = { version = "7", default-features = false } +jsonwebtoken = { version = "8.1", default-features = false, features = ["use_pem"] } serde = { version = "1", default-features = false, features = ["derive"] } serde_json = { version = "1", default-features = false } base64 = { version = "0.13", default-features = false } @@ -34,7 +34,7 @@ dotenv = { version = "0.15", default-features = false } openssl = { version = "0.10", default-features = false, optional = true } ring = { version = "0.16", default-features = false, optional = true } pem = { version = "0.8", default-features = false, optional = true } -chrono = { version = "0.4", default-features = false, features = ["serde"] } +time = { version = "0.3", default-features = false, features = ["serde-well-known", "serde-human-readable", "macros"]} hex = { version = "0.4", default-features = false, features = ["alloc"] } tokio = { version = "1.0", default-features = false, features = ["macros", "rt"] } futures-util = { version = "0.3", default_features = false, features = ["alloc"] } diff --git a/src/client/bucket.rs b/src/client/bucket.rs index bdf5558..6ea3591 100644 --- a/src/client/bucket.rs +++ 
b/src/client/bucket.rs @@ -150,7 +150,7 @@ impl<'a> BucketClient<'a> { /// let mut bucket = client.bucket().read("cloud-storage-rs-doc-3").await?; /// bucket.retention_policy = Some(RetentionPolicy { /// retention_period: 50, - /// effective_time: chrono::Utc::now() + chrono::Duration::seconds(50), + /// effective_time: time::OffsetDateTime::now_utc() + std::time::Duration::from_secs(50), /// is_locked: Some(false), /// }); /// client.bucket().update(&bucket).await?; diff --git a/src/lib.rs b/src/lib.rs index df08a11..0ac7946 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -133,6 +133,9 @@ pub type Result = std::result::Result; const BASE_URL: &str = "https://storage.googleapis.com/storage/v1"; +const ISO_8601_BASIC_FORMAT: &[::time::format_description::FormatItem<'_>] = time::macros::format_description!("[year][month][day]T[hour][minute][second]Z"); +time::serde::format_description!(rfc3339_date, Date, "[year]-[month]-[day]"); + fn from_str<'de, T, D>(deserializer: D) -> std::result::Result where T: std::str::FromStr, diff --git a/src/resources/bucket.rs b/src/resources/bucket.rs index 4e6b094..156ef81 100644 --- a/src/resources/bucket.rs +++ b/src/resources/bucket.rs @@ -30,9 +30,11 @@ pub struct Bucket { /// The name of the bucket. pub name: String, /// The creation time of the bucket in RFC 3339 format. - pub time_created: chrono::DateTime, + #[serde(with = "time::serde::rfc3339")] + pub time_created: time::OffsetDateTime, /// The modification time of the bucket in RFC 3339 format. - pub updated: chrono::DateTime, + #[serde(with = "time::serde::rfc3339")] + pub updated: time::OffsetDateTime, /// Whether or not to automatically apply an eventBasedHold to new objects added to the bucket. 
pub default_event_based_hold: Option, /// The bucket's retention policy, which defines the minimum age an object in the bucket must @@ -147,7 +149,8 @@ pub struct RetentionPolicy { #[serde(deserialize_with = "crate::from_str")] pub retention_period: u64, /// The time from which the retentionPolicy was effective, in RFC 3339 format. - pub effective_time: chrono::DateTime, + #[serde(with = "time::serde::rfc3339")] + pub effective_time: time::OffsetDateTime, /// Whether or not the retentionPolicy is locked. If true, the retentionPolicy cannot be removed /// and the retention period cannot be reduced. pub is_locked: Option, @@ -177,7 +180,8 @@ pub struct UniformBucketLevelAccess { /// /// iamConfiguration.uniformBucketLevelAccess.enabled may be changed from true to false until /// the locked time, after which the field is immutable. - pub locked_time: Option>, + #[serde(with = "time::serde::rfc3339::option")] + pub locked_time: Option, } /// Contains information about the encryption used for data in this Bucket. @@ -297,7 +301,8 @@ pub struct Condition { /// A date in `RFC 3339` format with only the date part (for instance, "2013-01-15"). This /// condition is satisfied when an object is created before midnight of the specified date in /// UTC. - pub created_before: Option, + #[serde(with = "crate::rfc3339_date::option")] + pub created_before: Option, /// Relevant only for versioned objects. If the value is true, this condition matches the live /// version of objects; if the value is `false`, it matches noncurrent versions of objects. 
pub is_live: Option, @@ -649,7 +654,7 @@ impl Bucket { /// let mut bucket = Bucket::read("cloud-storage-rs-doc-3").await?; /// bucket.retention_policy = Some(RetentionPolicy { /// retention_period: 50, - /// effective_time: chrono::Utc::now() + chrono::Duration::seconds(50), + /// effective_time: time::OffsetDateTime::now_utc() + std::time::Duration::from_secs(50), /// is_locked: Some(false), /// }); /// bucket.update().await?; @@ -862,7 +867,7 @@ mod tests { let mut bucket = crate::create_test_bucket("test-update").await; bucket.retention_policy = Some(RetentionPolicy { retention_period: 50, - effective_time: chrono::Utc::now() + chrono::Duration::seconds(50), + effective_time: time::OffsetDateTime::now_utc() + std::time::Duration::from_secs(50), is_locked: Some(false), }); bucket.update().await?; @@ -968,7 +973,7 @@ mod tests { let mut bucket = crate::create_test_bucket_sync("test-update"); bucket.retention_policy = Some(RetentionPolicy { retention_period: 50, - effective_time: chrono::Utc::now() + chrono::Duration::seconds(50), + effective_time: time::OffsetDateTime::now_utc() + std::time::Duration::from_secs(50), is_locked: Some(false), }); bucket.update_sync()?; diff --git a/src/resources/hmac_key.rs b/src/resources/hmac_key.rs index 00da6e5..32fb3bb 100644 --- a/src/resources/hmac_key.rs +++ b/src/resources/hmac_key.rs @@ -39,9 +39,11 @@ pub struct HmacMeta { /// The state of the key. pub state: HmacState, /// The creation time of the HMAC key. - pub time_created: chrono::DateTime, + #[serde(with = "time::serde::rfc3339")] + pub time_created: time::OffsetDateTime, /// The last modification time of the HMAC key metadata. - pub updated: chrono::DateTime, + #[serde(with = "time::serde::rfc3339")] + pub updated: time::OffsetDateTime, /// HTTP 1.1 Entity tag for the HMAC key. 
pub etag: String, } diff --git a/src/resources/object.rs b/src/resources/object.rs index 0f247bc..9106cde 100644 --- a/src/resources/object.rs +++ b/src/resources/object.rs @@ -32,24 +32,29 @@ pub struct Object { /// as application/octet-stream. pub content_type: Option, /// The creation time of the object in RFC 3339 format. - pub time_created: chrono::DateTime, + #[serde(with = "time::serde::rfc3339")] + pub time_created: time::OffsetDateTime, /// The modification time of the object metadata in RFC 3339 format. - pub updated: chrono::DateTime, + #[serde(with = "time::serde::rfc3339")] + pub updated: time::OffsetDateTime, /// The deletion time of the object in RFC 3339 format. Returned if and only if this version of /// the object is no longer a live version, but remains in the bucket as a noncurrent version. - pub time_deleted: Option>, + #[serde(with = "time::serde::rfc3339::option")] + pub time_deleted: Option, /// Whether or not the object is subject to a temporary hold. pub temporary_hold: Option, /// Whether or not the object is subject to an event-based hold. pub event_based_hold: Option, /// The earliest time that the object can be deleted, based on a bucket's retention policy, in /// RFC 3339 format. - pub retention_expiration_time: Option>, + #[serde(with = "time::serde::rfc3339::option")] + pub retention_expiration_time: Option, /// Storage class of the object. pub storage_class: String, /// The time at which the object's storage class was last changed. When the object is initially /// created, it will be set to timeCreated. - pub time_storage_class_updated: chrono::DateTime, + #[serde(with = "time::serde::rfc3339")] + pub time_storage_class_updated: time::OffsetDateTime, /// Content-Length of the data in bytes. 
#[serde(deserialize_with = "crate::from_str")] pub size: u64, @@ -782,7 +787,7 @@ impl Object { .join(";"); // 1 construct the canonical request - let issue_date = chrono::Utc::now(); + let issue_date = time::OffsetDateTime::now_utc(); let file_path = self.path_to_resource(file_path); let query_string = Self::get_canonical_query_string( &issue_date, @@ -808,7 +813,7 @@ impl Object { {credential_scope}\n\ {hashed_canonical_request}", signing_algorithm = "GOOG4-RSA-SHA256", - current_datetime = issue_date.format("%Y%m%dT%H%M%SZ"), + current_datetime = issue_date.format(crate::ISO_8601_BASIC_FORMAT).unwrap(), credential_scope = Self::get_credential_scope(&issue_date), hashed_canonical_request = hex_hash, ); @@ -855,7 +860,7 @@ impl Object { #[inline(always)] fn get_canonical_query_string( - date: &chrono::DateTime, + date: &time::OffsetDateTime, exp: u32, headers: &str, content_disposition: Option, @@ -873,7 +878,7 @@ impl Object { X-Goog-SignedHeaders={signed}", algo = "GOOG4-RSA-SHA256", cred = percent_encode(&credential), - date = date.format("%Y%m%dT%H%M%SZ"), + date = date.format(crate::ISO_8601_BASIC_FORMAT).unwrap(), exp = exp, signed = percent_encode(headers), ); @@ -893,8 +898,8 @@ impl Object { } #[inline(always)] - fn get_credential_scope(date: &chrono::DateTime) -> String { - format!("{}/henk/storage/goog4_request", date.format("%Y%m%d")) + fn get_credential_scope(date: &time::OffsetDateTime) -> String { + format!("{}/henk/storage/goog4_request", date.format(time::macros::format_description!("[year][month][day]")).unwrap()) } } diff --git a/src/sync/bucket.rs b/src/sync/bucket.rs index 14c279f..874a66e 100644 --- a/src/sync/bucket.rs +++ b/src/sync/bucket.rs @@ -99,7 +99,7 @@ impl<'a> BucketClient<'a> { /// let mut bucket = client.bucket().read("cloud-storage-rs-doc-3")?; /// bucket.retention_policy = Some(RetentionPolicy { /// retention_period: 50, - /// effective_time: chrono::Utc::now() + chrono::Duration::seconds(50), + /// effective_time: 
time::OffsetDateTime::now_utc() + std::time::Duration::from_secs(50), /// is_locked: Some(false), /// }); /// client.bucket().update(&bucket)?; From e6b007c6de40f961b62b2d9ca2d1545dec9957cd Mon Sep 17 00:00:00 2001 From: Shell Turner Date: Tue, 10 May 2022 08:59:25 +0200 Subject: [PATCH 05/26] Make options #[serde(default)] --- src/resources/bucket.rs | 2 +- src/resources/object.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/resources/bucket.rs b/src/resources/bucket.rs index 156ef81..297ac4d 100644 --- a/src/resources/bucket.rs +++ b/src/resources/bucket.rs @@ -301,7 +301,7 @@ pub struct Condition { /// A date in `RFC 3339` format with only the date part (for instance, "2013-01-15"). This /// condition is satisfied when an object is created before midnight of the specified date in /// UTC. - #[serde(with = "crate::rfc3339_date::option")] + #[serde(default, with = "crate::rfc3339_date::option")] pub created_before: Option, /// Relevant only for versioned objects. If the value is true, this condition matches the live /// version of objects; if the value is `false`, it matches noncurrent versions of objects. diff --git a/src/resources/object.rs b/src/resources/object.rs index 9106cde..ed1a048 100644 --- a/src/resources/object.rs +++ b/src/resources/object.rs @@ -39,7 +39,7 @@ pub struct Object { pub updated: time::OffsetDateTime, /// The deletion time of the object in RFC 3339 format. Returned if and only if this version of /// the object is no longer a live version, but remains in the bucket as a noncurrent version. - #[serde(with = "time::serde::rfc3339::option")] + #[serde(default, with = "time::serde::rfc3339::option")] pub time_deleted: Option, /// Whether or not the object is subject to a temporary hold. pub temporary_hold: Option, @@ -47,7 +47,7 @@ pub struct Object { pub event_based_hold: Option, /// The earliest time that the object can be deleted, based on a bucket's retention policy, in /// RFC 3339 format. 
- #[serde(with = "time::serde::rfc3339::option")] + #[serde(default, with = "time::serde::rfc3339::option")] pub retention_expiration_time: Option, /// Storage class of the object. pub storage_class: String, From e3357289a10501a8f4cf8b9c25c814be658237e6 Mon Sep 17 00:00:00 2001 From: Alex Puschinsky Date: Tue, 23 Aug 2022 13:13:34 +0300 Subject: [PATCH 06/26] Add customizable reqwest client passing --- src/client.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/client.rs b/src/client.rs index 7296f85..e71f729 100644 --- a/src/client.rs +++ b/src/client.rs @@ -56,6 +56,14 @@ impl Client { Default::default() } + /// Constucts a client with given reqwest client + pub fn with_client(client: reqwest::Client) -> Self { + Self { + client: client, + token_cache: sync::Arc::new(crate::Token::default()), + } + } + /// Initializer with a provided refreshable token pub fn with_cache(token: impl TokenCache + Send + 'static) -> Self { Self { From adcde09bf8eb5aebed9837d5a1a785331cf2514e Mon Sep 17 00:00:00 2001 From: Alex Puschinsky Date: Fri, 26 Aug 2022 21:06:42 +0300 Subject: [PATCH 07/26] Add ClientBuilder and support a configureable reqwest client --- src/client.rs | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/src/client.rs b/src/client.rs index 7296f85..6d961c3 100644 --- a/src/client.rs +++ b/src/client.rs @@ -64,6 +64,11 @@ impl Client { } } + /// Creates a new [ClientBuilder] + pub fn builder() -> ClientBuilder { + ClientBuilder::new() + } + /// Operations on [`Bucket`](crate::bucket::Bucket)s. pub fn bucket(&self) -> BucketClient<'_> { BucketClient(self) @@ -104,3 +109,44 @@ impl Client { Ok(result) } } + +/// A ClientBuilder can be used to create a Client with custom configuration. 
+#[derive(Default)] +pub struct ClientBuilder { + client: Option, + /// Static `Token` struct that caches + token_cache: Option>, +} + +impl ClientBuilder { + /// Constructs a new ClientBuilder + pub fn new() -> Self { + Default::default() + } + + /// Returns a `Client` that uses this `ClientBuilder` configuration. + pub fn build(self) -> Client { + Client { + client: self.client.unwrap_or_default(), + token_cache: self + .token_cache + .unwrap_or(sync::Arc::new(crate::Token::default())), + } + } + + /// Sets refreshable token + pub fn with_cache(self, token: impl TokenCache + Send + 'static) -> Self { + ClientBuilder { + token_cache: Some(sync::Arc::new(token)), + ..self + } + } + + /// Sets internal [reqwest Client](https://docs.rs/reqwest/latest/reqwest/struct.Client.html) + pub fn with_reqwest_client(self, reqwest_client: reqwest::Client) -> Self { + ClientBuilder { + client: Some(reqwest_client), + ..self + } + } +} From 87261d57396e5d3f3a7608be260ac8d51985f46c Mon Sep 17 00:00:00 2001 From: Sergen Nurel Date: Fri, 23 Sep 2022 14:56:13 +0200 Subject: [PATCH 08/26] Implementation of url parameters for object CRUD operations --- src/client/object.rs | 80 ++++-- src/resources/object.rs | 591 +++++++++++++++++++++++++++++++++------- src/sync/object.rs | 118 +++++--- 3 files changed, 627 insertions(+), 162 deletions(-) diff --git a/src/client/object.rs b/src/client/object.rs index f3abc18..8d4d859 100644 --- a/src/client/object.rs +++ b/src/client/object.rs @@ -3,7 +3,11 @@ use reqwest::StatusCode; use crate::{ error::GoogleResponse, - object::{percent_encode, ComposeRequest, ObjectList, RewriteResponse, SizedByteStream}, + object::{ + percent_encode, ComposeParameters, ComposeRequest, CopyParameters, CreateParameters, + DeleteParameters, ObjectList, ReadParameters, RewriteParameters, RewriteResponse, + SizedByteStream, UpdateParameters, + }, ListRequest, Object, }; @@ -28,7 +32,7 @@ impl<'a> ObjectClient<'a> { /// /// let file: Vec = read_cute_cat("cat.png"); /// 
let client = Client::default(); - /// client.object().create("cat-photos", file, "recently read cat.png", "image/png").await?; + /// client.object().create("cat-photos", file, "recently read cat.png", "image/png", None).await?; /// # Ok(()) /// # } /// ``` @@ -38,6 +42,7 @@ impl<'a> ObjectClient<'a> { file: Vec, filename: &str, mime_type: &str, + parameters: Option, ) -> crate::Result { use reqwest::header::{CONTENT_LENGTH, CONTENT_TYPE}; @@ -54,6 +59,7 @@ impl<'a> ObjectClient<'a> { .0 .client .post(url) + .query(¶meters) .headers(headers) .body(file) .send() @@ -80,7 +86,7 @@ impl<'a> ObjectClient<'a> { /// .send() /// .await? /// .bytes_stream(); - /// client.object().create_streamed("cat-photos", file, 10, "recently read cat.png", "image/png").await?; + /// client.object().create_streamed("cat-photos", file, 10, "recently read cat.png", "image/png", None).await?; /// # Ok(()) /// # } /// ``` @@ -91,6 +97,7 @@ impl<'a> ObjectClient<'a> { length: impl Into>, filename: &str, mime_type: &str, + parameters: Option, ) -> crate::Result where S: TryStream + Send + Sync + 'static, @@ -116,6 +123,7 @@ impl<'a> ObjectClient<'a> { .0 .client .post(url) + .query(¶meters) .headers(headers) .body(body) .send() @@ -236,11 +244,17 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::Object; /// /// let client = Client::default(); - /// let object = client.object().read("my_bucket", "path/to/my/file.png").await?; + /// let object = client.object().read("my_bucket", "path/to/my/file.png", None).await?; /// # Ok(()) /// # } /// ``` - pub async fn read(&self, bucket: &str, file_name: &str) -> crate::Result { + pub async fn read( + &self, + bucket: &str, + file_name: &str, + parameters: Option, + ) -> crate::Result { + //let paramters = qs:: let url = format!( "{}/b/{}/o/{}", crate::BASE_URL, @@ -251,6 +265,7 @@ impl<'a> ObjectClient<'a> { .0 .client .get(&url) + .query(¶meters) .headers(self.0.get_headers().await?) .send() .await? 
@@ -271,11 +286,16 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::Object; /// /// let client = Client::default(); - /// let bytes = client.object().download("my_bucket", "path/to/my/file.png").await?; + /// let bytes = client.object().download("my_bucket", "path/to/my/file.png", None).await?; /// # Ok(()) /// # } /// ``` - pub async fn download(&self, bucket: &str, file_name: &str) -> crate::Result> { + pub async fn download( + &self, + bucket: &str, + file_name: &str, + parameters: Option, + ) -> crate::Result> { let url = format!( "{}/b/{}/o/{}?alt=media", crate::BASE_URL, @@ -286,6 +306,7 @@ impl<'a> ObjectClient<'a> { .0 .client .get(&url) + .query(¶meters) .headers(self.0.get_headers().await?) .send() .await?; @@ -309,7 +330,7 @@ impl<'a> ObjectClient<'a> { /// use tokio::io::{AsyncWriteExt, BufWriter}; /// /// let client = Client::default(); - /// let mut stream = client.object().download_streamed("my_bucket", "path/to/my/file.png").await?; + /// let mut stream = client.object().download_streamed("my_bucket", "path/to/my/file.png", None).await?; /// let mut file = BufWriter::new(File::create("file.png").await.unwrap()); /// while let Some(byte) = stream.next().await { /// file.write_all(&[byte.unwrap()]).await.unwrap(); @@ -322,6 +343,7 @@ impl<'a> ObjectClient<'a> { &self, bucket: &str, file_name: &str, + parameters: Option, ) -> crate::Result> + Unpin> { use futures_util::{StreamExt, TryStreamExt}; let url = format!( @@ -334,6 +356,7 @@ impl<'a> ObjectClient<'a> { .0 .client .get(&url) + .query(¶meters) .headers(self.0.get_headers().await?) .send() .await? 
@@ -359,13 +382,17 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::Object; /// /// let client = Client::default(); - /// let mut object = client.object().read("my_bucket", "path/to/my/file.png").await?; + /// let mut object = client.object().read("my_bucket", "path/to/my/file.png", None).await?; /// object.content_type = Some("application/xml".to_string()); - /// client.object().update(&object).await?; + /// client.object().update(&object, None).await?; /// # Ok(()) /// # } /// ``` - pub async fn update(&self, object: &Object) -> crate::Result { + pub async fn update( + &self, + object: &Object, + parameters: Option, + ) -> crate::Result { let url = format!( "{}/b/{}/o/{}", crate::BASE_URL, @@ -376,6 +403,7 @@ impl<'a> ObjectClient<'a> { .0 .client .put(&url) + .query(¶meters) .headers(self.0.get_headers().await?) .json(&object) .send() @@ -397,11 +425,16 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::Object; /// /// let client = Client::default(); - /// client.object().delete("my_bucket", "path/to/my/file.png").await?; + /// client.object().delete("my_bucket", "path/to/my/file.png", None).await?; /// # Ok(()) /// # } /// ``` - pub async fn delete(&self, bucket: &str, file_name: &str) -> crate::Result<()> { + pub async fn delete( + &self, + bucket: &str, + file_name: &str, + parameters: Option, + ) -> crate::Result<()> { let url = format!( "{}/b/{}/o/{}", crate::BASE_URL, @@ -412,6 +445,7 @@ impl<'a> ObjectClient<'a> { .0 .client .delete(&url) + .query(¶meters) .headers(self.0.get_headers().await?) 
.send() .await?; @@ -431,8 +465,8 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::object::{Object, ComposeRequest, SourceObject}; /// /// let client = Client::default(); - /// let obj1 = client.object().read("my_bucket", "file1").await?; - /// let obj2 = client.object().read("my_bucket", "file2").await?; + /// let obj1 = client.object().read("my_bucket", "file1", None).await?; + /// let obj2 = client.object().read("my_bucket", "file2", None).await?; /// let compose_request = ComposeRequest { /// kind: "storage#composeRequest".to_string(), /// source_objects: vec![ @@ -449,7 +483,7 @@ impl<'a> ObjectClient<'a> { /// ], /// destination: None, /// }; - /// let obj3 = client.object().compose("my_bucket", &compose_request, "test-concatted-file").await?; + /// let obj3 = client.object().compose("my_bucket", &compose_request, "test-concatted-file", None).await?; /// // obj3 is now a file with the content of obj1 and obj2 concatted together. /// # Ok(()) /// # } @@ -459,6 +493,7 @@ impl<'a> ObjectClient<'a> { bucket: &str, req: &ComposeRequest, destination_object: &str, + parameters: Option, ) -> crate::Result { let url = format!( "{}/b/{}/o/{}/compose", @@ -470,6 +505,7 @@ impl<'a> ObjectClient<'a> { .0 .client .post(&url) + .query(¶meters) .headers(self.0.get_headers().await?) .json(req) .send() @@ -491,8 +527,8 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::object::{Object, ComposeRequest}; /// /// let client = Client::default(); - /// let obj1 = client.object().read("my_bucket", "file1").await?; - /// let obj2 = client.object().copy(&obj1, "my_other_bucket", "file2").await?; + /// let obj1 = client.object().read("my_bucket", "file1", None).await?; + /// let obj2 = client.object().copy(&obj1, "my_other_bucket", "file2", None).await?; /// // obj2 is now a copy of obj1. 
/// # Ok(()) /// # } @@ -502,6 +538,7 @@ impl<'a> ObjectClient<'a> { object: &Object, destination_bucket: &str, path: &str, + parameters: Option, ) -> crate::Result { use reqwest::header::CONTENT_LENGTH; @@ -519,6 +556,7 @@ impl<'a> ObjectClient<'a> { .0 .client .post(&url) + .query(¶meters) .headers(headers) .send() .await? @@ -546,8 +584,8 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::object::Object; /// /// let client = Client::default(); - /// let obj1 = client.object().read("my_bucket", "file1").await?; - /// let obj2 = client.object().rewrite(&obj1, "my_other_bucket", "file2").await?; + /// let obj1 = client.object().read("my_bucket", "file1", None).await?; + /// let obj2 = client.object().rewrite(&obj1, "my_other_bucket", "file2", None).await?; /// // obj2 is now a copy of obj1. /// # Ok(()) /// # } @@ -557,6 +595,7 @@ impl<'a> ObjectClient<'a> { object: &Object, destination_bucket: &str, path: &str, + parameters: Option, ) -> crate::Result { use reqwest::header::CONTENT_LENGTH; @@ -574,6 +613,7 @@ impl<'a> ObjectClient<'a> { .0 .client .post(&url) + .query(¶meters) .headers(headers) .send() .await? diff --git a/src/resources/object.rs b/src/resources/object.rs index 0fd7283..e909c1b 100644 --- a/src/resources/object.rs +++ b/src/resources/object.rs @@ -192,6 +192,304 @@ pub struct ListRequest { pub versions: Option, } +/// The parameters that are optionally supplied when creating an object. +#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct CreateParameters { + /// Setting this value is equivalent of setting the `contentEncoding` metadata property of the object. + /// This can be useful when uploading an object with `uploadType=media` to indicate the encoding of the content being uploaded. + pub content_encoding: Option, + + /// Makes the operation conditional on whether the object's current generation matches the given value. 
+ /// Setting to 0 makes the operation succeed only if there are no live versions of the object. + pub if_generation_match: Option, + + /// Makes the operation conditional on whether the object's current generation does not match the given value. + /// If no live object exists, the precondition fails. + /// Setting to 0 makes the operation succeed only if there is a live version of the object. + pub if_generation_not_match: Option, + + /// Makes the operation conditional on whether the object's current metageneration matches the given value. + pub if_metageneration_match: Option, + + /// Makes the operation conditional on whether the object's current metageneration does not match the given value. + pub if_metageneration_not_match: Option, + + /// Resource name of the Cloud KMS key that will be used to encrypt the object. + /// If not specified, the request uses the bucket's default Cloud KMS key, if any, or a Google-managed encryption key. + pub kms_key_name: Option, + + /// Apply a predefined set of access controls to this object. + /// + /// Acceptable values are: + /// `authenticatedRead`: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. + /// `bucketOwnerFullControl`: Object owner gets OWNER access, and project team owners get OWNER access. + /// `bucketOwnerRead`: Object owner gets OWNER access, and project team owners get READER access. + /// `private`: Object owner gets OWNER access. + /// `projectPrivate`: Object owner gets OWNER access, and project team members get access according to their roles. + /// `publicRead`: Object owner gets OWNER access, and allUsers get READER access. + /// If `iamConfiguration.uniformBucketLevelAccess.enabled` is set to `true`, requests that include this parameter fail with a 400 Bad Request response. + pub predefined_acl: Option, + + /// Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. 
+ /// Acceptable values are: + /// `full`: Include all properties. + /// `noAcl`: Omit the owner, acl property. + pub projection: Option, +} + +/// The parameters that are optionally supplied when reading an object. +#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ReadParameters { + /// If present, selects a specific revision of this object (as opposed to the latest version, the default). + pub generation: Option, + + /// Makes the operation conditional on whether the object's current generation matches the given value. + /// Setting to 0 makes the operation succeed only if there are no live versions of the object. + pub if_generation_match: Option, + + /// Makes the operation conditional on whether the object's current generation does not match the given value. + /// If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object. + pub if_generation_not_match: Option, + + /// Makes the operation conditional on whether the object's current metageneration matches the given value. + pub if_metageneration_match: Option, + + /// Makes the operation conditional on whether the object's current metageneration does not match the given value. + pub if_metageneration_not_match: Option, + + /// Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. + /// Acceptable values are: + /// `full`: Include all properties. + /// `noAcl`: Omit the owner, acl property. + pub projection: Option, +} + +/// The parameters that are optionally supplied when composing an object. +#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ComposeParameters { + /// Apply a predefined set of access controls to the destination object. 
+ /// + /// Acceptable values are: + /// `authenticatedRead`: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. + /// `bucketOwnerFullControl`: Object owner gets OWNER access, and project team owners get OWNER access. + /// `bucketOwnerRead`: Object owner gets OWNER access, and project team owners get READER access. + /// `private`: Object owner gets OWNER access. + /// `projectPrivate`: Object owner gets OWNER access, and project team members get access according to their roles. + /// `publicRead`: Object owner gets OWNER access, and allUsers get READER access. + /// If `iamConfiguration.uniformBucketLevelAccess.enabled` is set to `true`, requests that include this parameter fail with a 400 Bad Request response. + pub destination_predefined_acl: Option, + + /// Makes the operation conditional on there being a live destination object with a generation number that matches the given value. + /// Setting `ifGenerationMatch` to 0 makes the operation succeed only if there is no live destination object. + pub if_generation_match: Option, + + /// Makes the operation conditional on there being a live destination object with a metageneration number that matches the given value. + pub if_metageneration_match: Option, + + /// Resource name of the Cloud KMS key that will be used to encrypt the composed object. + /// If not specified, the request uses the bucket's default Cloud KMS key, if any, or a Google-managed encryption key. + pub kms_key_name: Option, +} + +/// The parameters that are optionally supplied when copying an object. +#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct CopyParameters { + /// Resource name of the Cloud KMS key that will be used to encrypt the object. + /// The Cloud KMS key must be located in same location as the object. 
+ ///
+ /// If the parameter is not specified, the request uses the destination bucket's default encryption key, if any, or the Google-managed encryption key.
+ ///
+ /// If the object is large, re-encryption with the key may take too long and result in a Deadline exceeded error.
+ /// For large objects, consider using the rewrite method instead.
+ pub destination_kms_key_name: Option,
+
+ /// Apply a predefined set of access controls to the destination object.
+ ///
+ /// Acceptable values are:
+ /// `authenticatedRead`: Object owner gets OWNER access, and allAuthenticatedUsers get READER access.
+ /// `bucketOwnerFullControl`: Object owner gets OWNER access, and project team owners get OWNER access.
+ /// `bucketOwnerRead`: Object owner gets OWNER access, and project team owners get READER access.
+ /// `private`: Object owner gets OWNER access.
+ /// `projectPrivate`: Object owner gets OWNER access, and project team members get access according to their roles.
+ /// `publicRead`: Object owner gets OWNER access, and allUsers get READER access.
+ /// If `iamConfiguration.uniformBucketLevelAccess.enabled` is set to `true`, requests that include this parameter fail with a 400 Bad Request response.
+ pub destination_predefined_acl: Option,
+
+ /// Makes the operation conditional on there being a live destination object with a generation number that matches the given value.
+ /// Setting `ifGenerationMatch` to 0 makes the operation succeed only if there is no live destination object.
+ pub if_generation_match: Option,
+
+ /// Makes the operation conditional on there being a live destination object with a generation number that does not match the given value.
+ /// If no live destination object exists, the precondition fails.
+ /// Setting `ifGenerationNotMatch` to 0 makes the operation succeed if there is a live version of the object.
+ pub if_generation_not_match: Option,
+
+ /// Makes the operation conditional on there being a live destination object with a metageneration number that matches the given value.
+ pub if_metageneration_match: Option,
+
+ /// Makes the operation conditional on there being a live destination object with a metageneration number that does not match the given value.
+ pub if_metageneration_not_match: Option,
+
+ /// Makes the operation conditional on whether the source object's generation matches the given value.
+ pub if_source_generation_match: Option,
+
+ /// Makes the operation conditional on whether the source object's generation does not match the given value.
+ pub if_source_generation_not_match: Option,
+
+ /// Makes the operation conditional on whether the source object's current metageneration matches the given value.
+ pub if_source_metageneration_match: Option,
+
+ /// Makes the operation conditional on whether the source object's current metageneration does not match the given value.
+ pub if_source_metageneration_not_match: Option,
+
+ /// Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.
+ /// Acceptable values are:
+ /// `full`: Include all properties.
+ /// `noAcl`: Omit the owner, acl property.
+ pub projection: Option,
+
+ /// If present, selects a specific revision of the source object (as opposed to the latest version, the default).
+ pub source_generation: Option,
+}
+
+/// The parameters that are optionally supplied when rewriting an object.
+#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)]
+#[serde(rename_all = "camelCase")]
+pub struct RewriteParameters {
+ /// Resource name of the Cloud KMS key that will be used to encrypt the object.
+ /// The Cloud KMS key must be located in the same location as the object.
+ ///
+ /// If the parameter is not specified, the request uses the destination bucket's default encryption key, if any, or the Google-managed encryption key.
+ pub destination_kms_key_name: Option,
+
+ /// Apply a predefined set of access controls to the destination object.
+ ///
+ /// Acceptable values are:
+ /// `authenticatedRead`: Object owner gets OWNER access, and allAuthenticatedUsers get READER access.
+ /// `bucketOwnerFullControl`: Object owner gets OWNER access, and project team owners get OWNER access.
+ /// `bucketOwnerRead`: Object owner gets OWNER access, and project team owners get READER access.
+ /// `private`: Object owner gets OWNER access.
+ /// `projectPrivate`: Object owner gets OWNER access, and project team members get access according to their roles.
+ /// `publicRead`: Object owner gets OWNER access, and allUsers get READER access.
+ /// If `iamConfiguration.uniformBucketLevelAccess.enabled` is set to `true`, requests that include this parameter fail with a 400 Bad Request response.
+ pub destination_predefined_acl: Option,
+
+ /// Makes the operation conditional on there being a live destination object with a generation number that matches the given value.
+ /// Setting `ifGenerationMatch` to 0 makes the operation succeed only if there is no live destination object.
+ pub if_generation_match: Option,
+
+ /// Makes the operation conditional on there being a live destination object with a generation number that does not match the given value.
+ /// If no live destination object exists, the precondition fails.
+ /// Setting `ifGenerationNotMatch` to 0 makes the operation succeed if there is a live version of the object.
+ pub if_generation_not_match: Option,
+
+ /// Makes the operation conditional on there being a live destination object with a metageneration number that matches the given value.
+ pub if_metageneration_match: Option, + + /// Makes the operation conditional on there being a live destination object with a metageneration number that does not match the given value. + pub if_metageneration_not_match: Option, + + /// Makes the operation conditional on whether the source object's generation matches the given value. + pub if_source_generation_match: Option, + + /// Makes the operation conditional on whether the source object's generation does not match the given value. + pub if_source_generation_not_match: Option, + + /// Makes the operation conditional on whether the source object's current metageneration matches the given value. + pub if_source_metageneration_match: Option, + + /// Makes the operation conditional on whether the source object's current metageneration does not match the given value. + pub if_source_metageneration_not_match: Option, + + /// The maximum number of bytes that will be rewritten per rewrite request. + /// Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. + /// If specified the value must be an integral multiple of 1 MiB (1048576). + /// Also, this only applies to requests where the source and destination span locations and/or storage classes. + /// Finally, this value must not change across rewrite calls else you'll get an error that the `rewriteToken` is invalid. + pub max_bytes_rewritten_per_call: Option, + + /// Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. + /// Acceptable values are: + /// `full`: Include all properties. + /// `noAcl`: Omit the owner, acl property. + pub projection: Option, + + /// Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. 
+ /// Calls that provide a `rewriteToken` can omit all other request fields, but if included those fields must match the values provided in the first rewrite request. + pub rewrite_token: Option, + + /// If present, selects a specific revision of the source object (as opposed to the latest version, the default). + pub source_generation: Option, +} + +/// The parameters that are optionally supplied when deleting an object. +#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct DeleteParameters { + /// If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default). + pub generation: Option, + + /// Makes the operation conditional on whether the object's current generation matches the given value. + /// Setting to 0 makes the operation succeed only if there are no live versions of the object. + pub if_generation_match: Option, + + /// Makes the operation conditional on whether the object's current generation does not match the given value. + /// If no live object exists, the precondition fails. + /// Setting to 0 makes the operation succeed only if there is a live version of the object. + pub if_generation_not_match: Option, + + /// Makes the operation conditional on whether the object's current metageneration matches the given value. + pub if_metageneration_match: Option, + + /// Makes the operation conditional on whether the object's current metageneration does not match the given value. + pub if_metageneration_not_match: Option, +} + +/// The parameters that are optionally supplied when updating an object. +#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct UpdateParameters { + /// If present, selects a specific revision of this object (as opposed to the latest version, the default). 
+ pub generation: Option, + + /// Makes the operation conditional on whether the object's current generation matches the given value. + /// Setting to 0 makes the operation succeed only if there are no live versions of the object. + pub if_generation_match: Option, + + /// Makes the operation conditional on whether the object's current generation does not match the given value. + /// If no live object exists, the precondition fails. + /// Setting to 0 makes the operation succeed only if there is a live version of the object. + pub if_generation_not_match: Option, + + /// Makes the operation conditional on whether the object's current metageneration matches the given value. + pub if_metageneration_match: Option, + + /// Makes the operation conditional on whether the object's current metageneration does not match the given value. + pub if_metageneration_not_match: Option, + + /// Apply a predefined set of access controls to this object. + /// + /// Acceptable values are: + /// `authenticatedRead`: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. + /// `bucketOwnerFullControl`: Object owner gets OWNER access, and project team owners get OWNER access. + /// `bucketOwnerRead`: Object owner gets OWNER access, and project team owners get READER access. + /// `private`: Object owner gets OWNER access. + /// `projectPrivate`: Object owner gets OWNER access, and project team members get access according to their roles. + /// `publicRead`: Object owner gets OWNER access, and allUsers get READER access. + /// If `iamConfiguration.uniformBucketLevelAccess.enabled` is set to `true`, requests that include this parameter fail with a 400 Bad Request response. + pub predefined_acl: Option, + + /// Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. + /// Acceptable values are: + /// `full`: Include all properties. + /// `noAcl`: Omit the owner, acl property. 
+ pub projection: Option, +} + /// Acceptable values of `projection` properties to return from `Object::list` requests. #[derive(Debug, PartialEq, serde::Serialize, Clone)] #[serde(rename_all = "camelCase")] @@ -248,7 +546,7 @@ impl Object { /// use cloud_storage::Object; /// /// let file: Vec = read_cute_cat("cat.png"); - /// Object::create("cat-photos", file, "recently read cat.png", "image/png").await?; + /// Object::create("cat-photos", file, "recently read cat.png", "image/png", None).await?; /// # Ok(()) /// # } /// ``` @@ -258,10 +556,11 @@ impl Object { file: Vec, filename: &str, mime_type: &str, + parameters: Option, ) -> crate::Result { crate::CLOUD_CLIENT .object() - .create(bucket, file, filename, mime_type) + .create(bucket, file, filename, mime_type, parameters) .await } @@ -275,8 +574,9 @@ impl Object { file: Vec, filename: &str, mime_type: &str, + parameters: Option, ) -> crate::Result { - crate::runtime()?.block_on(Self::create(bucket, file, filename, mime_type)) + crate::runtime()?.block_on(Self::create(bucket, file, filename, mime_type, parameters)) } /// Create a new object. This works in the same way as `Object::create`, except it does not need @@ -292,7 +592,7 @@ impl Object { /// .send() /// .await? 
/// .bytes_stream(); - /// Object::create_streamed("cat-photos", file, 10, "recently read cat.png", "image/png").await?; + /// Object::create_streamed("cat-photos", file, 10, "recently read cat.png", "image/png", None).await?; /// # Ok(()) /// # } /// ``` @@ -303,6 +603,7 @@ impl Object { length: impl Into>, filename: &str, mime_type: &str, + parameters: Option, ) -> crate::Result where S: TryStream + Send + Sync + 'static, @@ -311,7 +612,7 @@ impl Object { { crate::CLOUD_CLIENT .object() - .create_streamed(bucket, stream, length, filename, mime_type) + .create_streamed(bucket, stream, length, filename, mime_type, parameters) .await } @@ -326,6 +627,7 @@ impl Object { length: impl Into>, filename: &str, mime_type: &str, + parameters: Option, ) -> crate::Result { let mut buffer = Vec::new(); file.read_to_end(&mut buffer) @@ -334,7 +636,7 @@ impl Object { let stream = futures_util::stream::once(async { Ok::<_, crate::Error>(buffer) }); crate::runtime()?.block_on(Self::create_streamed( - bucket, stream, length, filename, mime_type, + bucket, stream, length, filename, mime_type, parameters, )) } @@ -383,13 +685,20 @@ impl Object { /// # async fn main() -> Result<(), Box> { /// use cloud_storage::Object; /// - /// let object = Object::read("my_bucket", "path/to/my/file.png").await?; + /// let object = Object::read("my_bucket", "path/to/my/file.png", None).await?; /// # Ok(()) /// # } /// ``` #[cfg(feature = "global-client")] - pub async fn read(bucket: &str, file_name: &str) -> crate::Result { - crate::CLOUD_CLIENT.object().read(bucket, file_name).await + pub async fn read( + bucket: &str, + file_name: &str, + parameters: Option, + ) -> crate::Result { + crate::CLOUD_CLIENT + .object() + .read(bucket, file_name, parameters) + .await } /// The synchronous equivalent of `Object::read`. @@ -397,8 +706,12 @@ impl Object { /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
#[cfg(all(feature = "global-client", feature = "sync"))] - pub fn read_sync(bucket: &str, file_name: &str) -> crate::Result { - crate::runtime()?.block_on(Self::read(bucket, file_name)) + pub fn read_sync( + bucket: &str, + file_name: &str, + parameters: Option, + ) -> crate::Result { + crate::runtime()?.block_on(Self::read(bucket, file_name, parameters)) } /// Download the content of the object with the specified name in the specified bucket. @@ -408,15 +721,19 @@ impl Object { /// # async fn main() -> Result<(), Box> { /// use cloud_storage::Object; /// - /// let bytes = Object::download("my_bucket", "path/to/my/file.png").await?; + /// let bytes = Object::download("my_bucket", "path/to/my/file.png", None).await?; /// # Ok(()) /// # } /// ``` #[cfg(feature = "global-client")] - pub async fn download(bucket: &str, file_name: &str) -> crate::Result> { + pub async fn download( + bucket: &str, + file_name: &str, + parameters: Option, + ) -> crate::Result> { crate::CLOUD_CLIENT .object() - .download(bucket, file_name) + .download(bucket, file_name, parameters) .await } @@ -425,8 +742,12 @@ impl Object { /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
#[cfg(all(feature = "global-client", feature = "sync"))] - pub fn download_sync(bucket: &str, file_name: &str) -> crate::Result> { - crate::runtime()?.block_on(Self::download(bucket, file_name)) + pub fn download_sync( + bucket: &str, + file_name: &str, + parameters: Option, + ) -> crate::Result> { + crate::runtime()?.block_on(Self::download(bucket, file_name, parameters)) } /// Download the content of the object with the specified name in the specified bucket, without @@ -440,7 +761,7 @@ impl Object { /// use std::fs::File; /// use std::io::{BufWriter, Write}; /// - /// let mut stream = Object::download_streamed("my_bucket", "path/to/my/file.png").await?; + /// let mut stream = Object::download_streamed("my_bucket", "path/to/my/file.png", None).await?; /// let mut file = BufWriter::new(File::create("file.png").unwrap()); /// while let Some(byte) = stream.next().await { /// file.write_all(&[byte.unwrap()]).unwrap(); @@ -452,10 +773,11 @@ impl Object { pub async fn download_streamed( bucket: &str, file_name: &str, + parameters: Option, ) -> crate::Result> + Unpin> { crate::CLOUD_CLIENT .object() - .download_streamed(bucket, file_name) + .download_streamed(bucket, file_name, parameters) .await } @@ -466,15 +788,15 @@ impl Object { /// # async fn main() -> Result<(), Box> { /// use cloud_storage::Object; /// - /// let mut object = Object::read("my_bucket", "path/to/my/file.png").await?; + /// let mut object = Object::read("my_bucket", "path/to/my/file.png", None).await?; /// object.content_type = Some("application/xml".to_string()); - /// object.update().await?; + /// object.update(None).await?; /// # Ok(()) /// # } /// ``` #[cfg(feature = "global-client")] - pub async fn update(&self) -> crate::Result { - crate::CLOUD_CLIENT.object().update(self).await + pub async fn update(&self, parameters: Option) -> crate::Result { + crate::CLOUD_CLIENT.object().update(self, parameters).await } /// The synchronous equivalent of `Object::download`. 
@@ -482,8 +804,8 @@ impl Object { /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn update_sync(&self) -> crate::Result { - crate::runtime()?.block_on(self.update()) + pub fn update_sync(&self, parameters: Option) -> crate::Result { + crate::runtime()?.block_on(self.update(parameters)) } /// Deletes a single object with the specified name in the specified bucket. @@ -493,13 +815,20 @@ impl Object { /// # async fn main() -> Result<(), Box> { /// use cloud_storage::Object; /// - /// Object::delete("my_bucket", "path/to/my/file.png").await?; + /// Object::delete("my_bucket", "path/to/my/file.png", None).await?; /// # Ok(()) /// # } /// ``` #[cfg(feature = "global-client")] - pub async fn delete(bucket: &str, file_name: &str) -> crate::Result<()> { - crate::CLOUD_CLIENT.object().delete(bucket, file_name).await + pub async fn delete( + bucket: &str, + file_name: &str, + parameters: Option, + ) -> crate::Result<()> { + crate::CLOUD_CLIENT + .object() + .delete(bucket, file_name, parameters) + .await } /// The synchronous equivalent of `Object::delete`. @@ -507,8 +836,12 @@ impl Object { /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn delete_sync(bucket: &str, file_name: &str) -> crate::Result<()> { - crate::runtime()?.block_on(Self::delete(bucket, file_name)) + pub fn delete_sync( + bucket: &str, + file_name: &str, + parameters: Option, + ) -> crate::Result<()> { + crate::runtime()?.block_on(Self::delete(bucket, file_name, parameters)) } /// Obtains a single object with the specified name in the specified bucket. 
@@ -518,8 +851,8 @@ impl Object { /// # async fn main() -> Result<(), Box> { /// use cloud_storage::object::{Object, ComposeRequest, SourceObject}; /// - /// let obj1 = Object::read("my_bucket", "file1").await?; - /// let obj2 = Object::read("my_bucket", "file2").await?; + /// let obj1 = Object::read("my_bucket", "file1", None).await?; + /// let obj2 = Object::read("my_bucket", "file2", None).await?; /// let compose_request = ComposeRequest { /// kind: "storage#composeRequest".to_string(), /// source_objects: vec![ @@ -536,7 +869,7 @@ impl Object { /// ], /// destination: None, /// }; - /// let obj3 = Object::compose("my_bucket", &compose_request, "test-concatted-file").await?; + /// let obj3 = Object::compose("my_bucket", &compose_request, "test-concatted-file", None).await?; /// // obj3 is now a file with the content of obj1 and obj2 concatted together. /// # Ok(()) /// # } @@ -546,10 +879,11 @@ impl Object { bucket: &str, req: &ComposeRequest, destination_object: &str, + parameters: Option, ) -> crate::Result { crate::CLOUD_CLIENT .object() - .compose(bucket, req, destination_object) + .compose(bucket, req, destination_object, parameters) .await } @@ -562,8 +896,9 @@ impl Object { bucket: &str, req: &ComposeRequest, destination_object: &str, + parameters: Option, ) -> crate::Result { - crate::runtime()?.block_on(Self::compose(bucket, req, destination_object)) + crate::runtime()?.block_on(Self::compose(bucket, req, destination_object, parameters)) } /// Copy this object to the target bucket and path @@ -573,17 +908,22 @@ impl Object { /// # async fn main() -> Result<(), Box> { /// use cloud_storage::object::{Object, ComposeRequest}; /// - /// let obj1 = Object::read("my_bucket", "file1").await?; - /// let obj2 = obj1.copy("my_other_bucket", "file2").await?; + /// let obj1 = Object::read("my_bucket", "file1", None).await?; + /// let obj2 = obj1.copy("my_other_bucket", "file2", None).await?; /// // obj2 is now a copy of obj1. 
/// # Ok(()) /// # } /// ``` #[cfg(feature = "global-client")] - pub async fn copy(&self, destination_bucket: &str, path: &str) -> crate::Result { + pub async fn copy( + &self, + destination_bucket: &str, + path: &str, + parameters: Option, + ) -> crate::Result { crate::CLOUD_CLIENT .object() - .copy(self, destination_bucket, path) + .copy(self, destination_bucket, path, parameters) .await } @@ -592,8 +932,13 @@ impl Object { /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn copy_sync(&self, destination_bucket: &str, path: &str) -> crate::Result { - crate::runtime()?.block_on(self.copy(destination_bucket, path)) + pub fn copy_sync( + &self, + destination_bucket: &str, + path: &str, + parameters: Option, + ) -> crate::Result { + crate::runtime()?.block_on(self.copy(destination_bucket, path, parameters)) } /// Moves a file from the current location to the target bucket and path. @@ -610,17 +955,22 @@ impl Object { /// # async fn main() -> Result<(), Box> { /// use cloud_storage::object::Object; /// - /// let obj1 = Object::read("my_bucket", "file1").await?; - /// let obj2 = obj1.rewrite("my_other_bucket", "file2").await?; + /// let obj1 = Object::read("my_bucket", "file1", None).await?; + /// let obj2 = obj1.rewrite("my_other_bucket", "file2", None).await?; /// // obj2 is now a copy of obj1. /// # Ok(()) /// # } /// ``` #[cfg(feature = "global-client")] - pub async fn rewrite(&self, destination_bucket: &str, path: &str) -> crate::Result { + pub async fn rewrite( + &self, + destination_bucket: &str, + path: &str, + parameters: Option, + ) -> crate::Result { crate::CLOUD_CLIENT .object() - .rewrite(self, destination_bucket, path) + .rewrite(self, destination_bucket, path, parameters) .await } @@ -629,8 +979,13 @@ impl Object { /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
#[cfg(all(feature = "global-client", feature = "sync"))] - pub fn rewrite_sync(&self, destination_bucket: &str, path: &str) -> crate::Result { - crate::runtime()?.block_on(self.rewrite(destination_bucket, path)) + pub fn rewrite_sync( + &self, + destination_bucket: &str, + path: &str, + parameters: Option, + ) -> crate::Result { + crate::runtime()?.block_on(self.rewrite(destination_bucket, path, parameters)) } /// Creates a [Signed Url](https://cloud.google.com/storage/docs/access-control/signed-urls) @@ -643,7 +998,7 @@ impl Object { /// use cloud_storage::{Client, object::{Object, ComposeRequest}}; /// /// let client = Client::default(); - /// let obj1 = client.object().read("my_bucket", "file1").await?; + /// let obj1 = client.object().read("my_bucket", "file1", None).await?; /// let url = obj1.download_url(50)?; /// // url is now a url to which an unauthenticated user can make a request to download a file /// // for 50 seconds. @@ -664,7 +1019,7 @@ impl Object { /// use cloud_storage::{Client, object::{Object, ComposeRequest}}; /// /// let client = Client::default(); - /// let obj1 = client.object().read("my_bucket", "file1").await?; + /// let obj1 = client.object().read("my_bucket", "file1", None).await?; /// let url = obj1.download_url(50)?; /// // url is now a url to which an unauthenticated user can make a request to download a file /// // for 50 seconds. @@ -695,7 +1050,7 @@ impl Object { /// use cloud_storage::{Client, object::{Object, ComposeRequest}}; /// /// let client = Client::default(); - /// let obj1 = client.object().read("my_bucket", "file1").await?; + /// let obj1 = client.object().read("my_bucket", "file1", None).await?; /// let url = obj1.upload_url(50)?; /// // url is now a url to which an unauthenticated user can make a PUT request to upload a file /// // for 50 seconds. 
@@ -717,7 +1072,7 @@ impl Object { /// use std::collections::HashMap; /// /// let client = Client::default(); - /// let obj1 = client.object().read("my_bucket", "file1").await?; + /// let obj1 = client.object().read("my_bucket", "file1", None).await?; /// let mut custom_metadata = HashMap::new(); /// custom_metadata.insert(String::from("field"), String::from("value")); /// let (url, headers) = obj1.upload_url_with(50, custom_metadata)?; @@ -978,7 +1333,7 @@ mod tests { #[tokio::test] async fn create() -> Result<(), Box> { let bucket = crate::read_test_bucket().await; - Object::create(&bucket.name, vec![0, 1], "test-create", "text/plain").await?; + Object::create(&bucket.name, vec![0, 1], "test-create", "text/plain", None).await?; Ok(()) } @@ -994,6 +1349,7 @@ mod tests { 2, "test-create-streamed", "text/plain", + None, ) .await?; Ok(()) @@ -1037,7 +1393,7 @@ mod tests { ]; for name in &prefix_names { - Object::create(&test_bucket.name, vec![0, 1], name, "text/plain").await?; + Object::create(&test_bucket.name, vec![0, 1], name, "text/plain", None).await?; } let list = flattened_list_prefix_stream(&test_bucket.name, "test-list-prefix/").await?; @@ -1050,8 +1406,8 @@ mod tests { #[tokio::test] async fn read() -> Result<(), Box> { let bucket = crate::read_test_bucket().await; - Object::create(&bucket.name, vec![0, 1], "test-read", "text/plain").await?; - Object::read(&bucket.name, "test-read").await?; + Object::create(&bucket.name, vec![0, 1], "test-read", "text/plain", None).await?; + Object::read(&bucket.name, "test-read", None).await?; Ok(()) } @@ -1064,10 +1420,11 @@ mod tests { content.to_vec(), "test-download", "application/octet-stream", + None, ) .await?; - let data = Object::download(&bucket.name, "test-download").await?; + let data = Object::download(&bucket.name, "test-download", None).await?; assert_eq!(data, content); Ok(()) @@ -1082,10 +1439,11 @@ mod tests { content.to_vec(), "test-download", "application/octet-stream", + None, ) .await?; - let result = 
Object::download_streamed(&bucket.name, "test-download").await?; + let result = Object::download_streamed(&bucket.name, "test-download", None).await?; let data = result.try_collect::>().await?; assert_eq!(data, content); @@ -1101,10 +1459,12 @@ mod tests { content.to_vec(), "test-download-large", "application/octet-stream", + None, ) .await?; - let mut result = Object::download_streamed(&bucket.name, "test-download-large").await?; + let mut result = + Object::download_streamed(&bucket.name, "test-download-large", None).await?; let mut data: Vec = Vec::new(); while let Some(part) = result.next().await { data.push(part?); @@ -1117,18 +1477,19 @@ mod tests { #[tokio::test] async fn update() -> Result<(), Box> { let bucket = crate::read_test_bucket().await; - let mut obj = Object::create(&bucket.name, vec![0, 1], "test-update", "text/plain").await?; + let mut obj = + Object::create(&bucket.name, vec![0, 1], "test-update", "text/plain", None).await?; obj.content_type = Some("application/xml".to_string()); - obj.update().await?; + obj.update(None).await?; Ok(()) } #[tokio::test] async fn delete() -> Result<(), Box> { let bucket = crate::read_test_bucket().await; - Object::create(&bucket.name, vec![0, 1], "test-delete", "text/plain").await?; + Object::create(&bucket.name, vec![0, 1], "test-delete", "text/plain", None).await?; - Object::delete(&bucket.name, "test-delete").await?; + Object::delete(&bucket.name, "test-delete", None).await?; let list: Vec<_> = flattened_list_prefix_stream(&bucket.name, "test-delete").await?; assert!(list.is_empty()); @@ -1142,7 +1503,7 @@ mod tests { let nonexistent_object = "test-delete-nonexistent"; - let delete_result = Object::delete(&bucket.name, nonexistent_object).await; + let delete_result = Object::delete(&bucket.name, nonexistent_object, None).await; if let Err(Error::Google(google_error_response)) = delete_result { assert!(google_error_response.to_string().contains(&format!( @@ -1159,8 +1520,22 @@ mod tests { #[tokio::test] async 
fn compose() -> Result<(), Box> { let bucket = crate::read_test_bucket().await; - let obj1 = Object::create(&bucket.name, vec![0, 1], "test-compose-1", "text/plain").await?; - let obj2 = Object::create(&bucket.name, vec![2, 3], "test-compose-2", "text/plain").await?; + let obj1 = Object::create( + &bucket.name, + vec![0, 1], + "test-compose-1", + "text/plain", + None, + ) + .await?; + let obj2 = Object::create( + &bucket.name, + vec![2, 3], + "test-compose-2", + "text/plain", + None, + ) + .await?; let compose_request = ComposeRequest { kind: "storage#composeRequest".to_string(), source_objects: vec![ @@ -1177,7 +1552,8 @@ mod tests { ], destination: None, }; - let obj3 = Object::compose(&bucket.name, &compose_request, "test-concatted-file").await?; + let obj3 = + Object::compose(&bucket.name, &compose_request, "test-concatted-file", None).await?; let url = obj3.download_url(100)?; let content = reqwest::get(&url).await?.text().await?; assert_eq!(content.as_bytes(), &[0, 1, 2, 3]); @@ -1187,16 +1563,20 @@ mod tests { #[tokio::test] async fn copy() -> Result<(), Box> { let bucket = crate::read_test_bucket().await; - let original = Object::create(&bucket.name, vec![2, 3], "test-copy", "text/plain").await?; - original.copy(&bucket.name, "test-copy - copy").await?; + let original = + Object::create(&bucket.name, vec![2, 3], "test-copy", "text/plain", None).await?; + original + .copy(&bucket.name, "test-copy - copy", None) + .await?; Ok(()) } #[tokio::test] async fn rewrite() -> Result<(), Box> { let bucket = crate::read_test_bucket().await; - let obj = Object::create(&bucket.name, vec![0, 1], "test-rewrite", "text/plain").await?; - let obj = obj.rewrite(&bucket.name, "test-rewritten").await?; + let obj = + Object::create(&bucket.name, vec![0, 1], "test-rewrite", "text/plain", None).await?; + let obj = obj.rewrite(&bucket.name, "test-rewritten", None).await?; let url = obj.download_url(100)?; let client = reqwest::Client::default(); let download = 
client.head(&url).send().await?; @@ -1216,8 +1596,8 @@ mod tests { "测试很重要", ]; for name in &complicated_names { - let _obj = Object::create(&bucket.name, vec![0, 1], name, "text/plain").await?; - let obj = Object::read(&bucket.name, &name).await.unwrap(); + let _obj = Object::create(&bucket.name, vec![0, 1], name, "text/plain", None).await?; + let obj = Object::read(&bucket.name, &name, None).await.unwrap(); let url = obj.download_url(100)?; let client = reqwest::Client::default(); let download = client.head(&url).send().await?; @@ -1230,7 +1610,8 @@ mod tests { async fn test_download_url_with() -> Result<(), Box> { let bucket = crate::read_test_bucket().await; let client = reqwest::Client::new(); - let obj = Object::create(&bucket.name, vec![0, 1], "test-rewrite", "text/plain").await?; + let obj = + Object::create(&bucket.name, vec![0, 1], "test-rewrite", "text/plain", None).await?; let opts1 = crate::DownloadOptions::new().content_disposition("attachment"); let download_url1 = obj.download_url_with(100, opts1)?; @@ -1244,7 +1625,7 @@ mod tests { let bucket = crate::read_test_bucket().await; let client = reqwest::Client::new(); let blob_name = "test-upload-url"; - let obj = Object::create(&bucket.name, vec![0, 1], blob_name, "text/plain").await?; + let obj = Object::create(&bucket.name, vec![0, 1], blob_name, "text/plain", None).await?; let url = obj.upload_url(100).unwrap(); let updated_content = vec![2, 3]; @@ -1254,7 +1635,7 @@ mod tests { .send() .await?; assert!(response.status().is_success()); - let data = Object::download(&bucket.name, blob_name).await?; + let data = Object::download(&bucket.name, blob_name, None).await?; assert_eq!(data, updated_content); Ok(()) } @@ -1264,7 +1645,7 @@ mod tests { let bucket = crate::read_test_bucket().await; let client = reqwest::Client::new(); let blob_name = "test-upload-url"; - let obj = Object::create(&bucket.name, vec![0, 1], blob_name, "text/plain").await?; + let obj = Object::create(&bucket.name, vec![0, 1], 
blob_name, "text/plain", None).await?; let mut custom_metadata = HashMap::new(); custom_metadata.insert(String::from("field"), String::from("value")); @@ -1276,7 +1657,7 @@ mod tests { } let response = request.send().await?; assert!(response.status().is_success()); - let updated_obj = Object::read(&bucket.name, blob_name).await?; + let updated_obj = Object::read(&bucket.name, blob_name, None).await?; let obj_metadata = updated_obj.metadata.unwrap(); assert_eq!(obj_metadata.get("field").unwrap(), "value"); Ok(()) @@ -1303,7 +1684,7 @@ mod tests { #[test] fn create() -> Result<(), Box> { let bucket = crate::read_test_bucket_sync(); - Object::create_sync(&bucket.name, vec![0, 1], "test-create", "text/plain")?; + Object::create_sync(&bucket.name, vec![0, 1], "test-create", "text/plain", None)?; Ok(()) } @@ -1317,6 +1698,7 @@ mod tests { 2, "test-create-streamed", "text/plain", + None, )?; Ok(()) } @@ -1340,7 +1722,7 @@ mod tests { ]; for name in &prefix_names { - Object::create_sync(&test_bucket.name, vec![0, 1], name, "text/plain")?; + Object::create_sync(&test_bucket.name, vec![0, 1], name, "text/plain", None)?; } let request = ListRequest { @@ -1371,7 +1753,7 @@ mod tests { ]; for name in &prefix_names { - Object::create_sync(&test_bucket.name, vec![0, 1], name, "text/plain")?; + Object::create_sync(&test_bucket.name, vec![0, 1], name, "text/plain", None)?; } let request = ListRequest { @@ -1388,8 +1770,8 @@ mod tests { #[test] fn read() -> Result<(), Box> { let bucket = crate::read_test_bucket_sync(); - Object::create_sync(&bucket.name, vec![0, 1], "test-read", "text/plain")?; - Object::read_sync(&bucket.name, "test-read")?; + Object::create_sync(&bucket.name, vec![0, 1], "test-read", "text/plain", None)?; + Object::read_sync(&bucket.name, "test-read", None)?; Ok(()) } @@ -1402,9 +1784,10 @@ mod tests { content.to_vec(), "test-download", "application/octet-stream", + None, )?; - let data = Object::download_sync(&bucket.name, "test-download")?; + let data = 
Object::download_sync(&bucket.name, "test-download", None)?; assert_eq!(data, content); Ok(()) @@ -1414,18 +1797,18 @@ mod tests { fn update() -> Result<(), Box> { let bucket = crate::read_test_bucket_sync(); let mut obj = - Object::create_sync(&bucket.name, vec![0, 1], "test-update", "text/plain")?; + Object::create_sync(&bucket.name, vec![0, 1], "test-update", "text/plain", None)?; obj.content_type = Some("application/xml".to_string()); - obj.update_sync()?; + obj.update_sync(None)?; Ok(()) } #[test] fn delete() -> Result<(), Box> { let bucket = crate::read_test_bucket_sync(); - Object::create_sync(&bucket.name, vec![0, 1], "test-delete", "text/plain")?; + Object::create_sync(&bucket.name, vec![0, 1], "test-delete", "text/plain", None)?; - Object::delete_sync(&bucket.name, "test-delete")?; + Object::delete_sync(&bucket.name, "test-delete", None)?; let request = ListRequest { prefix: Some("test-delete".into()), @@ -1444,7 +1827,7 @@ mod tests { let nonexistent_object = "test-delete-nonexistent"; - let delete_result = Object::delete_sync(&bucket.name, nonexistent_object); + let delete_result = Object::delete_sync(&bucket.name, nonexistent_object, None); if let Err(Error::Google(google_error_response)) = delete_result { assert!(google_error_response.to_string().contains(&format!( @@ -1461,10 +1844,20 @@ mod tests { #[test] fn compose() -> Result<(), Box> { let bucket = crate::read_test_bucket_sync(); - let obj1 = - Object::create_sync(&bucket.name, vec![0, 1], "test-compose-1", "text/plain")?; - let obj2 = - Object::create_sync(&bucket.name, vec![2, 3], "test-compose-2", "text/plain")?; + let obj1 = Object::create_sync( + &bucket.name, + vec![0, 1], + "test-compose-1", + "text/plain", + None, + )?; + let obj2 = Object::create_sync( + &bucket.name, + vec![2, 3], + "test-compose-2", + "text/plain", + None, + )?; let compose_request = ComposeRequest { kind: "storage#composeRequest".to_string(), source_objects: vec![ @@ -1481,7 +1874,8 @@ mod tests { ], destination: 
None, }; - let obj3 = Object::compose_sync(&bucket.name, &compose_request, "test-concatted-file")?; + let obj3 = + Object::compose_sync(&bucket.name, &compose_request, "test-concatted-file", None)?; let url = obj3.download_url(100)?; let content = reqwest::blocking::get(&url)?.text()?; assert_eq!(content.as_bytes(), &[0, 1, 2, 3]); @@ -1492,16 +1886,17 @@ mod tests { fn copy() -> Result<(), Box> { let bucket = crate::read_test_bucket_sync(); let original = - Object::create_sync(&bucket.name, vec![2, 3], "test-copy", "text/plain")?; - original.copy_sync(&bucket.name, "test-copy - copy")?; + Object::create_sync(&bucket.name, vec![2, 3], "test-copy", "text/plain", None)?; + original.copy_sync(&bucket.name, "test-copy - copy", None)?; Ok(()) } #[test] fn rewrite() -> Result<(), Box> { let bucket = crate::read_test_bucket_sync(); - let obj = Object::create_sync(&bucket.name, vec![0, 1], "test-rewrite", "text/plain")?; - let obj = obj.rewrite_sync(&bucket.name, "test-rewritten")?; + let obj = + Object::create_sync(&bucket.name, vec![0, 1], "test-rewrite", "text/plain", None)?; + let obj = obj.rewrite_sync(&bucket.name, "test-rewritten", None)?; let url = obj.download_url(100)?; let client = reqwest::blocking::Client::new(); let download = client.head(&url).send()?; @@ -1521,8 +1916,8 @@ mod tests { "测试很重要", ]; for name in &complicated_names { - let _obj = Object::create_sync(&bucket.name, vec![0, 1], name, "text/plain")?; - let obj = Object::read_sync(&bucket.name, &name).unwrap(); + let _obj = Object::create_sync(&bucket.name, vec![0, 1], name, "text/plain", None)?; + let obj = Object::read_sync(&bucket.name, &name, None).unwrap(); let url = obj.download_url(100)?; let client = reqwest::blocking::Client::new(); let download = client.head(&url).send()?; diff --git a/src/sync/object.rs b/src/sync/object.rs index 65a94d6..f20c8db 100644 --- a/src/sync/object.rs +++ b/src/sync/object.rs @@ -1,5 +1,8 @@ use crate::{ - object::{ComposeRequest, ObjectList}, + object::{ + 
ComposeParameters, ComposeRequest, CopyParameters, CreateParameters, DeleteParameters, + ObjectList, ReadParameters, RewriteParameters, UpdateParameters, + }, ListRequest, Object, }; use futures_util::TryStreamExt; @@ -21,7 +24,7 @@ impl<'a> ObjectClient<'a> { /// /// let file: Vec = read_cute_cat("cat.png"); /// let client = Client::new()?; - /// client.object().create("cat-photos", file, "recently read cat.png", "image/png")?; + /// client.object().create("cat-photos", file, "recently read cat.png", "image/png", None)?; /// # Ok(()) /// # } /// ``` @@ -31,12 +34,13 @@ impl<'a> ObjectClient<'a> { file: Vec, filename: &str, mime_type: &str, + parameters: Option, ) -> crate::Result { self.0.runtime.block_on( self.0 .client .object() - .create(bucket, file, filename, mime_type), + .create(bucket, file, filename, mime_type, parameters), ) } @@ -49,6 +53,7 @@ impl<'a> ObjectClient<'a> { length: impl Into>, filename: &str, mime_type: &str, + parameters: Option, ) -> crate::Result where R: std::io::Read + Send + Sync + Unpin + 'static, @@ -59,7 +64,7 @@ impl<'a> ObjectClient<'a> { self.0 .client .object() - .create_streamed(bucket, stream, length, filename, mime_type), + .create_streamed(bucket, stream, length, filename, mime_type, parameters), ) } @@ -93,14 +98,19 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::Object; /// /// let client = Client::new()?; - /// let object = client.object().read("my_bucket", "path/to/my/file.png")?; + /// let object = client.object().read("my_bucket", "path/to/my/file.png", None)?; /// # Ok(()) /// # } /// ``` - pub fn read(&self, bucket: &str, file_name: &str) -> crate::Result { + pub fn read( + &self, + bucket: &str, + file_name: &str, + parameters: Option, + ) -> crate::Result { self.0 .runtime - .block_on(self.0.client.object().read(bucket, file_name)) + .block_on(self.0.client.object().read(bucket, file_name, parameters)) } /// Download the content of the object with the specified name in the specified bucket. 
@@ -111,14 +121,22 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::Object; /// /// let client = Client::new()?; - /// let bytes = client.object().download("my_bucket", "path/to/my/file.png")?; + /// let bytes = client.object().download("my_bucket", "path/to/my/file.png", None)?; /// # Ok(()) /// # } /// ``` - pub fn download(&self, bucket: &str, file_name: &str) -> crate::Result> { - self.0 - .runtime - .block_on(self.0.client.object().download(bucket, file_name)) + pub fn download( + &self, + bucket: &str, + file_name: &str, + parameters: Option, + ) -> crate::Result> { + self.0.runtime.block_on( + self.0 + .client + .object() + .download(bucket, file_name, parameters), + ) } /// Obtains a single object with the specified name in the specified bucket. @@ -129,16 +147,20 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::Object; /// /// let client = Client::new()?; - /// let mut object = client.object().read("my_bucket", "path/to/my/file.png")?; + /// let mut object = client.object().read("my_bucket", "path/to/my/file.png", None)?; /// object.content_type = Some("application/xml".to_string()); - /// client.object().update(&object)?; + /// client.object().update(&object, None)?; /// # Ok(()) /// # } /// ``` - pub fn update(&self, object: &Object) -> crate::Result { + pub fn update( + &self, + object: &Object, + parameters: Option, + ) -> crate::Result { self.0 .runtime - .block_on(self.0.client.object().update(object)) + .block_on(self.0.client.object().update(object, parameters)) } /// Deletes a single object with the specified name in the specified bucket. 
@@ -149,14 +171,19 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::Object; /// /// let client = Client::new()?; - /// client.object().delete("my_bucket", "path/to/my/file.png")?; + /// client.object().delete("my_bucket", "path/to/my/file.png", None)?; /// # Ok(()) /// # } /// ``` - pub fn delete(&self, bucket: &str, file_name: &str) -> crate::Result<()> { + pub fn delete( + &self, + bucket: &str, + file_name: &str, + parameters: Option, + ) -> crate::Result<()> { self.0 .runtime - .block_on(self.0.client.object().delete(bucket, file_name)) + .block_on(self.0.client.object().delete(bucket, file_name, parameters)) } /// Obtains a single object with the specified name in the specified bucket. @@ -167,8 +194,8 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::object::{Object, ComposeRequest, SourceObject}; /// /// let client = Client::new()?; - /// let obj1 = client.object().read("my_bucket", "file1")?; - /// let obj2 = client.object().read("my_bucket", "file2")?; + /// let obj1 = client.object().read("my_bucket", "file1", None)?; + /// let obj2 = client.object().read("my_bucket", "file2", None)?; /// let compose_request = ComposeRequest { /// kind: "storage#composeRequest".to_string(), /// source_objects: vec![ @@ -185,7 +212,7 @@ impl<'a> ObjectClient<'a> { /// ], /// destination: None, /// }; - /// let obj3 = client.object().compose("my_bucket", &compose_request, "test-concatted-file")?; + /// let obj3 = client.object().compose("my_bucket", &compose_request, "test-concatted-file", None)?; /// // obj3 is now a file with the content of obj1 and obj2 concatted together. 
/// # Ok(()) /// # } @@ -195,13 +222,14 @@ impl<'a> ObjectClient<'a> { bucket: &str, req: &ComposeRequest, destination_object: &str, + parameters: Option, ) -> crate::Result { - self.0.runtime.block_on( - self.0 - .client - .object() - .compose(bucket, req, destination_object), - ) + self.0.runtime.block_on(self.0.client.object().compose( + bucket, + req, + destination_object, + parameters, + )) } /// Copy this object to the target bucket and path @@ -212,8 +240,8 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::object::{Object, ComposeRequest}; /// /// let client = Client::new()?; - /// let obj1 = client.object().read("my_bucket", "file1")?; - /// let obj2 = client.object().copy(&obj1, "my_other_bucket", "file2")?; + /// let obj1 = client.object().read("my_bucket", "file1", None)?; + /// let obj2 = client.object().copy(&obj1, "my_other_bucket", "file2", None)?; /// // obj2 is now a copy of obj1. /// # Ok(()) /// # } @@ -223,13 +251,14 @@ impl<'a> ObjectClient<'a> { object: &Object, destination_bucket: &str, path: &str, + parameters: Option, ) -> crate::Result { - self.0.runtime.block_on( - self.0 - .client - .object() - .copy(object, destination_bucket, path), - ) + self.0.runtime.block_on(self.0.client.object().copy( + object, + destination_bucket, + path, + parameters, + )) } /// Moves a file from the current location to the target bucket and path. @@ -247,8 +276,8 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::object::Object; /// /// let client = Client::new()?; - /// let obj1 = client.object().read("my_bucket", "file1")?; - /// let obj2 = client.object().rewrite(&obj1, "my_other_bucket", "file2")?; + /// let obj1 = client.object().read("my_bucket", "file1", None)?; + /// let obj2 = client.object().rewrite(&obj1, "my_other_bucket", "file2", None)?; /// // obj2 is now a copy of obj1. 
/// # Ok(()) /// # } @@ -258,12 +287,13 @@ impl<'a> ObjectClient<'a> { object: &Object, destination_bucket: &str, path: &str, + parameters: Option, ) -> crate::Result { - self.0.runtime.block_on( - self.0 - .client - .object() - .rewrite(object, destination_bucket, path), - ) + self.0.runtime.block_on(self.0.client.object().rewrite( + object, + destination_bucket, + path, + parameters, + )) } } From b319da056084a2c419832fb2ec8e67b96b7f3730 Mon Sep 17 00:00:00 2001 From: Sergen Nurel Date: Fri, 23 Sep 2022 16:35:55 +0200 Subject: [PATCH 09/26] Updated Readme file --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 1d61524..966d03c 100644 --- a/README.md +++ b/README.md @@ -17,14 +17,14 @@ let new_bucket = NewBucket { name: "mybucket", ..Default::default() } let bucket = Bucket::create(new_bucket).await?; // upload a file to our new bucket let content = b"Your file is now on google cloud storage!"; -bucket.upload(content, "folder/filename.txt", "application/text").await?; -let mut object = Object::create("mybucket", content, "folder/filename.txt", "application/text").await?; +bucket.upload(content, "folder/filename.txt", "application/text", None).await?; +let mut object = Object::create("mybucket", content, "folder/filename.txt", "application/text", None).await?; // let's copy the file -object.copy("mybucket2: electric boogaloo", "otherfolder/filename.txt").await?; +object.copy("mybucket2: electric boogaloo", "otherfolder/filename.txt", None).await?; // print a link to the file println!("{}", object.download_url(1000)); // download link for 1000 seconds // remove the file from the bucket -object.delete().await?; +object.delete(None).await?; ``` Authorization can be granted using the `SERVICE_ACCOUNT` or `GOOGLE_APPLICATION_CREDENTIALS` environment variable, which should contain path to the `service-account-*******.json` file that contains the Google credentials. 
Alternatively, the service account credentials can be provided as JSON directly through the `SERVICE_ACCOUNT_JSON` or `GOOGLE_APPLICATION_CREDENTIALS_JSON` environment variable, which is useful when providing secrets in CI or k8s. From 197122bbad54658714414cbff0ffb076a93feffe Mon Sep 17 00:00:00 2001 From: Ohad Ravid Date: Mon, 6 Mar 2023 14:17:29 +0200 Subject: [PATCH 10/26] Add missing GCP locations --- Cargo.toml | 2 +- src/resources/location.rs | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index bf2af2c..7db4b5b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "cloud-storage" -version = "0.11.1" +version = "0.11.2" authors = ["Luuk Wester "] edition = "2018" description = "A crate for uploading files to Google cloud storage, and for generating download urls." diff --git a/src/resources/location.rs b/src/resources/location.rs index 1551c4b..fb1c598 100644 --- a/src/resources/location.rs +++ b/src/resources/location.rs @@ -86,6 +86,18 @@ pub enum EuropeLocation { /// Store the files in Zurich. #[serde(rename = "EUROPE-WEST6")] Zurich, + /// Store the files in Milan. + #[serde(rename = "EUROPE-WEST8")] + Milan, + /// Store the files in Paris. + #[serde(rename = "EUROPE-WEST9")] + Paris, + /// Store the files in Warsaw. + #[serde(rename = "EUROPE-CENTRAL2")] + Warsaw, + /// Store the files in Madrid. + #[serde(rename = "EUROPE-SOUTHWEST1")] + Madrid, } /// ALl options in Asia. 
From e8ef6770f70769e4599f47153f738bfff09d267a Mon Sep 17 00:00:00 2001 From: SonnyX Date: Tue, 2 May 2023 15:40:57 +0200 Subject: [PATCH 11/26] Update ergonomics of API, move models to separate files, update dependencies, and more --- .cargo/config.toml | 5 + .rustfmt.toml | 64 - Cargo.toml | 51 +- README.md | 10 +- rustfmt.toml | 3 + src/builders/bucket.rs | 1 + src/builders/mod.rs | 1 + src/client.rs | 160 -- src/client/bucket.rs | 202 +- src/client/bucket_access_control.rs | 143 +- src/client/client.rs | 165 ++ src/client/default_object_access_control.rs | 148 +- src/client/hmac_key.rs | 110 +- src/client/mod.rs | 17 + src/client/object.rs | 382 ++- src/client/object_access_control.rs | 123 +- src/configuration/mod.rs | 2 + .../service_account.rs | 22 +- src/{resources => configuration}/signature.rs | 0 src/crypto/mod.rs | 10 + src/crypto/openssl.rs | 16 + src/crypto/ring.rs | 20 + src/error.rs | 368 +-- src/global_client/bucket.rs | 480 ++++ .../bucket_access_control.rs | 209 +- .../default_object_access_control.rs | 185 +- src/{resources => global_client}/hmac_key.rs | 117 +- src/global_client/mod.rs | 63 + src/global_client/object.rs | 1137 +++++++++ .../object_access_control.rs | 223 +- src/lib.rs | 135 +- src/models/action.rs | 11 + src/models/action_type.rs | 8 + src/models/billing.rs | 7 + src/models/binding.rs | 42 + src/models/bucket.rs | 87 + src/models/bucket_access_control.rs | 62 + src/models/compose_parameters.rs | 27 + src/models/compose_request.rs | 13 + src/models/condition.rs | 25 + src/models/copy_paramters.rs | 61 + src/models/cors.rs | 20 + src/models/create/bucket.rs | 49 + src/models/create/bucket_access_control.rs | 27 + .../create/default_object_access_control.rs | 27 + src/models/create/mod.rs | 16 + src/models/create/notification.rs | 22 + src/models/create/object_access_control.rs | 27 + src/models/create/payload_format.rs | 9 + src/models/customer_encryption.rs | 9 + src/models/default_object_access_control.rs | 46 + 
src/models/delete_parameters.rs | 22 + src/models/encryption.rs | 8 + src/{resources/common.rs => models/entity.rs} | 153 +- src/models/error.rs | 42 + src/models/error_list.rs | 15 + src/models/error_reason.rs | 221 ++ src/models/error_response.rs | 30 + src/models/hmac_key.rs | 18 + src/models/hmac_metadata.rs | 29 + src/models/hmac_state.rs | 11 + src/models/iam_condition.rs | 13 + src/models/iam_configuration.rs | 13 + src/models/iam_policy.rs | 19 + src/models/iam_role.rs | 14 + src/models/legacy_iam_role.rs | 38 + src/models/lifecycle.rs | 10 + src/models/list_response.rs | 6 + src/{resources => models}/location.rs | 0 src/models/logging.rs | 9 + src/models/mod.rs | 132 ++ src/{resources => models}/notification.rs | 88 +- src/models/object.rs | 380 +++ src/models/object_access_control.rs | 66 + src/models/object_access_control_list.rs | 9 + src/models/object_create_parameters.rs | 45 + src/models/object_list.rs | 24 + src/models/object_list_request.rs | 53 + src/models/object_precondition.rs | 9 + src/models/object_read_parameters.rs | 27 + src/models/owner.rs | 11 + src/models/primitive_iam_role.rs | 17 + src/models/project_team.rs | 11 + src/models/projection.rs | 9 + src/models/response.rs | 81 + src/models/retention_policy.rs | 16 + src/models/rewrite_parameters.rs | 69 + src/models/rewrite_response.rs | 12 + src/models/role.rs | 11 + src/models/rule.rs | 11 + src/models/source_object.rs | 14 + src/models/standard_iam_role.rs | 28 + src/models/storage_class.rs | 28 + src/models/team.rs | 36 + src/models/test_iam_permission.rs | 13 + src/{resources => models}/topic.rs | 7 +- src/models/uniform_bucket_level_access.rs | 15 + src/models/update_hmac_metadata.rs | 6 + src/models/update_hmac_request.rs | 7 + src/models/update_parameters.rs | 40 + src/models/versioning.rs | 7 + src/models/website.rs | 13 + src/resources/bucket.rs | 1032 --------- src/resources/channel.rs | 29 - src/resources/mod.rs | 28 - src/resources/object.rs | 2039 ----------------- 
src/sized_byte_stream.rs | 33 + src/sync.rs | 79 - src/sync/bucket.rs | 78 +- src/sync/bucket_access_control.rs | 69 +- src/sync/client.rs | 78 + src/sync/default_object_access_control.rs | 67 +- src/sync/hmac_key.rs | 36 +- src/sync/mod.rs | 19 + src/sync/object.rs | 138 +- src/sync/object_access_control.rs | 59 +- src/token.rs | 58 +- 117 files changed, 5340 insertions(+), 5635 deletions(-) create mode 100644 .cargo/config.toml delete mode 100644 .rustfmt.toml create mode 100644 rustfmt.toml create mode 100644 src/builders/bucket.rs create mode 100644 src/builders/mod.rs delete mode 100644 src/client.rs create mode 100644 src/client/client.rs create mode 100644 src/client/mod.rs create mode 100644 src/configuration/mod.rs rename src/{resources => configuration}/service_account.rs (72%) rename src/{resources => configuration}/signature.rs (100%) create mode 100644 src/crypto/mod.rs create mode 100644 src/crypto/openssl.rs create mode 100644 src/crypto/ring.rs create mode 100644 src/global_client/bucket.rs rename src/{resources => global_client}/bucket_access_control.rs (53%) rename src/{resources => global_client}/default_object_access_control.rs (58%) rename src/{resources => global_client}/hmac_key.rs (69%) create mode 100644 src/global_client/mod.rs create mode 100644 src/global_client/object.rs rename src/{resources => global_client}/object_access_control.rs (57%) create mode 100644 src/models/action.rs create mode 100644 src/models/action_type.rs create mode 100644 src/models/billing.rs create mode 100644 src/models/binding.rs create mode 100644 src/models/bucket.rs create mode 100644 src/models/bucket_access_control.rs create mode 100644 src/models/compose_parameters.rs create mode 100644 src/models/compose_request.rs create mode 100644 src/models/condition.rs create mode 100644 src/models/copy_paramters.rs create mode 100644 src/models/cors.rs create mode 100644 src/models/create/bucket.rs create mode 100644 src/models/create/bucket_access_control.rs create 
mode 100644 src/models/create/default_object_access_control.rs create mode 100644 src/models/create/mod.rs create mode 100644 src/models/create/notification.rs create mode 100644 src/models/create/object_access_control.rs create mode 100644 src/models/create/payload_format.rs create mode 100644 src/models/customer_encryption.rs create mode 100644 src/models/default_object_access_control.rs create mode 100644 src/models/delete_parameters.rs create mode 100644 src/models/encryption.rs rename src/{resources/common.rs => models/entity.rs} (53%) create mode 100644 src/models/error.rs create mode 100644 src/models/error_list.rs create mode 100644 src/models/error_reason.rs create mode 100644 src/models/error_response.rs create mode 100644 src/models/hmac_key.rs create mode 100644 src/models/hmac_metadata.rs create mode 100644 src/models/hmac_state.rs create mode 100644 src/models/iam_condition.rs create mode 100644 src/models/iam_configuration.rs create mode 100644 src/models/iam_policy.rs create mode 100644 src/models/iam_role.rs create mode 100644 src/models/legacy_iam_role.rs create mode 100644 src/models/lifecycle.rs create mode 100644 src/models/list_response.rs rename src/{resources => models}/location.rs (100%) create mode 100644 src/models/logging.rs create mode 100644 src/models/mod.rs rename src/{resources => models}/notification.rs (59%) create mode 100644 src/models/object.rs create mode 100644 src/models/object_access_control.rs create mode 100644 src/models/object_access_control_list.rs create mode 100644 src/models/object_create_parameters.rs create mode 100644 src/models/object_list.rs create mode 100644 src/models/object_list_request.rs create mode 100644 src/models/object_precondition.rs create mode 100644 src/models/object_read_parameters.rs create mode 100644 src/models/owner.rs create mode 100644 src/models/primitive_iam_role.rs create mode 100644 src/models/project_team.rs create mode 100644 src/models/projection.rs create mode 100644 
src/models/response.rs create mode 100644 src/models/retention_policy.rs create mode 100644 src/models/rewrite_parameters.rs create mode 100644 src/models/rewrite_response.rs create mode 100644 src/models/role.rs create mode 100644 src/models/rule.rs create mode 100644 src/models/source_object.rs create mode 100644 src/models/standard_iam_role.rs create mode 100644 src/models/storage_class.rs create mode 100644 src/models/team.rs create mode 100644 src/models/test_iam_permission.rs rename src/{resources => models}/topic.rs (90%) create mode 100644 src/models/uniform_bucket_level_access.rs create mode 100644 src/models/update_hmac_metadata.rs create mode 100644 src/models/update_hmac_request.rs create mode 100644 src/models/update_parameters.rs create mode 100644 src/models/versioning.rs create mode 100644 src/models/website.rs delete mode 100644 src/resources/bucket.rs delete mode 100644 src/resources/channel.rs delete mode 100644 src/resources/mod.rs delete mode 100644 src/resources/object.rs create mode 100644 src/sized_byte_stream.rs delete mode 100644 src/sync.rs create mode 100644 src/sync/client.rs create mode 100644 src/sync/mod.rs diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000..4eb4ae5 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,5 @@ +[env] +# Required for linking openssl on windows, has no effect on other platforms. 
+VCPKGRS_DYNAMIC="1" +# Remove this when https://github.com/rust-lang/rust/issues/84277 becomes stable +RUSTC_BOOTSTRAP="1" \ No newline at end of file diff --git a/.rustfmt.toml b/.rustfmt.toml deleted file mode 100644 index f1120af..0000000 --- a/.rustfmt.toml +++ /dev/null @@ -1,64 +0,0 @@ -max_width = 100 -hard_tabs = false -tab_spaces = 4 -newline_style = "Auto" -use_small_heuristics = "Default" -indent_style = "Block" -wrap_comments = false -format_code_in_doc_comments = false -comment_width = 80 -normalize_comments = false -normalize_doc_attributes = false -format_strings = false -format_macro_matchers = false -format_macro_bodies = true -empty_item_single_line = true -struct_lit_single_line = true -fn_single_line = false -where_single_line = false -imports_indent = "Block" -imports_layout = "Mixed" -imports_granularity = "Crate" -reorder_imports = true -reorder_modules = true -reorder_impl_items = false -type_punctuation_density = "Wide" -space_before_colon = false -space_after_colon = true -spaces_around_ranges = false -binop_separator = "Front" -remove_nested_parens = true -combine_control_expr = true -overflow_delimited_expr = false -struct_field_align_threshold = 0 -enum_discrim_align_threshold = 0 -match_arm_blocks = true -force_multiline_blocks = false -fn_args_layout = "Tall" -brace_style = "SameLineWhere" -control_brace_style = "AlwaysSameLine" -trailing_semicolon = true -trailing_comma = "Vertical" -match_block_trailing_comma = false -blank_lines_upper_bound = 1 -blank_lines_lower_bound = 0 -edition = "2018" -version = "One" -inline_attribute_width = 0 -merge_derives = true -use_try_shorthand = false -use_field_init_shorthand = false -force_explicit_abi = true -condense_wildcard_suffixes = false -color = "Auto" -unstable_features = false -disable_all_formatting = false -skip_children = false -hide_parse_errors = false -error_on_line_overflow = false -error_on_unformatted = false -report_todo = "Never" -report_fixme = "Never" -ignore = [] -emit_mode 
= "Files" -make_backup = false diff --git a/Cargo.toml b/Cargo.toml index bc92c46..6303c54 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,21 +1,21 @@ [package] name = "cloud-storage" -version = "0.11.2" +version = "1.0.0" authors = ["Luuk Wester "] -edition = "2018" +edition = "2021" description = "A crate for uploading files to Google cloud storage, and for generating download urls." license = "MIT" -repository = "https://github.com/ThouCheese/cloud-storage-rs" +repository = "https://github.com/SonnyX/cloud-storage-rs" documentation = "https://docs.rs/cloud-storage" keywords = ["google", "cloud", "storage"] readme = "README.md" categories = ["api-bindings", "web-programming"] -resolver = "2" # maintenance = { status = "actively-developed" } [features] -default = ["native-tls"] +default = ["native-tls", "ring", "pem", "global-client"] +dotenv = ["dep:dotenv"] global-client = [] sync = ["reqwest/blocking"] native-tls = ["reqwest/default-tls", "openssl"] @@ -23,27 +23,28 @@ rustls-tls = ["reqwest/rustls-tls", "ring", "pem"] trust-dns = ["reqwest/trust-dns"] [dependencies] -reqwest = { version = "0.11", default-features = false, features = ["json", "stream", "multipart"] } -percent-encoding = { version = "2", default-features = false } -jsonwebtoken = { version = "8.1", default-features = false, features = ["use_pem"] } -serde = { version = "1", default-features = false, features = ["derive"] } -serde_json = { version = "1", default-features = false } -base64 = { version = "0.13", default-features = false } -lazy_static = { version = "1", default-features = false } -dotenv = { version = "0.15", default-features = false } -openssl = { version = "0.10", default-features = false, optional = true } -ring = { version = "0.16", default-features = false, optional = true } -pem = { version = "0.8", default-features = false, optional = true } -time = { version = "0.3", default-features = false, features = ["serde-well-known", "serde-human-readable", "macros"]} -hex = { version 
= "0.4", default-features = false, features = ["alloc"] } -tokio = { version = "1.0", default-features = false, features = ["macros", "rt"] } -tokio-util = { version = "0.7", default-features = false, features = ["compat"] } -futures-util = { version = "0.3", default_features = false, features = ["alloc"] } -bytes = { version = "1.0", default-features = false } -async-trait = { version = "0.1.48", default-features = false } +reqwest = { version = "0.11.16", default-features = false, features = ["json", "stream", "multipart"] } +percent-encoding = { version = "2.2.0", default-features = false } +jsonwebtoken = { version = "8.3.0", default-features = false, features = ["use_pem"] } +serde = { version = "1.0.160", default-features = false, features = ["derive"] } +serde_json = { version = "1.0.96", default-features = false } +base64 = { version = "0.21.0", default-features = false } +once_cell = { version = "1.17.1", default-features = false } +time = { version = "0.3.20", default-features = false, features = ["serde-well-known", "serde-human-readable", "macros"]} +hex = { version = "0.4.3", default-features = false, features = ["alloc"] } +tokio = { version = "1.28.0", default-features = false, features = ["macros", "rt"] } +tokio-util = { version = "0.7.8", default-features = false, features = ["compat"] } +futures-util = { version = "0.3.28", default_features = false, features = ["alloc"] } +bytes = { version = "1.4.0", default-features = false } +async-trait = { version = "0.1.68", default-features = false } +# Optional features +openssl = { version = "0.10.52", default-features = false, optional = true } +ring = { version = "0.16.20", default-features = false, optional = true } +pem = { version = "2.0.1", default-features = false, optional = true, features = ["std"] } +dotenv = { version = "0.15.0", default-features = false, optional = true } [dev-dependencies] -tokio = { version = "1.0", default-features = false, features = ["full"] } +tokio = { version = 
"1.28.0", default-features = false, features = ["full"] } [package.metadata.docs.rs] -features = ["global-client", "sync"] +features = ["global-client", "sync"] \ No newline at end of file diff --git a/README.md b/README.md index 966d03c..257008e 100644 --- a/README.md +++ b/README.md @@ -13,23 +13,21 @@ cloud-storage = "0.10" ### Examples ```rust // create a new Bucket -let new_bucket = NewBucket { name: "mybucket", ..Default::default() } +let new_bucket = create::Bucket { name: "my_bucket", ..Default::default() } let bucket = Bucket::create(new_bucket).await?; // upload a file to our new bucket let content = b"Your file is now on google cloud storage!"; bucket.upload(content, "folder/filename.txt", "application/text", None).await?; -let mut object = Object::create("mybucket", content, "folder/filename.txt", "application/text", None).await?; +let mut object = Object::create("my_bucket", content, "folder/filename.txt", "application/text", None).await?; // let's copy the file -object.copy("mybucket2: electric boogaloo", "otherfolder/filename.txt", None).await?; +object.copy("my_bucket2: electric boogaloo", "otherfolder/filename.txt", None).await?; // print a link to the file println!("{}", object.download_url(1000)); // download link for 1000 seconds // remove the file from the bucket object.delete(None).await?; ``` -Authorization can be granted using the `SERVICE_ACCOUNT` or `GOOGLE_APPLICATION_CREDENTIALS` environment variable, which should contain path to the `service-account-*******.json` file that contains the Google credentials. Alternatively, the service account credentials can be provided as JSON directly through the `SERVICE_ACCOUNT_JSON` or `GOOGLE_APPLICATION_CREDENTIALS_JSON` environment variable, which is useful when providing secrets in CI or k8s. - -The service account should also have the roles `Service Account Token Creator` (for generating access tokens) and `Storage Object Admin` (for generating sign urls to download the files). 
+The service account should have the roles `Service Account Token Creator` (for generating access tokens) and `Storage Object Admin` (for generating sign urls to download the files). ### Sync If you're not (yet) interested in running an async executor, then `cloud_storage` exposes a sync api. To use it, enable the feature flag `sync`, and then call instead of calling `function().await`, call `function_sync()`. diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 0000000..c4de4c2 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,3 @@ +imports_granularity = "Crate" +edition = "2021" +error_on_line_overflow = true diff --git a/src/builders/bucket.rs b/src/builders/bucket.rs new file mode 100644 index 0000000..8558918 --- /dev/null +++ b/src/builders/bucket.rs @@ -0,0 +1 @@ +use crate::Error; \ No newline at end of file diff --git a/src/builders/mod.rs b/src/builders/mod.rs new file mode 100644 index 0000000..8192685 --- /dev/null +++ b/src/builders/mod.rs @@ -0,0 +1 @@ +mod bucket; \ No newline at end of file diff --git a/src/client.rs b/src/client.rs deleted file mode 100644 index 6c8745b..0000000 --- a/src/client.rs +++ /dev/null @@ -1,160 +0,0 @@ -//! Clients for Google Cloud Storage endpoints. - -use std::{fmt, sync}; - -use crate::token::TokenCache; - -mod bucket; -mod bucket_access_control; -mod default_object_access_control; -mod hmac_key; -mod object; -mod object_access_control; - -pub use bucket::BucketClient; -pub use bucket_access_control::BucketAccessControlClient; -pub use default_object_access_control::DefaultObjectAccessControlClient; -pub use hmac_key::HmacKeyClient; -pub use object::ObjectClient; -pub use object_access_control::ObjectAccessControlClient; - -/// The primary entrypoint to perform operations with Google Cloud Storage. 
-pub struct Client { - client: reqwest::Client, - /// Static `Token` struct that caches - token_cache: sync::Arc, -} - -impl fmt::Debug for Client { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Client") - .field("client", &self.client) - .field("token_cache", &"") - .finish() - } -} - -impl Default for Client { - fn default() -> Self { - Self { - client: Default::default(), - token_cache: sync::Arc::new(crate::Token::default()), - } - } -} - -impl Client { - /// Constructs a client with the default token provider, where it attemps to obtain the - /// credentials from the following locations: - /// - /// 1. Checks for the environment variable `SERVICE_ACCOUNT`, and if it exists, reads the file - /// at the path specified there as a credentials json file. - /// 2. It attemps to do the same with the `GOOGLE_APPLICATION_CREDENTIALS` var. - /// 3. It reads the `SERVICE_ACCOUNT_JSON` environment variable directly as json and uses that - /// 4. It attemps to do the same with the `GOOGLE_APPLICATION_CREDENTIALS_JSON` var. - pub fn new() -> Self { - Default::default() - } - - /// Constucts a client with given reqwest client - pub fn with_client(client: reqwest::Client) -> Self { - Self { - client: client, - token_cache: sync::Arc::new(crate::Token::default()), - } - } - - /// Initializer with a provided refreshable token - pub fn with_cache(token: impl TokenCache + Send + 'static) -> Self { - Self { - client: Default::default(), - token_cache: sync::Arc::new(token), - } - } - - /// Creates a new [ClientBuilder] - pub fn builder() -> ClientBuilder { - ClientBuilder::new() - } - - /// Operations on [`Bucket`](crate::bucket::Bucket)s. - pub fn bucket(&self) -> BucketClient<'_> { - BucketClient(self) - } - - /// Operations on [`BucketAccessControl`](crate::bucket_access_control::BucketAccessControl)s. 
- pub fn bucket_access_control(&self) -> BucketAccessControlClient<'_> { - BucketAccessControlClient(self) - } - - /// Operations on [`DefaultObjectAccessControl`](crate::default_object_access_control::DefaultObjectAccessControl)s. - pub fn default_object_access_control(&self) -> DefaultObjectAccessControlClient<'_> { - DefaultObjectAccessControlClient(self) - } - - /// Operations on [`HmacKey`](crate::hmac_key::HmacKey)s. - pub fn hmac_key(&self) -> HmacKeyClient<'_> { - HmacKeyClient(self) - } - - /// Operations on [`Object`](crate::object::Object)s. - pub fn object(&self) -> ObjectClient<'_> { - ObjectClient(self) - } - - /// Operations on [`ObjectAccessControl`](crate::object_access_control::ObjectAccessControl)s. - pub fn object_access_control(&self) -> ObjectAccessControlClient<'_> { - ObjectAccessControlClient(self) - } - - async fn get_headers(&self) -> crate::Result { - let mut result = reqwest::header::HeaderMap::new(); - let token = self.token_cache.get(&self.client).await?; - result.insert( - reqwest::header::AUTHORIZATION, - format!("Bearer {}", token).parse().unwrap(), - ); - Ok(result) - } -} - -/// A ClientBuilder can be used to create a Client with custom configuration. -#[derive(Default)] -pub struct ClientBuilder { - client: Option, - /// Static `Token` struct that caches - token_cache: Option>, -} - -impl ClientBuilder { - /// Constructs a new ClientBuilder - pub fn new() -> Self { - Default::default() - } - - /// Returns a `Client` that uses this `ClientBuilder` configuration. 
- pub fn build(self) -> Client { - Client { - client: self.client.unwrap_or_default(), - token_cache: self - .token_cache - .unwrap_or(sync::Arc::new(crate::Token::default())), - } - } - - /// Sets refreshable token - pub fn with_cache(self, token: impl TokenCache + Send + 'static) -> Self { - ClientBuilder { - token_cache: Some(sync::Arc::new(token)), - ..self - } - } - - /// Sets internal [reqwest Client](https://docs.rs/reqwest/latest/reqwest/struct.Client.html) - pub fn with_reqwest_client(self, reqwest_client: reqwest::Client) -> Self { - ClientBuilder { - client: Some(reqwest_client), - ..self - } - } -} diff --git a/src/client/bucket.rs b/src/client/bucket.rs index 6ea3591..58bf7fc 100644 --- a/src/client/bucket.rs +++ b/src/client/bucket.rs @@ -1,14 +1,13 @@ -use crate::{ - bucket::{IamPolicy, TestIamPermission}, - error::GoogleResponse, - object::percent_encode, - resources::common::ListResponse, - Bucket, NewBucket, -}; +use crate::{models::{create, ListResponse, IamPolicy, TestIamPermission}, Bucket, Error}; + /// Operations on [`Bucket`]()s. #[derive(Debug)] -pub struct BucketClient<'a>(pub(super) &'a super::Client); +pub struct BucketClient<'a> { + pub(crate) client: &'a super::client::Client, + pub(crate) bucket_url: &'a str, + pub(crate) project_id: &'a str, +} impl<'a> BucketClient<'a> { /// Creates a new `Bucket`. 
There are many options that you can provide for creating a new @@ -20,11 +19,11 @@ impl<'a> BucketClient<'a> { /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { /// use cloud_storage::Client; - /// use cloud_storage::bucket::{Bucket, NewBucket}; + /// use cloud_storage::bucket::{Bucket, create::Bucket}; /// use cloud_storage::bucket::{Location, MultiRegion}; /// /// let client = Client::default(); - /// let new_bucket = NewBucket { + /// let new_bucket = create::Bucket { /// name: "cloud-storage-rs-doc-1".to_string(), // this is the only mandatory field /// location: Location::Multi(MultiRegion::Eu), /// ..Default::default() @@ -34,25 +33,13 @@ impl<'a> BucketClient<'a> { /// # Ok(()) /// # } /// ``` - pub async fn create(&self, new_bucket: &NewBucket) -> crate::Result { - let url = format!("{}/b/", crate::BASE_URL); - let project = &crate::SERVICE_ACCOUNT.project_id; + pub async fn create(&self, new_bucket: &create::Bucket) -> Result { + let headers = self.client.get_headers().await?; + let url = format!("{}/", self.bucket_url); + let project = self.project_id; let query = [("project", project)]; - let result: GoogleResponse = self - .0 - .client - .post(&url) - .headers(self.0.get_headers().await?) - .query(&query) - .json(new_bucket) - .send() - .await? - .json() - .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + let result: crate::models::Response = self.client.reqwest.post(&url).headers(headers).query(&query).json(new_bucket).send().await?.json().await?; + Ok(result?) } /// Returns all `Bucket`s within this project. 
@@ -72,24 +59,13 @@ impl<'a> BucketClient<'a> { /// # Ok(()) /// # } /// ``` - pub async fn list(&self) -> crate::Result> { - let url = format!("{}/b/", crate::BASE_URL); - let project = &crate::SERVICE_ACCOUNT.project_id; + pub async fn list(&self) -> Result, Error> { + let headers = self.client.get_headers().await?; + let url = format!("{}/", self.bucket_url); + let project = self.project_id; let query = [("project", project)]; - let result: GoogleResponse> = self - .0 - .client - .get(&url) - .headers(self.0.get_headers().await?) - .query(&query) - .send() - .await? - .json() - .await?; - match result { - GoogleResponse::Success(s) => Ok(s.items), - GoogleResponse::Error(e) => Err(e.into()), - } + let result: crate::models::Response> = self.client.reqwest.get(&url).headers(headers).query(&query).send().await?.json().await?; + Ok(result?.items) } /// Returns a single `Bucket` by its name. If the Bucket does not exist, an error is returned. @@ -102,7 +78,7 @@ impl<'a> BucketClient<'a> { /// /// let client = Client::default(); /// # use cloud_storage::bucket::NewBucket; - /// # let new_bucket = NewBucket { + /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-2".to_string(), /// # ..Default::default() /// # }; @@ -113,21 +89,11 @@ impl<'a> BucketClient<'a> { /// # Ok(()) /// # } /// ``` - pub async fn read(&self, name: &str) -> crate::Result { - let url = format!("{}/b/{}", crate::BASE_URL, percent_encode(name),); - let result: GoogleResponse = self - .0 - .client - .get(&url) - .headers(self.0.get_headers().await?) - .send() - .await? 
- .json() - .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + pub async fn read(&self, name: &str) -> Result { + let headers = self.client.get_headers().await?; + let url = format!("{}/{}", self.bucket_url, crate::percent_encode(name),); + let result: crate::models::Response = self.client.reqwest.get(&url).headers(headers).send().await?.json().await?; + Ok(result?) } /// Update an existing `Bucket`. If you declare you bucket as mutable, you can edit its fields. @@ -141,7 +107,7 @@ impl<'a> BucketClient<'a> { /// /// let client = Client::default(); /// # use cloud_storage::bucket::NewBucket; - /// # let new_bucket = NewBucket { + /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-3".to_string(), /// # ..Default::default() /// # }; @@ -158,22 +124,11 @@ impl<'a> BucketClient<'a> { /// # Ok(()) /// # } /// ``` - pub async fn update(&self, bucket: &Bucket) -> crate::Result { - let url = format!("{}/b/{}", crate::BASE_URL, percent_encode(&bucket.name),); - let result: GoogleResponse = self - .0 - .client - .put(&url) - .headers(self.0.get_headers().await?) - .json(bucket) - .send() - .await? - .json() - .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + pub async fn update(&self, bucket: &Bucket) -> Result { + let headers = self.client.get_headers().await?; + let url = format!("{}/{}", self.bucket_url, crate::percent_encode(&bucket.name),); + let result: crate::models::Response = self.client.reqwest.put(&url).headers(headers).json(bucket).send().await?.json().await?; + Ok(result?) } /// Delete an existing `Bucket`. This permanently removes a bucket from Google Cloud Storage. 
@@ -188,7 +143,7 @@ impl<'a> BucketClient<'a> { /// /// let client = Client::default(); /// # use cloud_storage::bucket::NewBucket; - /// # let new_bucket = NewBucket { + /// # let new_bucket = create::Bucket { /// # name: "unnecessary-bucket".to_string(), /// # ..Default::default() /// # }; @@ -199,15 +154,10 @@ impl<'a> BucketClient<'a> { /// # Ok(()) /// # } /// ``` - pub async fn delete(&self, bucket: Bucket) -> crate::Result<()> { - let url = format!("{}/b/{}", crate::BASE_URL, percent_encode(&bucket.name)); - let response = self - .0 - .client - .delete(&url) - .headers(self.0.get_headers().await?) - .send() - .await?; + pub async fn delete(&self, bucket: Bucket) -> Result<(), Error> { + let headers = self.client.get_headers().await?; + let url = format!("{}/{}", self.bucket_url, crate::percent_encode(&bucket.name)); + let response = self.client.reqwest.delete(&url).headers(headers).send().await?; if response.status().is_success() { Ok(()) } else { @@ -225,7 +175,7 @@ impl<'a> BucketClient<'a> { /// /// let client = Client::default(); /// # use cloud_storage::bucket::NewBucket; - /// # let new_bucket = NewBucket { + /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-4".to_string(), /// # ..Default::default() /// # }; @@ -237,21 +187,11 @@ impl<'a> BucketClient<'a> { /// # Ok(()) /// # } /// ``` - pub async fn get_iam_policy(&self, bucket: &Bucket) -> crate::Result { - let url = format!("{}/b/{}/iam", crate::BASE_URL, percent_encode(&bucket.name)); - let result: GoogleResponse = self - .0 - .client - .get(&url) - .headers(self.0.get_headers().await?) - .send() - .await? 
- .json() - .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + pub async fn get_iam_policy(&self, bucket: &Bucket) -> Result { + let headers = self.client.get_headers().await?; + let url = format!("{}/{}/iam", self.bucket_url, crate::percent_encode(&bucket.name)); + let result: crate::models::Response = self.client.reqwest.get(&url).headers(headers).send().await?.json().await?; + Ok(result?) } /// Updates the [IAM Policy](https://cloud.google.com/iam/docs/) for this bucket. @@ -265,7 +205,7 @@ impl<'a> BucketClient<'a> { /// /// let client = Client::default(); /// # use cloud_storage::bucket::NewBucket; - /// # let new_bucket = NewBucket { + /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-5".to_string(), /// # ..Default::default() /// # }; @@ -292,22 +232,11 @@ impl<'a> BucketClient<'a> { &self, bucket: &Bucket, iam: &IamPolicy, - ) -> crate::Result { - let url = format!("{}/b/{}/iam", crate::BASE_URL, percent_encode(&bucket.name)); - let result: GoogleResponse = self - .0 - .client - .put(&url) - .headers(self.0.get_headers().await?) - .json(iam) - .send() - .await? - .json() - .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + ) -> Result { + let headers = self.client.get_headers().await?; + let url = format!("{}/{}/iam", self.bucket_url, crate::percent_encode(&bucket.name)); + let result: crate::models::Response = self.client.reqwest.put(&url).headers(headers).json(iam).send().await?.json().await?; + Ok(result?) } /// Checks whether the user provided in the service account has this permission. 
@@ -318,9 +247,9 @@ impl<'a> BucketClient<'a> { /// use cloud_storage::Client; /// use cloud_storage::Bucket; /// - /// let client = Client::default(); - /// let bucket = client.bucket().read("my-bucket").await?; - /// client.bucket().test_iam_permission(&bucket, "storage.buckets.get").await?; + /// let bucket_client = Client::default().bucket(); + /// let bucket = bucket_client.read("my_bucket").await?; + /// bucket_client.test_iam_permission(&bucket, "storage.buckets.get").await?; /// # Ok(()) /// # } /// ``` @@ -328,30 +257,19 @@ impl<'a> BucketClient<'a> { &self, bucket: &Bucket, permission: &str, - ) -> crate::Result { + ) -> Result { if permission == "storage.buckets.list" || permission == "storage.buckets.create" { return Err(crate::Error::new( "tested permission must not be `storage.buckets.list` or `storage.buckets.create`", )); } let url = format!( - "{}/b/{}/iam/testPermissions", - crate::BASE_URL, - percent_encode(&bucket.name) + "{}/{}/iam/testPermissions", + self.bucket_url, + crate::percent_encode(&bucket.name) ); - let result: GoogleResponse = self - .0 - .client - .get(&url) - .headers(self.0.get_headers().await?) - .query(&[("permissions", permission)]) - .send() - .await? - .json() - .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + let headers = self.client.get_headers().await?; + let result: crate::models::Response = self.client.reqwest.get(&url).headers(headers).query(&[("permissions", permission)]).send().await?.json().await?; + Ok(result?) 
} } diff --git a/src/client/bucket_access_control.rs b/src/client/bucket_access_control.rs index acd46b7..f91ae1a 100644 --- a/src/client/bucket_access_control.rs +++ b/src/client/bucket_access_control.rs @@ -1,16 +1,14 @@ -use crate::{ - bucket_access_control::{BucketAccessControl, Entity, NewBucketAccessControl}, - error::GoogleResponse, - object::percent_encode, - resources::common::ListResponse, -}; +use crate::{models::{create, BucketAccessControl, ListResponse, Entity}, Error}; /// Operations on [`BucketAccessControl`](BucketAccessControl)s. -pub struct BucketAccessControlClient<'a>(pub(super) &'a super::Client); +#[derive(Debug)] +pub struct BucketAccessControlClient<'a> { + pub(crate) client: &'a super::client::Client, + pub(crate) bucket_acl_url: String +} impl<'a> BucketAccessControlClient<'a> { - /// Create a new `BucketAccessControl` using the provided `NewBucketAccessControl`, related to - /// the `Bucket` provided by the `bucket_name` argument. + /// Create a new `BucketAccessControl` using the provided `create::BucketAccessControl`. 
/// /// ### Important /// Important: This method fails with a 400 Bad Request response for buckets with uniform @@ -21,38 +19,25 @@ impl<'a> BucketAccessControlClient<'a> { /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { /// use cloud_storage::Client; - /// use cloud_storage::bucket_access_control::{BucketAccessControl, NewBucketAccessControl}; + /// use cloud_storage::bucket_access_control::{BucketAccessControl, create::BucketAccessControl}; /// use cloud_storage::bucket_access_control::{Role, Entity}; /// /// let client = Client::default(); - /// let new_bucket_access_control = NewBucketAccessControl { + /// let new_bucket_access_control = create::BucketAccessControl { /// entity: Entity::AllUsers, /// role: Role::Reader, /// }; - /// client.bucket_access_control().create("mybucket", &new_bucket_access_control).await?; + /// client.bucket_access_control("my_bucket").create_using(&new_bucket_access_control).await?; /// # Ok(()) /// # } /// ``` - pub async fn create( + pub async fn create_using( &self, - bucket: &str, - new_bucket_access_control: &NewBucketAccessControl, - ) -> crate::Result { - let url = format!("{}/b/{}/acl", crate::BASE_URL, percent_encode(bucket)); - let result: GoogleResponse = self - .0 - .client - .post(&url) - .headers(self.0.get_headers().await?) - .json(new_bucket_access_control) - .send() - .await? - .json() - .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + new_bucket_access_control: &create::BucketAccessControl, + ) -> Result { + let headers = self.client.get_headers().await?; + let result: crate::models::Response = self.client.reqwest.post(&self.bucket_acl_url).headers(headers).json(new_bucket_access_control).send().await?.json().await?; + Ok(result?) } /// Returns all `BucketAccessControl`s related to this bucket. 
@@ -69,28 +54,20 @@ impl<'a> BucketAccessControlClient<'a> { /// use cloud_storage::bucket_access_control::BucketAccessControl; /// /// let client = Client::default(); - /// let acls = client.bucket_access_control().list("mybucket").await?; + /// let acls = client.bucket_access_control("my_bucket").list().await?; /// # Ok(()) /// # } /// ``` - pub async fn list(&self, bucket: &str) -> crate::Result> { - let url = format!("{}/b/{}/acl", crate::BASE_URL, percent_encode(bucket)); - let result: GoogleResponse> = self - .0 - .client - .get(&url) - .headers(self.0.get_headers().await?) - .send() - .await? - .json() - .await?; + pub async fn list(&self) -> Result, Error> { + let headers = self.client.get_headers().await?; + let result: crate::models::Response> = self.client.reqwest.get(&self.bucket_acl_url).headers(headers).send().await?.json().await?; match result { - GoogleResponse::Success(s) => Ok(s.items), - GoogleResponse::Error(e) => Err(e.into()), + crate::models::Response::Success(s) => Ok(s.items), + crate::models::Response::Error(e) => Err(e.into()), } } - /// Returns the ACL entry for the specified entity on the specified bucket. + /// Returns the ACL entry for the specified entity. 
/// /// ### Important /// Important: This method fails with a 400 Bad Request response for buckets with uniform @@ -104,30 +81,19 @@ impl<'a> BucketAccessControlClient<'a> { /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; /// /// let client = Client::default(); - /// let controls = client.bucket_access_control().read("mybucket", &Entity::AllUsers).await?; + /// let controls = client.bucket_access_control("my_bucket").read(&Entity::AllUsers).await?; /// # Ok(()) /// # } /// ``` - pub async fn read(&self, bucket: &str, entity: &Entity) -> crate::Result { + pub async fn read(&self, entity: &Entity) -> Result { let url = format!( - "{}/b/{}/acl/{}", - crate::BASE_URL, - percent_encode(bucket), - percent_encode(&entity.to_string()) + "{}/{}", + self.bucket_acl_url, + crate::percent_encode(&entity.to_string()) ); - let result: GoogleResponse = self - .0 - .client - .get(&url) - .headers(self.0.get_headers().await?) - .send() - .await? - .json() - .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + let headers = self.client.get_headers().await?; + let result: crate::models::Response = self.client.reqwest.get(&url).headers(headers).send().await?.json().await?; + Ok(result?) } /// Update this `BucketAccessControl`. 
@@ -144,7 +110,7 @@ impl<'a> BucketAccessControlClient<'a> { /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; /// /// let client = Client::default(); - /// let mut acl = client.bucket_access_control().read("mybucket", &Entity::AllUsers).await?; + /// let mut acl = client.bucket_access_control("my_bucket").read(&Entity::AllUsers).await?; /// acl.entity = Entity::AllAuthenticatedUsers; /// client.bucket_access_control().update(&acl).await?; /// # Ok(()) @@ -153,27 +119,15 @@ impl<'a> BucketAccessControlClient<'a> { pub async fn update( &self, bucket_access_control: &BucketAccessControl, - ) -> crate::Result { + ) -> Result { let url = format!( - "{}/b/{}/acl/{}", - crate::BASE_URL, - percent_encode(&bucket_access_control.bucket), - percent_encode(&bucket_access_control.entity.to_string()), + "{}/{}", + self.bucket_acl_url, + crate::percent_encode(&bucket_access_control.entity.to_string()), ); - let result: GoogleResponse = self - .0 - .client - .put(&url) - .headers(self.0.get_headers().await?) - .json(bucket_access_control) - .send() - .await? - .json() - .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + let headers = self.client.get_headers().await?; + let result: crate::models::Response = self.client.reqwest.put(&url).headers(headers).json(bucket_access_control).send().await?.json().await?; + Ok(result?) } /// Permanently deletes the ACL entry for the specified entity on the specified bucket. 
@@ -190,23 +144,24 @@ impl<'a> BucketAccessControlClient<'a> { /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; /// /// let client = Client::default(); - /// let controls = client.bucket_access_control().read("mybucket", &Entity::AllUsers).await?; - /// client.bucket_access_control().delete(controls).await?; + /// let my_bucket = client.bucket_access_control("my_bucket"); + /// let controls = my_bucket.read(&Entity::AllUsers).await?; + /// my_bucket.delete(controls).await?; /// # Ok(()) /// # } /// ``` - pub async fn delete(&self, bucket_access_control: BucketAccessControl) -> crate::Result<()> { + pub async fn delete(&self, bucket_access_control: BucketAccessControl) -> Result<(), Error> { let url = format!( - "{}/b/{}/acl/{}", - crate::BASE_URL, - percent_encode(&bucket_access_control.bucket), - percent_encode(&bucket_access_control.entity.to_string()), + "{}/{}", + self.bucket_acl_url, + crate::percent_encode(&bucket_access_control.entity.to_string()), ); + let headers = self.client.get_headers().await?; let response = self - .0 .client + .reqwest .delete(&url) - .headers(self.0.get_headers().await?) + .headers(headers) .send() .await?; if response.status().is_success() { diff --git a/src/client/client.rs b/src/client/client.rs new file mode 100644 index 0000000..ba4937d --- /dev/null +++ b/src/client/client.rs @@ -0,0 +1,165 @@ +//! Clients for Google Cloud Storage endpoints. + +use std::{fmt, sync}; +use crate::{Error, token::TokenCache, ServiceAccount}; + +use super::{BucketClient, BucketAccessControlClient, DefaultObjectAccessControlClient, HmacKeyClient, ObjectClient, ObjectAccessControlClient}; + +/// The primary entrypoint to perform operations with Google Cloud Storage. 
+pub struct Client { + pub(crate) reqwest: reqwest::Client, + pub(crate) service_account: crate::ServiceAccount, + /// Static `Token` struct that caches + pub(crate) token_cache: sync::Arc, +} + +impl fmt::Debug for Client { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Client") + .field("client", &self.reqwest) + .field("token_cache", &"") + .finish() + } +} + +impl Default for Client { + fn default() -> Self { + Self { + reqwest: Default::default(), + token_cache: sync::Arc::new(crate::Token::default()), + service_account: crate::ServiceAccount::default() + } + } +} + +impl Client { + /// Constucts a client with given reqwest client + pub fn with_client(client: reqwest::Client) -> Self { + Self { + reqwest: client, + token_cache: sync::Arc::new(crate::Token::default()), + service_account: crate::ServiceAccount::default() + } + } + + /// Initializer with a provided refreshable token + pub fn with_cache(token: impl TokenCache + 'static) -> Self { + Self { + reqwest: Default::default(), + token_cache: sync::Arc::new(token), + service_account: crate::ServiceAccount::default() + } + } + + /// Creates a new [ClientBuilder] + pub fn builder() -> ClientBuilder { + ClientBuilder::new() + } + + /// Operations on [`Bucket`](crate::bucket::Bucket)s. + pub fn bucket(&self) -> BucketClient { + BucketClient { + bucket_url: "https://storage.googleapis.com/storage/v1/b/", + project_id: &self.service_account.project_id, + client: self, + } + } + + /// Operations on [`BucketAccessControl`](crate::bucket_access_control::BucketAccessControl)s. + pub fn bucket_access_control(&self, bucket: &str) -> BucketAccessControlClient { + let url = format!("https://storage.googleapis.com/storage/v1/b/{}/acl", crate::percent_encode(bucket)); + BucketAccessControlClient { + bucket_acl_url: url, + client: &self + } + } + + /// Operations on [`DefaultObjectAccessControl`](crate::default_object_access_control::DefaultObjectAccessControl)s. 
+ pub fn default_object_access_control(&self, bucket: &str) -> DefaultObjectAccessControlClient { + let url = format!("https://storage.googleapis.com/storage/v1/b/{}/defaultObjectAcl", crate::percent_encode(bucket)); + DefaultObjectAccessControlClient { + base_url: url, + bucket: bucket.to_string(), + client: self + } + } + + /// Operations on [`HmacKey`](crate::hmac_key::HmacKey)s. + pub fn hmac_key(&self) -> HmacKeyClient { + HmacKeyClient { + hmac_keys_url: format!("https://storage.googleapis.com/storage/v1/projects/{}/hmacKeys", &self.service_account.project_id), + client_email: self.service_account.client_email.clone(), + client: self, + } + } + + /// Operations on [`Object`](crate::object::Object)s. + pub fn object(&self) -> ObjectClient { + ObjectClient { + object_creation_url: todo!(), + base_url: "https://storage.googleapis.com/storage/v1/", + client: self, + } + } + + /// Operations on [`ObjectAccessControl`](crate::object_access_control::ObjectAccessControl)s. + pub fn object_access_control(&self, bucket: &str, object: &str,) -> ObjectAccessControlClient { + ObjectAccessControlClient { + acl_url: format!("https://storage.googleapis.com/storage/v1/b/{}/o/{}/acl", crate::percent_encode(bucket), crate::percent_encode(object)), + client: self + } + } + + pub(crate) async fn get_headers(&self) -> Result { + let mut result = reqwest::header::HeaderMap::new(); + let token = self.token_cache.get(&self.reqwest, self.service_account.client_email.clone(), self.service_account.private_key.as_bytes()).await?; + result.insert( + reqwest::header::AUTHORIZATION, + format!("Bearer {}", token).parse().unwrap(), + ); + Ok(result) + } +} + +/// A ClientBuilder can be used to create a Client with custom configuration. 
+#[derive(Default)] +pub struct ClientBuilder { + client: Option, + /// Static `Token` struct that caches + token_cache: Option>, + service_account: Option +} + +impl ClientBuilder { + /// Constructs a new ClientBuilder + pub fn new() -> Self { + Default::default() + } + + /// Returns a `Client` that uses this `ClientBuilder` configuration. + pub fn build(self) -> Client { + Client { + reqwest: self.client.unwrap_or_default(), + token_cache: self.token_cache.unwrap_or(sync::Arc::new(crate::Token::default())), + service_account: self.service_account.unwrap_or(crate::ServiceAccount::default()) + } + } + + /// Sets refreshable token + pub fn with_cache(&mut self, token: impl TokenCache + 'static) -> &mut Self { + self.token_cache = Some(sync::Arc::new(token)); + self + } + + /// Sets service account + pub fn with_service_account(&mut self, service_account: crate::ServiceAccount) -> &mut Self { + self.service_account = Some(service_account); + self + } + + /// Sets internal [reqwest Client](https://docs.rs/reqwest/latest/reqwest/struct.Client.html) + pub fn with_reqwest_client(&mut self, reqwest_client: reqwest::Client) -> &mut Self { + self.client = Some(reqwest_client); + self + } +} diff --git a/src/client/default_object_access_control.rs b/src/client/default_object_access_control.rs index 3c66832..ec4278b 100644 --- a/src/client/default_object_access_control.rs +++ b/src/client/default_object_access_control.rs @@ -1,14 +1,13 @@ -use crate::{ - bucket_access_control::Entity, - default_object_access_control::{DefaultObjectAccessControl, NewDefaultObjectAccessControl}, - error::GoogleResponse, - object::percent_encode, - resources::common::ListResponse, -}; +use crate::{models::{create, DefaultObjectAccessControl, ListResponse, Entity}, Error}; + /// Operations on [`DefaultObjectAccessControl`](DefaultObjectAccessControl)s. 
#[derive(Debug)] -pub struct DefaultObjectAccessControlClient<'a>(pub(super) &'a super::Client); +pub struct DefaultObjectAccessControlClient<'a> { + pub(crate) client: &'a super::client::Client, + pub(crate) base_url: String, + pub(crate) bucket: String, +} impl<'a> DefaultObjectAccessControlClient<'a> { /// Create a new `DefaultObjectAccessControl` entry on the specified bucket. @@ -22,45 +21,43 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// # async fn main() -> Result<(), Box> { /// use cloud_storage::Client; /// use cloud_storage::default_object_access_control::{ - /// DefaultObjectAccessControl, NewDefaultObjectAccessControl, Role, Entity, + /// DefaultObjectAccessControl, create::DefaultObjectAccessControl, Role, Entity, /// }; /// /// let client = Client::default(); - /// let new_acl = NewDefaultObjectAccessControl { + /// let new_acl = create::DefaultObjectAccessControl { /// entity: Entity::AllAuthenticatedUsers, /// role: Role::Reader, /// }; - /// let default_acl = client.default_object_access_control().create("mybucket", &new_acl).await?; + /// let default_acl = client.default_object_access_control("my_bucket").create(&new_acl).await?; /// # client.default_object_access_control().delete(default_acl).await?; /// # Ok(()) /// # } /// ``` pub async fn create( &self, - bucket: &str, - new_acl: &NewDefaultObjectAccessControl, - ) -> crate::Result { + new_acl: &create::DefaultObjectAccessControl, + ) -> Result { + let headers = self.client.get_headers().await?; let url = format!( - "{}/b/{}/defaultObjectAcl", - crate::BASE_URL, - percent_encode(bucket) + "{}", + self.base_url ); - let result: GoogleResponse = self - .0 - .client + let result: crate::models::Response = self.client.reqwest .post(&url) - .headers(self.0.get_headers().await?) + .headers(headers) .json(new_acl) .send() .await? 
.json() .await?; match result { - GoogleResponse::Success(mut s) => { - s.bucket = bucket.to_string(); + crate::models::Response::Success(mut s) => { + // todo: + // s.bucket = bucket.to_string(); Ok(s) } - GoogleResponse::Error(e) => Err(e.into()), + crate::models::Response::Error(e) => Err(e.into()), } } @@ -77,35 +74,20 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// use cloud_storage::default_object_access_control::DefaultObjectAccessControl; /// /// let client = Client::default(); - /// let default_acls = client.default_object_access_control().list("mybucket").await?; + /// let default_acls = client.default_object_access_control("my_bucket").list().await?; /// # Ok(()) /// # } /// ``` - pub async fn list(&self, bucket: &str) -> crate::Result> { - let url = format!( - "{}/b/{}/defaultObjectAcl", - crate::BASE_URL, - percent_encode(bucket) - ); - let result: GoogleResponse> = self - .0 - .client - .get(&url) - .headers(self.0.get_headers().await?) - .send() - .await? - .json() - .await?; + pub async fn list(&self) -> Result, Error> { + let headers = self.client.get_headers().await?; + let result: crate::models::Response> = self.client.reqwest.get(&self.base_url).headers(headers).send().await?.json().await?; match result { - GoogleResponse::Success(s) => Ok(s - .items - .into_iter() - .map(|item| DefaultObjectAccessControl { - bucket: bucket.to_string(), - ..item - }) - .collect()), - GoogleResponse::Error(e) => Err(e.into()), + crate::models::Response::Success(s) => Ok(s.items.into_iter() + .map(|item| DefaultObjectAccessControl { + bucket: self.bucket.to_string(), + ..item + }).collect()), + crate::models::Response::Error(e) => Err(e.into()), } } @@ -126,36 +108,33 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// use cloud_storage::default_object_access_control::{DefaultObjectAccessControl, Entity}; /// /// let client = Client::default(); - /// let default_acl = client.default_object_access_control().read("mybucket", &Entity::AllUsers).await?; + 
/// let default_acl = client.default_object_access_control("my_bucket").read(&Entity::AllUsers).await?; /// # Ok(()) /// # } /// ``` pub async fn read( &self, - bucket: &str, entity: &Entity, - ) -> crate::Result { + ) -> Result { + let headers = self.client.get_headers().await?; let url = format!( - "{}/b/{}/defaultObjectAcl/{}", - crate::BASE_URL, - percent_encode(bucket), - percent_encode(&entity.to_string()), + "{}/{}", + self.base_url, + crate::percent_encode(&entity.to_string()), ); - let result: GoogleResponse = self - .0 - .client + let result: crate::models::Response = self.client.reqwest .get(&url) - .headers(self.0.get_headers().await?) + .headers(headers) .send() .await? .json() .await?; match result { - GoogleResponse::Success(mut s) => { - s.bucket = bucket.to_string(); + crate::models::Response::Success(mut s) => { + s.bucket = self.bucket.to_string(); Ok(s) } - GoogleResponse::Error(e) => Err(e.into()), + crate::models::Response::Error(e) => Err(e.into()), } } @@ -172,7 +151,7 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// use cloud_storage::default_object_access_control::{DefaultObjectAccessControl, Entity}; /// /// let client = Client::default(); - /// let mut default_acl = client.default_object_access_control().read("my_bucket", &Entity::AllUsers).await?; + /// let mut default_acl = client.default_object_access_control("my_bucket").read(&Entity::AllUsers).await?; /// default_acl.entity = Entity::AllAuthenticatedUsers; /// client.default_object_access_control().update(&default_acl).await?; /// # Ok(()) @@ -181,29 +160,20 @@ impl<'a> DefaultObjectAccessControlClient<'a> { pub async fn update( &self, default_object_access_control: &DefaultObjectAccessControl, - ) -> crate::Result { + ) -> Result { + let headers = self.client.get_headers().await?; let url = format!( - "{}/b/{}/defaultObjectAcl/{}", - crate::BASE_URL, - percent_encode(&default_object_access_control.bucket), - percent_encode(&default_object_access_control.entity.to_string()), + 
"{}/{}", + self.base_url, + crate::percent_encode(&default_object_access_control.entity.to_string()), ); - let result: GoogleResponse = self - .0 - .client - .put(&url) - .headers(self.0.get_headers().await?) - .json(default_object_access_control) - .send() - .await? - .json() - .await?; + let result: crate::models::Response = self.client.reqwest.put(&url).headers(headers).json(default_object_access_control).send().await?.json().await?; match result { - GoogleResponse::Success(mut s) => { + crate::models::Response::Success(mut s) => { s.bucket = default_object_access_control.bucket.to_string(); Ok(s) } - GoogleResponse::Error(e) => Err(e.into()), + crate::models::Response::Error(e) => Err(e.into()), } } @@ -220,7 +190,7 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// use cloud_storage::default_object_access_control::{DefaultObjectAccessControl, Entity}; /// /// let client = Client::default(); - /// let mut default_acl = client.default_object_access_control().read("my_bucket", &Entity::AllUsers).await?; + /// let mut default_acl = client.default_object_access_control("my_bucket").read(&Entity::AllUsers).await?; /// client.default_object_access_control().delete(default_acl).await?; /// # Ok(()) /// # } @@ -229,17 +199,15 @@ impl<'a> DefaultObjectAccessControlClient<'a> { &self, default_object_access_control: DefaultObjectAccessControl, ) -> Result<(), crate::Error> { + let headers = self.client.get_headers().await?; let url = format!( - "{}/b/{}/defaultObjectAcl/{}", - crate::BASE_URL, - percent_encode(&default_object_access_control.bucket), - percent_encode(&default_object_access_control.entity.to_string()), + "{}/{}", + self.base_url, + crate::percent_encode(&default_object_access_control.entity.to_string()), ); - let response = self - .0 - .client + let response = self.client.reqwest .delete(&url) - .headers(self.0.get_headers().await?) 
+ .headers(headers) .send() .await?; if response.status().is_success() { diff --git a/src/client/hmac_key.rs b/src/client/hmac_key.rs index 12e728c..6ddbf37 100644 --- a/src/client/hmac_key.rs +++ b/src/client/hmac_key.rs @@ -1,11 +1,12 @@ -use crate::{ - error::GoogleResponse, - hmac_key::{HmacKey, HmacMeta, HmacState}, -}; +use crate::{Error, models::{HmacKey, HmacMeta, Response, ListResponse, HmacState, UpdateHmacMetadata}}; /// Operations on [`HmacKey`](HmacKey)s. #[derive(Debug)] -pub struct HmacKeyClient<'a>(pub(super) &'a super::Client); +pub struct HmacKeyClient<'a> { + pub(crate) client: &'a super::client::Client, + pub(crate) hmac_keys_url: String, + pub(crate) client_email: String, +} impl<'a> HmacKeyClient<'a> { /// Creates a new HMAC key for the specified service account. @@ -30,31 +31,21 @@ impl<'a> HmacKeyClient<'a> { /// # Ok(()) /// # } /// ``` - pub async fn create(&self) -> crate::Result { + pub async fn create(&self) -> Result { use reqwest::header::CONTENT_LENGTH; - let url = format!( - "{}/projects/{}/hmacKeys", - crate::BASE_URL, - crate::SERVICE_ACCOUNT.project_id - ); - let query = [("serviceAccountEmail", &crate::SERVICE_ACCOUNT.client_email)]; - let mut headers = self.0.get_headers().await?; + let query = [("serviceAccountEmail", &self.client_email)]; + let mut headers = self.client.get_headers().await?; headers.insert(CONTENT_LENGTH, 0.into()); - let result: GoogleResponse = self - .0 - .client - .post(&url) + let result: crate::models::Response = self.client.reqwest + .post(&self.hmac_keys_url) .headers(headers) .query(&query) .send() .await? .json() .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + Ok(result?) } /// Retrieves a list of HMAC keys matching the criteria. 
Since the HmacKey is secret, this does @@ -78,31 +69,23 @@ impl<'a> HmacKeyClient<'a> { /// # Ok(()) /// # } /// ``` - pub async fn list(&self) -> crate::Result> { - let url = format!( - "{}/projects/{}/hmacKeys", - crate::BASE_URL, - crate::SERVICE_ACCOUNT.project_id - ); - let response = self - .0 - .client - .get(&url) - .headers(self.0.get_headers().await?) + pub async fn list(&self) -> Result, Error> { + let response = self.client.reqwest + .get(&self.hmac_keys_url) + .headers(self.client.get_headers().await?) .send() .await? .text() .await?; - let result: Result, _> = - serde_json::from_str(&response); + let result: Result>, serde_json::Error> = serde_json::from_str(&response); // This function rquires more complicated error handling because when there is only one // entry, Google will return the response `{ "kind": "storage#hmacKeysMetadata" }` instead // of a list with one element. This breaks the parser. match result { Ok(parsed) => match parsed { - GoogleResponse::Success(s) => Ok(s.items), - GoogleResponse::Error(e) => Err(e.into()), + crate::models::Response::Success(s) => Ok(s.items), + crate::models::Response::Error(e) => Err(e.into()), }, Err(_) => Ok(vec![]), } @@ -128,26 +111,16 @@ impl<'a> HmacKeyClient<'a> { /// let key = client.hmac_key().read("some identifier").await?; /// # Ok(()) /// # } - pub async fn read(&self, access_id: &str) -> crate::Result { - let url = format!( - "{}/projects/{}/hmacKeys/{}", - crate::BASE_URL, - crate::SERVICE_ACCOUNT.project_id, - access_id - ); - let result: GoogleResponse = self - .0 - .client + pub async fn read(&self, access_id: &str) -> Result { + let url = format!("{}/{}",self.hmac_keys_url,access_id); + let result: crate::models::Response = self.client.reqwest .get(&url) - .headers(self.0.get_headers().await?) + .headers(self.client.get_headers().await?) .send() .await? .json() .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + Ok(result?) 
} /// Updates the state of an HMAC key. See the HMAC Key resource descriptor for valid states. @@ -170,28 +143,22 @@ impl<'a> HmacKeyClient<'a> { /// let key = client.hmac_key().update("your key", HmacState::Active).await?; /// # Ok(()) /// # } - pub async fn update(&self, access_id: &str, state: HmacState) -> crate::Result { + pub async fn update(&self, access_id: &str, state: HmacState) -> Result { let url = format!( - "{}/projects/{}/hmacKeys/{}", - crate::BASE_URL, - crate::SERVICE_ACCOUNT.project_id, + "{}/{}", + self.hmac_keys_url, access_id ); - serde_json::to_string(&crate::hmac_key::UpdateMeta { state })?; - let result: GoogleResponse = self - .0 - .client + serde_json::to_string(&UpdateHmacMetadata { state })?; + let result: Response = self.client.reqwest .put(&url) - .headers(self.0.get_headers().await?) - .json(&crate::hmac_key::UpdateMeta { state }) + .headers(self.client.get_headers().await?) + .json(&UpdateHmacMetadata { state }) .send() .await? .json() .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + Ok(result?) } /// Deletes an HMAC key. Note that a key must be set to `Inactive` first. @@ -213,18 +180,15 @@ impl<'a> HmacKeyClient<'a> { /// client.hmac_key().delete(&key.access_id).await?; /// # Ok(()) /// # } - pub async fn delete(&self, access_id: &str) -> crate::Result<()> { + pub async fn delete(&self, access_id: &str) -> Result<(), Error> { let url = format!( - "{}/projects/{}/hmacKeys/{}", - crate::BASE_URL, - crate::SERVICE_ACCOUNT.project_id, + "{}/{}", + self.hmac_keys_url, access_id ); - let response = self - .0 - .client + let response = self.client.reqwest .delete(&url) - .headers(self.0.get_headers().await?) + .headers(self.client.get_headers().await?) .send() .await?; if response.status().is_success() { diff --git a/src/client/mod.rs b/src/client/mod.rs new file mode 100644 index 0000000..af31ac0 --- /dev/null +++ b/src/client/mod.rs @@ -0,0 +1,17 @@ +//! 
Clients for Google Cloud Storage endpoints. + +mod bucket; +mod bucket_access_control; +mod client; +mod default_object_access_control; +mod hmac_key; +mod object; +mod object_access_control; + +pub use client::Client; +pub use bucket::BucketClient; +pub use bucket_access_control::BucketAccessControlClient; +pub use default_object_access_control::DefaultObjectAccessControlClient; +pub use hmac_key::HmacKeyClient; +pub use object::ObjectClient; +pub use object_access_control::ObjectAccessControlClient; \ No newline at end of file diff --git a/src/client/object.rs b/src/client/object.rs index 4ac1320..3243d56 100644 --- a/src/client/object.rs +++ b/src/client/object.rs @@ -1,22 +1,16 @@ -use futures_util::{stream, Stream, TryStream}; +use bytes::Buf; +use futures_util::{Stream, stream, TryStream}; use reqwest::StatusCode; -use crate::{ - error::GoogleResponse, - object::{ - percent_encode, ComposeParameters, ComposeRequest, CopyParameters, CreateParameters, - DeleteParameters, ObjectList, ReadParameters, RewriteParameters, RewriteResponse, - SizedByteStream, UpdateParameters, - }, - ListRequest, Object, -}; - -// Object uploads has its own url for some reason -const BASE_URL: &str = "https://storage.googleapis.com/upload/storage/v1/b"; +use crate::{models::{CreateParameters, ObjectList, ReadParameters, UpdateParameters, DeleteParameters, ComposeRequest, ComposeParameters, CopyParameters, RewriteParameters, rewrite_response::RewriteResponse}, Object, Error, ListRequest, sized_byte_stream::SizedByteStream}; /// Operations on [`Object`](Object)s. #[derive(Debug)] -pub struct ObjectClient<'a>(pub(super) &'a super::Client); +pub struct ObjectClient<'a> { + pub(crate) client: &'a super::client::Client, + pub(crate) object_creation_url: &'a str, // {}/{}/o?name={}&uploadType=media + pub(crate) base_url: &'a str, +} impl<'a> ObjectClient<'a> { /// Create a new object. 
@@ -32,7 +26,7 @@ impl<'a> ObjectClient<'a> { /// /// let file: Vec = read_cute_cat("cat.png"); /// let client = Client::default(); - /// client.object().create("cat-photos", file, "recently read cat.png", "image/png", None).await?; + /// client.object("cat-photos").create(file, "recently read cat.png", "image/png", None).await?; /// # Ok(()) /// # } /// ``` @@ -43,21 +37,14 @@ impl<'a> ObjectClient<'a> { filename: &str, mime_type: &str, parameters: Option, - ) -> crate::Result { + ) -> Result { use reqwest::header::{CONTENT_LENGTH, CONTENT_TYPE}; - let url = &format!( - "{}/{}/o?uploadType=media&name={}", - BASE_URL, - percent_encode(bucket), - percent_encode(filename), - ); - let mut headers = self.0.get_headers().await?; + let url = &format!("{}&uploadType=media", self.object_creation_url); + let mut headers = self.client.get_headers().await?; headers.insert(CONTENT_TYPE, mime_type.parse()?); headers.insert(CONTENT_LENGTH, file.len().to_string().parse()?); - let response = self - .0 - .client + let response = self.client.reqwest .post(url) .query(¶meters) .headers(headers) @@ -89,7 +76,7 @@ impl<'a> ObjectClient<'a> { /// "custom_id": "1234" /// } /// }); - /// client.object().create_with("cat-photos", file, "recently read cat.png", "image/png", &metadata).await?; + /// client.object("cat-photos").create_with(file, "recently read cat.png", "image/png", &metadata).await?; /// # Ok(()) /// # } /// ``` @@ -100,13 +87,8 @@ impl<'a> ObjectClient<'a> { filename: &str, mime_type: &str, metadata: &serde_json::Value, - ) -> crate::Result { - let url = &format!( - "{}/{}/o?uploadType=multipart&name={}", - BASE_URL, - percent_encode(bucket), - percent_encode(filename), - ); + ) -> Result { + let url = &format!("{}&uploadType=multipart", self.object_creation_url); // single-request upload that includes metadata require a mutlipart request where // part 1 is metadata, and part2 is the file to upload @@ -116,10 +98,8 @@ impl<'a> ObjectClient<'a> { let form = 
reqwest::multipart::Form::new() .part("metadata", metadata_part) .part("file", file_part); - let headers = self.0.get_headers().await?; - let response = self - .0 - .client + let headers = self.client.get_headers().await?; + let response = self.client.reqwest .post(url) .headers(headers) .multipart(form) @@ -148,7 +128,7 @@ impl<'a> ObjectClient<'a> { /// .send() /// .await? /// .bytes_stream(); - /// client.object().create_streamed("cat-photos", file, 10, "recently read cat.png", "image/png").await?; + /// client.object("cat-photos").create_streamed(file, 10, "recently read cat.png", "image/png").await?; /// # Ok(()) /// # } /// ``` @@ -159,19 +139,14 @@ impl<'a> ObjectClient<'a> { filename: &str, mime_type: &str, metadata: &serde_json::Value, - ) -> crate::Result + ) -> Result where S: TryStream + Send + Sync + 'static, S::Error: Into>, bytes::Bytes: From, { - let url = &format!( - "{}/{}/o?uploadType=multipart&name={}", - BASE_URL, - percent_encode(bucket), - percent_encode(filename), - ); - let headers = self.0.get_headers().await?; + let url = &format!("{}&uploadType=multipart", self.object_creation_url); + let headers = self.client.get_headers().await?; // single-request upload that includes metadata require a mutlipart request where // part 1 is metadata, and part2 is the file to upload @@ -183,9 +158,7 @@ impl<'a> ObjectClient<'a> { .part("metadata", metadata_part) .part("file", file_part); - let response = self - .0 - .client + let response = self.client.reqwest .post(url) .headers(headers) .multipart(form) @@ -213,7 +186,7 @@ impl<'a> ObjectClient<'a> { /// .send() /// .await? 
/// .bytes_stream(); - /// client.object().create_streamed("cat-photos", file, 10, "recently read cat.png", "image/png", None).await?; + /// client.object("cat-photos").create_streamed(file, 10, "recently read cat.png", "image/png", None).await?; /// # Ok(()) /// # } /// ``` @@ -225,7 +198,7 @@ impl<'a> ObjectClient<'a> { filename: &str, mime_type: &str, parameters: Option, - ) -> crate::Result + ) -> Result where S: TryStream + Send + Sync + 'static, S::Error: Into>, @@ -233,22 +206,15 @@ impl<'a> ObjectClient<'a> { { use reqwest::header::{CONTENT_LENGTH, CONTENT_TYPE}; - let url = &format!( - "{}/{}/o?uploadType=media&name={}", - BASE_URL, - percent_encode(bucket), - percent_encode(filename), - ); - let mut headers = self.0.get_headers().await?; + let url = &format!("{}&uploadType=media", self.object_creation_url); + let mut headers = self.client.get_headers().await?; headers.insert(CONTENT_TYPE, mime_type.parse()?); if let Some(length) = length.into() { headers.insert(CONTENT_LENGTH, length.into()); } let body = reqwest::Body::wrap_stream(stream); - let response = self - .0 - .client + let response = self.client.reqwest .post(url) .query(¶meters) .headers(headers) @@ -271,15 +237,17 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::{Object, ListRequest}; /// /// let client = Client::default(); - /// let all_objects = client.object().list("my_bucket", ListRequest::default()).await?; + /// let all_objects = client.object("my_bucket").list(ListRequest::default()).await?; /// # Ok(()) /// # } /// ``` pub async fn list( &self, - bucket: &'a str, + bucket: &str, list_request: ListRequest, - ) -> crate::Result> + 'a> { + ) -> Result>, Error> { + let bucket = bucket.clone(); + enum ListState { Start(ListRequest), HasMore(ListRequest), @@ -302,63 +270,65 @@ impl<'a> ObjectClient<'a> { } } - let client = self.0; - - Ok(stream::unfold( - ListState::Start(list_request), - move |mut state| async move { - let url = format!("{}/b/{}/o", crate::BASE_URL, 
percent_encode(bucket)); - let headers = match client.get_headers().await { - Ok(h) => h, - Err(e) => return Some((Err(e), state)), - }; - let req = state.req_mut()?; - if req.max_results == Some(0) { - return None; - } - - let response = client - .client - .get(&url) - .query(req) - .headers(headers) - .send() - .await; - - let response = match response { - Ok(r) if r.status() == 200 => r, - Ok(r) => { - let e = match r.json::().await { - Ok(err_res) => err_res.into(), - Err(serde_err) => serde_err.into(), - }; - return Some((Err(e), state)); + let reqwest = self.client.reqwest.clone(); + let headers = self.client.get_headers().await?.clone(); + let url = format!("{}/b/{}/o", self.base_url, crate::percent_encode(bucket)); + + Ok(stream::unfold(ListState::Start(list_request), move |mut state| { + let reqwest = reqwest.clone(); + let url = url.clone(); + let headers = headers.clone(); + + async move { + + + let req = state.req_mut()?; + if req.max_results == Some(0) { + return None; } - Err(e) => return Some((Err(e.into()), state)), - }; - - let result: GoogleResponse = match response.json().await { - Ok(json) => json, - Err(e) => return Some((Err(e.into()), state)), - }; - - let response_body = match result { - GoogleResponse::Success(success) => success, - GoogleResponse::Error(e) => return Some((Err(e.into()), state)), - }; - - let next_state = if let Some(ref page_token) = response_body.next_page_token { - req.page_token = Some(page_token.clone()); - req.max_results = req - .max_results - .map(|rem| rem.saturating_sub(response_body.items.len())); - state.into_has_more()? 
- } else { - Done - }; - - Some((Ok(response_body), next_state)) - }, + + let response = reqwest + .get(&url) + .query(req) + .headers(headers.clone()) + .send() + .await; + + let response = match response { + Ok(r) if r.status() == 200 => r, + Ok(r) => { + let e = match r.json::().await { + Ok(err_res) => err_res.into(), + Err(serde_err) => serde_err.into(), + }; + return Some((Err(e), state)); + } + Err(e) => return Some((Err(e.into()), state)), + }; + + let result: crate::models::Response = match response.json().await { + Ok(json) => json, + Err(e) => return Some((Err(e.into()), state)), + }; + + let response_body = match result { + crate::models::Response::Success(success) => success, + crate::models::Response::Error(e) => return Some((Err(e.into()), state)), + }; + + let next_state = if let Some(ref page_token) = response_body.next_page_token { + req.page_token = Some(page_token.clone()); + req.max_results = req + .max_results + .map(|rem| rem.saturating_sub(response_body.items.len())); + state.into_has_more()? + } else { + Done + }; + + Some((Ok(response_body), next_state)) + } + } )) } @@ -371,7 +341,7 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::Object; /// /// let client = Client::default(); - /// let object = client.object().read("my_bucket", "path/to/my/file.png", None).await?; + /// let object = client.object("my_bucket").read("path/to/my/file.png", None).await?; /// # Ok(()) /// # } /// ``` @@ -380,28 +350,23 @@ impl<'a> ObjectClient<'a> { bucket: &str, file_name: &str, parameters: Option, - ) -> crate::Result { + ) -> Result { //let paramters = qs:: let url = format!( "{}/b/{}/o/{}", - crate::BASE_URL, - percent_encode(bucket), - percent_encode(file_name), + self.base_url, + crate::percent_encode(bucket), + crate::percent_encode(file_name), ); - let result: GoogleResponse = self - .0 - .client + let result: crate::models::Response = self.client.reqwest .get(&url) .query(¶meters) - .headers(self.0.get_headers().await?) 
+ .headers(self.client.get_headers().await?) .send() .await? .json() .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + Ok(result?) } /// Download the content of the object with the specified name in the specified bucket. @@ -413,7 +378,7 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::Object; /// /// let client = Client::default(); - /// let bytes = client.object().download("my_bucket", "path/to/my/file.png", None).await?; + /// let bytes = client.object("my_bucket").download("path/to/my/file.png", None).await?; /// # Ok(()) /// # } /// ``` @@ -422,19 +387,17 @@ impl<'a> ObjectClient<'a> { bucket: &str, file_name: &str, parameters: Option, - ) -> crate::Result> { + ) -> Result, Error> { let url = format!( "{}/b/{}/o/{}?alt=media", - crate::BASE_URL, - percent_encode(bucket), - percent_encode(file_name), + self.base_url, + crate::percent_encode(bucket), + crate::percent_encode(file_name), ); - let resp = self - .0 - .client + let resp = self.client.reqwest .get(&url) .query(¶meters) - .headers(self.0.get_headers().await?) + .headers(self.client.get_headers().await?) 
.send() .await?; if resp.status() == StatusCode::NOT_FOUND { @@ -457,7 +420,7 @@ impl<'a> ObjectClient<'a> { /// use tokio::io::{AsyncWriteExt, BufWriter}; /// /// let client = Client::default(); - /// let mut stream = client.object().download_streamed("my_bucket", "path/to/my/file.png", None).await?; + /// let mut stream = client.object("my_bucket").download_streamed("path/to/my/file.png", None).await?; /// let mut file = BufWriter::new(File::create("file.png").await.unwrap()); /// while let Some(byte) = stream.next().await { /// file.write_all(&[byte.unwrap()]).await.unwrap(); @@ -471,28 +434,24 @@ impl<'a> ObjectClient<'a> { bucket: &str, file_name: &str, parameters: Option, - ) -> crate::Result> + Unpin> { - use futures_util::{StreamExt, TryStreamExt}; + ) -> Result> + Unpin, Error> { + use futures_util::TryStreamExt; let url = format!( "{}/b/{}/o/{}?alt=media", - crate::BASE_URL, - percent_encode(bucket), - percent_encode(file_name), + self.base_url, + crate::percent_encode(bucket), + crate::percent_encode(file_name), ); - let response = self - .0 - .client + let response = self.client.reqwest .get(&url) .query(¶meters) - .headers(self.0.get_headers().await?) + .headers(self.client.get_headers().await?) .send() .await? 
.error_for_status()?; let size = response.content_length(); let bytes = response - .bytes_stream() - .map(|chunk| chunk.map(|c| futures_util::stream::iter(c.into_iter().map(Ok)))) - .try_flatten(); + .bytes_stream().map_err(Error::from); Ok(SizedByteStream::new(bytes, size)) } @@ -509,7 +468,7 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::Object; /// /// let client = Client::default(); - /// let mut object = client.object().read("my_bucket", "path/to/my/file.png", None).await?; + /// let mut object = client.object("my_bucket").read("path/to/my/file.png", None).await?; /// object.content_type = Some("application/xml".to_string()); /// client.object().update(&object, None).await?; /// # Ok(()) @@ -519,28 +478,23 @@ impl<'a> ObjectClient<'a> { &self, object: &Object, parameters: Option, - ) -> crate::Result { + ) -> Result { let url = format!( "{}/b/{}/o/{}", - crate::BASE_URL, - percent_encode(&object.bucket), - percent_encode(&object.name), + self.base_url, + crate::percent_encode(&object.bucket), + crate::percent_encode(&object.name), ); - let result: GoogleResponse = self - .0 - .client + let result: crate::models::Response = self.client.reqwest .put(&url) .query(¶meters) - .headers(self.0.get_headers().await?) + .headers(self.client.get_headers().await?) .json(&object) .send() .await? .json() .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + Ok(result?) } /// Deletes a single object with the specified name in the specified bucket. 
@@ -552,7 +506,7 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::Object; /// /// let client = Client::default(); - /// client.object().delete("my_bucket", "path/to/my/file.png", None).await?; + /// client.object("my_bucket").delete("path/to/my/file.png", None).await?; /// # Ok(()) /// # } /// ``` @@ -561,19 +515,17 @@ impl<'a> ObjectClient<'a> { bucket: &str, file_name: &str, parameters: Option, - ) -> crate::Result<()> { + ) -> Result<(), Error> { let url = format!( "{}/b/{}/o/{}", - crate::BASE_URL, - percent_encode(bucket), - percent_encode(file_name), + self.base_url, + crate::percent_encode(bucket), + crate::percent_encode(file_name), ); - let response = self - .0 - .client + let response = self.client.reqwest .delete(&url) .query(¶meters) - .headers(self.0.get_headers().await?) + .headers(self.client.get_headers().await?) .send() .await?; if response.status().is_success() { @@ -592,8 +544,8 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::object::{Object, ComposeRequest, SourceObject}; /// /// let client = Client::default(); - /// let obj1 = client.object().read("my_bucket", "file1", None).await?; - /// let obj2 = client.object().read("my_bucket", "file2", None).await?; + /// let obj1 = client.object("my_bucket").read("file1", None).await?; + /// let obj2 = client.object("my_bucket").read("file2", None).await?; /// let compose_request = ComposeRequest { /// kind: "storage#composeRequest".to_string(), /// source_objects: vec![ @@ -610,7 +562,7 @@ impl<'a> ObjectClient<'a> { /// ], /// destination: None, /// }; - /// let obj3 = client.object().compose("my_bucket", &compose_request, "test-concatted-file", None).await?; + /// let obj3 = client.object("my_bucket").compose(&compose_request, "test-concatted-file", None).await?; /// // obj3 is now a file with the content of obj1 and obj2 concatted together. 
/// # Ok(()) /// # } @@ -621,28 +573,23 @@ impl<'a> ObjectClient<'a> { req: &ComposeRequest, destination_object: &str, parameters: Option, - ) -> crate::Result { + ) -> Result { let url = format!( "{}/b/{}/o/{}/compose", - crate::BASE_URL, - percent_encode(bucket), - percent_encode(destination_object) + self.base_url, + crate::percent_encode(bucket), + crate::percent_encode(destination_object) ); - let result: GoogleResponse = self - .0 - .client + let result: crate::models::Response = self.client.reqwest .post(&url) .query(¶meters) - .headers(self.0.get_headers().await?) + .headers(self.client.get_headers().await?) .json(req) .send() .await? .json() .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + Ok(result?) } /// Copy this object to the target bucket and path. @@ -654,7 +601,7 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::object::{Object, ComposeRequest}; /// /// let client = Client::default(); - /// let obj1 = client.object().read("my_bucket", "file1", None).await?; + /// let obj1 = client.object("my_bucket").read("file1", None).await?; /// let obj2 = client.object().copy(&obj1, "my_other_bucket", "file2", None).await?; /// // obj2 is now a copy of obj1. 
/// # Ok(()) @@ -666,22 +613,20 @@ impl<'a> ObjectClient<'a> { destination_bucket: &str, path: &str, parameters: Option, - ) -> crate::Result { + ) -> Result { use reqwest::header::CONTENT_LENGTH; let url = format!( "{base}/b/{sBucket}/o/{sObject}/copyTo/b/{dBucket}/o/{dObject}", - base = crate::BASE_URL, - sBucket = percent_encode(&object.bucket), - sObject = percent_encode(&object.name), - dBucket = percent_encode(destination_bucket), - dObject = percent_encode(path), + base = self.base_url, + sBucket = crate::percent_encode(&object.bucket), + sObject = crate::percent_encode(&object.name), + dBucket = crate::percent_encode(destination_bucket), + dObject = crate::percent_encode(path), ); - let mut headers = self.0.get_headers().await?; + let mut headers = self.client.get_headers().await?; headers.insert(CONTENT_LENGTH, "0".parse()?); - let result: GoogleResponse = self - .0 - .client + let result: crate::models::Response = self.client.reqwest .post(&url) .query(¶meters) .headers(headers) @@ -689,10 +634,7 @@ impl<'a> ObjectClient<'a> { .await? .json() .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + Ok(result?) } /// Moves a file from the current location to the target bucket and path. @@ -711,7 +653,7 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::object::Object; /// /// let client = Client::default(); - /// let obj1 = client.object().read("my_bucket", "file1", None).await?; + /// let obj1 = client.object("my_bucket").read("file1", None).await?; /// let obj2 = client.object().rewrite(&obj1, "my_other_bucket", "file2", None).await?; /// // obj2 is now a copy of obj1. 
/// # Ok(()) @@ -723,22 +665,20 @@ impl<'a> ObjectClient<'a> { destination_bucket: &str, path: &str, parameters: Option, - ) -> crate::Result { + ) -> Result { use reqwest::header::CONTENT_LENGTH; let url = format!( "{base}/b/{sBucket}/o/{sObject}/rewriteTo/b/{dBucket}/o/{dObject}", - base = crate::BASE_URL, - sBucket = percent_encode(&object.bucket), - sObject = percent_encode(&object.name), - dBucket = percent_encode(destination_bucket), - dObject = percent_encode(path), + base = self.base_url, + sBucket = crate::percent_encode(&object.bucket), + sObject = crate::percent_encode(&object.name), + dBucket = crate::percent_encode(destination_bucket), + dObject = crate::percent_encode(path), ); - let mut headers = self.0.get_headers().await?; + let mut headers = self.client.get_headers().await?; headers.insert(CONTENT_LENGTH, "0".parse()?); - let s = self - .0 - .client + let s = self.client.reqwest .post(&url) .query(¶meters) .headers(headers) @@ -749,9 +689,5 @@ impl<'a> ObjectClient<'a> { let result: RewriteResponse = serde_json::from_str(&s).unwrap(); Ok(result.resource) - // match result { - // GoogleResponse::Success(s) => Ok(s.resource), - // GoogleResponse::Error(e) => Err(e.into()), - // } } } diff --git a/src/client/object_access_control.rs b/src/client/object_access_control.rs index 6089c6e..f65fae6 100644 --- a/src/client/object_access_control.rs +++ b/src/client/object_access_control.rs @@ -1,14 +1,12 @@ -use crate::{ - bucket_access_control::Entity, - error::GoogleResponse, - object::percent_encode, - object_access_control::{NewObjectAccessControl, ObjectAccessControl}, - resources::common::ListResponse, -}; +use crate::{models::{create, ObjectAccessControl, ListResponse, Entity}, Error}; + /// Operations on [`ObjectAccessControl`](ObjectAccessControl)s. 
#[derive(Debug)] -pub struct ObjectAccessControlClient<'a>(pub(super) &'a super::Client); +pub struct ObjectAccessControlClient<'a> { + pub(crate) client: &'a super::client::Client, + pub(crate) acl_url: String, +} impl<'a> ObjectAccessControlClient<'a> { /// Creates a new ACL entry on the specified `object`. @@ -19,30 +17,17 @@ impl<'a> ObjectAccessControlClient<'a> { /// control access instead. pub async fn create( &self, - bucket: &str, - object: &str, - new_object_access_control: &NewObjectAccessControl, - ) -> crate::Result { - let url = format!( - "{}/b/{}/o/{}/acl", - crate::BASE_URL, - percent_encode(bucket), - percent_encode(object), - ); - let result: GoogleResponse = self - .0 - .client - .post(&url) - .headers(self.0.get_headers().await?) + new_object_access_control: &create::ObjectAccessControl, + ) -> Result { + let result: crate::models::Response = self.client.reqwest + .post(&self.acl_url) + .headers(self.client.get_headers().await?) .json(new_object_access_control) .send() .await? .json() .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + Ok(result?) } /// Retrieves `ACL` entries on the specified object. @@ -52,28 +37,18 @@ impl<'a> ObjectAccessControlClient<'a> { /// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to /// control access instead. pub async fn list( - &self, - bucket: &str, - object: &str, - ) -> crate::Result> { - let url = format!( - "{}/b/{}/o/{}/acl", - crate::BASE_URL, - percent_encode(bucket), - percent_encode(object), - ); - let result: GoogleResponse> = self - .0 - .client - .get(&url) - .headers(self.0.get_headers().await?) + &self + ) -> Result, Error> { + let result: crate::models::Response> = self.client.reqwest + .get(&self.acl_url) + .headers(self.client.get_headers().await?) .send() .await? 
.json() .await?; match result { - GoogleResponse::Success(s) => Ok(s.items), - GoogleResponse::Error(e) => Err(e.into()), + crate::models::Response::Success(s) => Ok(s.items), + crate::models::Response::Error(e) => Err(e.into()), } } @@ -85,30 +60,21 @@ impl<'a> ObjectAccessControlClient<'a> { /// control access instead. pub async fn read( &self, - bucket: &str, - object: &str, entity: &Entity, - ) -> crate::Result { + ) -> Result { let url = format!( - "{}/b/{}/o/{}/acl/{}", - crate::BASE_URL, - percent_encode(bucket), - percent_encode(object), - percent_encode(&entity.to_string()) + "{}/{}", + &self.acl_url, + crate::percent_encode(&entity.to_string()) ); - let result: GoogleResponse = self - .0 - .client + let result: crate::models::Response = self.client.reqwest .get(&url) - .headers(self.0.get_headers().await?) + .headers(self.client.get_headers().await?) .send() .await? .json() .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + Ok(result?) } /// Updates an ACL entry on the specified object. @@ -120,28 +86,21 @@ impl<'a> ObjectAccessControlClient<'a> { pub async fn update( &self, object_access_control: &ObjectAccessControl, - ) -> crate::Result { + ) -> Result { let url = format!( - "{}/b/{}/o/{}/acl/{}", - crate::BASE_URL, - percent_encode(&object_access_control.bucket), - percent_encode(&object_access_control.object), - percent_encode(&object_access_control.entity.to_string()), + "{}/{}", + &self.acl_url, + crate::percent_encode(&object_access_control.entity.to_string()), ); - let result: GoogleResponse = self - .0 - .client + let result: crate::models::Response = self.client.reqwest .put(&url) - .headers(self.0.get_headers().await?) + .headers(self.client.get_headers().await?) .json(object_access_control) .send() .await? .json() .await?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + Ok(result?) 
} /// Permanently deletes the ACL entry for the specified entity on the specified object. @@ -150,19 +109,15 @@ impl<'a> ObjectAccessControlClient<'a> { /// Important: This method fails with a 400 Bad Request response for buckets with uniform /// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to /// control access instead. - pub async fn delete(&self, object_access_control: ObjectAccessControl) -> crate::Result<()> { + pub async fn delete(&self, object_access_control: ObjectAccessControl) -> Result<(), Error> { let url = format!( - "{}/b/{}/o/{}/acl/{}", - crate::BASE_URL, - percent_encode(&object_access_control.bucket), - percent_encode(&object_access_control.object), - percent_encode(&object_access_control.entity.to_string()), + "{}/{}", + &self.acl_url, + crate::percent_encode(&object_access_control.entity.to_string()), ); - let response = self - .0 - .client + let response = self.client.reqwest .delete(&url) - .headers(self.0.get_headers().await?) + .headers(self.client.get_headers().await?) 
.send() .await?; if response.status().is_success() { diff --git a/src/configuration/mod.rs b/src/configuration/mod.rs new file mode 100644 index 0000000..ddd1c38 --- /dev/null +++ b/src/configuration/mod.rs @@ -0,0 +1,2 @@ +mod service_account; +pub use self::service_account::ServiceAccount; \ No newline at end of file diff --git a/src/resources/service_account.rs b/src/configuration/service_account.rs similarity index 72% rename from src/resources/service_account.rs rename to src/configuration/service_account.rs index c573081..46f8b0c 100644 --- a/src/resources/service_account.rs +++ b/src/configuration/service_account.rs @@ -24,8 +24,9 @@ pub struct ServiceAccount { pub client_x509_cert_url: String, } -impl ServiceAccount { - pub(crate) fn get() -> Self { +impl Default for ServiceAccount { + fn default() -> Self { + #[cfg(feature = "dotenv")] dotenv::dotenv().ok(); let credentials_json = std::env::var("SERVICE_ACCOUNT") .or_else(|_| std::env::var("GOOGLE_APPLICATION_CREDENTIALS")) @@ -35,11 +36,22 @@ impl ServiceAccount { .expect( "SERVICE_ACCOUNT(_JSON) or GOOGLE_APPLICATION_CREDENTIALS(_JSON) environment parameter required", ); - let account: Self = - serde_json::from_str(&credentials_json).expect("SERVICE_ACCOUNT file not valid"); + let account: Self = serde_json::from_str(&credentials_json).expect("SERVICE_ACCOUNT file not valid"); + assert_eq!( + account.r#type, "service_account", + "`type` should be 'service_account'" + ); + account + } +} + +impl ServiceAccount { + /// Method for creating a `ServiceAccount` from a json string. 
+ pub fn from_str(credentials_json: &str) -> Self { + let account: Self = serde_json::from_str(&credentials_json).expect("Format for Service Account invalid"); assert_eq!( account.r#type, "service_account", - "`type` parameter of `SERVICE_ACCOUNT` variable is not 'service_account'" + "`type` should be 'service_account'" ); account } diff --git a/src/resources/signature.rs b/src/configuration/signature.rs similarity index 100% rename from src/resources/signature.rs rename to src/configuration/signature.rs diff --git a/src/crypto/mod.rs b/src/crypto/mod.rs new file mode 100644 index 0000000..e0a8d38 --- /dev/null +++ b/src/crypto/mod.rs @@ -0,0 +1,10 @@ + +#[cfg(feature = "openssl")] +mod openssl; +#[cfg(feature = "openssl")] +pub use self::openssl::*; + +#[cfg(all(feature = "ring", not(feature = "openssl")))] +mod ring; +#[cfg(all(feature = "ring", not(feature = "openssl")))] +pub use self::ring::*; \ No newline at end of file diff --git a/src/crypto/openssl.rs b/src/crypto/openssl.rs new file mode 100644 index 0000000..77b83ff --- /dev/null +++ b/src/crypto/openssl.rs @@ -0,0 +1,16 @@ +use crate::Error; + +#[inline(always)] +pub fn rsa_pkcs1_sha256(message: &str, private_pem: &[u8]) -> Result, Error> { + use openssl::{hash::MessageDigest, pkey::PKey, sign::Signer}; + + let key = PKey::private_key_from_pem(private_pem)?; + let mut signer = Signer::new(MessageDigest::sha256(), &key)?; + signer.update(message.as_bytes())?; + Ok(signer.sign_to_vec()?) 
+} + +#[inline(always)] +pub fn sha256(bytes: &[u8]) -> impl AsRef<[u8]> { + openssl::sha::sha256(bytes) +} \ No newline at end of file diff --git a/src/crypto/ring.rs b/src/crypto/ring.rs new file mode 100644 index 0000000..fab8e53 --- /dev/null +++ b/src/crypto/ring.rs @@ -0,0 +1,20 @@ +use crate::Error; + +#[inline(always)] +pub fn rsa_pkcs1_sha256(message: &str, private_pem: &[u8]) -> Result, Error> { + use ring::{rand::SystemRandom, signature::{RsaKeyPair, RSA_PKCS1_SHA256}, + }; + + let key_pem = pem::parse(private_pem)?; + let key = RsaKeyPair::from_pkcs8(&key_pem.contents())?; + let rng = SystemRandom::new(); + let mut signature = vec![0; key.public_modulus_len()]; + key.sign(&RSA_PKCS1_SHA256, &rng, message.as_bytes(), &mut signature)?; + Ok(signature) +} + +#[inline(always)] +pub fn sha256(bytes: &[u8]) -> impl AsRef<[u8]> { + use ring::digest::{digest, SHA256}; + digest(&SHA256, bytes) +} \ No newline at end of file diff --git a/src/error.rs b/src/error.rs index 93c8511..7a62d18 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,8 +1,10 @@ +use crate::models::ErrorResponse; + /// Represents any of the ways storing something in Google Cloud Storage can fail. #[derive(Debug)] pub enum Error { /// If the error is caused by a non 2xx response by Google, this variant is returned. - Google(GoogleErrorResponse), + Google(ErrorResponse), /// If another network error causes something to fail, this variant is used. Reqwest(reqwest::Error), /// If we encounter a problem decoding the private key, this variant is used. 
@@ -115,366 +117,8 @@ impl From for Error { } } -#[derive(Debug, serde::Deserialize)] -#[serde(rename = "camelCase")] -#[serde(untagged)] -pub(crate) enum GoogleResponse { - Success(T), - Error(GoogleErrorResponse), -} - -// TODO comment this in when try_trait (#42327) get stabilized and enjoy the nicer handling of -// errors -// -// impl std::ops::Try for GoogleResponse { -// type Ok = T; -// type Error = Error; -// -// fn into_result(self) -> Result { -// match self { -// GoogleResponse::Success(t) => Ok(t), -// GoogleResponse::Error(error) => Err(Error::Google(error)), -// } -// } -// -// fn from_error(_a: Error) -> Self { -// unimplemented!() -// } -// -// fn from_ok(t: T) -> Self { -// GoogleResponse::Success(t) -// } -// } - -/// The structure of a error response returned by Google. -#[derive(Debug, serde::Deserialize)] -#[serde(rename = "camelCase")] -pub struct GoogleErrorResponse { - /// A container for the error information. - pub error: ErrorList, -} - -impl GoogleErrorResponse { - /// Return list of errors returned by Google - pub fn errors(&self) -> &[GoogleError] { - &self.error.errors - } - - /// Check whether errors contain given reason - pub fn errors_has_reason(&self, reason: &Reason) -> bool { - self.errors() - .iter() - .any(|google_error| google_error.is_reason(reason)) - } -} - -impl std::fmt::Display for GoogleErrorResponse { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { - writeln!(f, "{:?}", self) - } -} - -impl std::error::Error for GoogleErrorResponse {} - -/// A container for the error information. -#[derive(Debug, serde::Deserialize)] -#[serde(rename = "camelCase")] -pub struct ErrorList { - /// A container for the error details. - pub errors: Vec, - /// An HTTP status code value, without the textual description. - /// - /// Example values include: 400 (Bad Request), 401 (Unauthorized), and 404 (Not Found). - pub code: u16, - /// Description of the error. Same as errors.message. 
- pub message: String, -} - -/// Google Error structure -#[derive(Debug, serde::Deserialize)] -#[serde(rename = "camelCase")] -pub struct GoogleError { - /// The scope of the error. Example values include: global and push. - pub domain: String, - /// Example values include `invalid`, `invalidParameter`, and `required`. - pub reason: Reason, - /// Description of the error. - /// - /// Example values include `Invalid argument`, `Login required`, and `Required parameter: - /// project`. - pub message: String, - /// The location or part of the request that caused the error. Use with `location` to pinpoint - /// the error. For example, if you specify an invalid value for a parameter, the `locationType` - /// will be parameter and the location will be the name of the parameter. - /// - /// Example values include `header` and `parameter`. - pub location_type: Option, - /// The specific item within the `locationType` that caused the error. For example, if you - /// specify an invalid value for a parameter, the `location` will be the name of the parameter. - /// - /// Example values include: `Authorization`, `project`, and `projection`. - pub location: Option, -} - -impl std::fmt::Display for GoogleError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.message) - } -} - -impl std::error::Error for GoogleError {} - -impl GoogleError { - /// Check what was the reason of error - pub fn is_reason(&self, reason: &Reason) -> bool { - self.reason == *reason - } -} - -impl From for Error { - fn from(err: GoogleErrorResponse) -> Self { +impl From for Error { + fn from(err: crate::models::ErrorResponse) -> Self { Self::Google(err) } -} - -/// Google provides a list of codes, but testing indicates that this list is not exhaustive. 
-#[derive(Debug, PartialEq, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum Reason { - /// When requesting a download using alt=media URL parameter, the direct URL path to use is - /// prefixed by /download. If this is omitted, the service will issue this redirect with the - /// appropriate media download path in the Location header. - MediaDownloadRedirect, - /// The conditional request would have been successful, but the condition was false, so no body - /// was sent. - NotModified, - /// Resource temporarily located elsewhere according to the Location header. Among other - /// reasons, this can occur when cookie-based authentication is being used, e.g., when using the - /// Storage Browser, and it receives a request to download content. - TemporaryRedirect, - // /// Indicates an incomplete resumable upload and provides the range of bytes already received by - // /// Cloud Storage. Responses with this status do not contain a body. - // ResumeIncomplete, - - // - /// Undocumeten variant that is sometimes returned by Google. - Invalid, - /// The request cannot be completed based on your current Cloud Storage settings. For example, - /// you cannot lock a retention policy if the requested bucket doesn't have a retention policy, - /// and you cannot set ACLs if the requested bucket has Bucket Policy Only enabled. - BadRequest, - /// The retention period on a locked bucket cannot be reduced. - BadRequestException, - /// Bad Cloud KMS key. - CloudKmsBadKey, - /// Cloud KMS key name cannot be changed. - CloudKmsCannotChangeKeyName, - /// Resource's Cloud KMS decryption key not found. - CloudKmsDecryptionKeyNotFound, - /// Cloud KMS key is disabled, destroyed, or scheduled to be destroyed. - CloudKmsDisabledKey, - /// Cloud KMS encryption key not found. - CloudKmsEncryptionKeyNotFound, - /// Cloud KMS key location not allowed. - CloudKmsKeyLocationNotAllowed, - /// Missing an encryption algorithm, or the provided algorithm is not "AE256." 
- CustomerEncryptionAlgorithmIsInvalid, - /// Missing an encryption key, or it is not Base64 encoded, or it does not meet the required - /// length of the encryption algorithm. - CustomerEncryptionKeyFormatIsInvalid, - /// The provided encryption key is incorrect. - CustomerEncryptionKeyIsIncorrect, - /// Missing a SHA256 hash of the encryption key, or it is not Base64 encoded, or it does not - /// match the encryption key. - CustomerEncryptionKeySha256IsInvalid, - /// The value for the alt URL parameter was not recognized. - InvalidAltValue, - /// The value for one of fields in the request body was invalid. - InvalidArgument, - /// The value for one of the URL parameters was invalid. In addition to normal URL parameter - /// validation, any URL parameters that have a corresponding value in provided JSON request - /// bodies must match if they are both specified. If using JSONP, you will get this error if you - /// provide an alt parameter that is not json. - InvalidParameter, - /// Uploads or normal API request was sent to a `/download/*` path. Use the same path, but - /// without the /download prefix. - NotDownload, - /// Downloads or normal API request was sent to an `/upload/*` path. Use the same path, but - /// without the `/upload` prefix. - NotUpload, - /// Could not parse the body of the request according to the provided Content-Type. - ParseError, - /// Channel id must match the following regular expression: `[A-Za-z0-9\\-_\\+/=]+`. - #[serde(rename = "push.channelIdInvalid")] - PushChannelIdInvalid, - /// `storage.objects.watchAll`'s id property must be unique across channels. - #[serde(rename = "push.channelIdNotUnique")] - PushChannelIdNotUnique, - /// `storage.objects.watchAll`'s address property must contain a valid URL. - #[serde(rename = "push.webhookUrlNoHostOrAddress")] - PushWebhookUrlNoHostOrAddress, - /// `storage.objects.watchAll`'s address property must be an HTTPS URL. 
- #[serde(rename = "push.webhookUrlNotHttps")] - PushWebhookUrlNotHttps, - /// A required URL parameter or required request body JSON property is missing. - Required, - /// The resource is encrypted with a customer-supplied encryption key, but the request did not - /// provide one. - ResourceIsEncryptedWithCustomerEncryptionKey, - /// The resource is not encrypted with a customer-supplied encryption key, but the request - /// provided one. - ResourceNotEncryptedWithCustomerEncryptionKey, - /// A request was made to an API version that has been turned down. Clients will need to update - /// to a supported version. - TurnedDown, - /// The user project specified in the request does not match the user project specifed in an - /// earlier, related request. - UserProjectInconsistent, - /// The user project specified in the request is invalid, either because it is a malformed - /// project id or because it refers to a non-existent project. - UserProjectInvalid, - /// The requested bucket has Requester Pays enabled, the requester is not an owner of the - /// bucket, and no user project was present in the request. - UserProjectMissing, - /// storage.objects.insert must be invoked as an upload rather than a metadata. - WrongUrlForUpload, - // - - // - /// Access to a Requester Pays bucket requires authentication. - #[serde(rename = "AuthenticationRequiredRequesterPays")] - AuthenticationRequiredRequesterPays, - /// This error indicates a problem with the authorization provided in the request to Cloud - /// Storage. The following are some situations where that will occur: - /// - /// * The OAuth access token has expired and needs to be refreshed. This can be avoided by - /// refreshing the access token early, but code can also catch this error, refresh the token - /// and retry automatically. - /// * Multiple non-matching authorizations were provided; choose one mode only. 
- /// * The OAuth access token's bound project does not match the project associated with the - /// provided developer key. - /// * The Authorization header was of an unrecognized format or uses an unsupported credential - /// type. - AuthError, - /// When downloading content from a cookie-authenticated site, e.g., using the Storage Browser, - /// the response will redirect to a temporary domain. This error will occur if access to said - /// domain occurs after the domain expires. Issue the original request again, and receive a new - /// redirect. - LockedDomainExpired, - /// Requests to storage.objects.watchAll will fail unless you verify you own the domain. - #[serde(rename = "push.webhookUrlUnauthorized")] - PushWebhookUrlUnauthorized, - // /// Access to a non-public method that requires authorization was made, but none was provided in - // /// the Authorization header or through other means. - // Required, - // - - // - /// The account associated with the project that owns the bucket or object has been disabled. Check the Google Cloud Console to see if there is a problem with billing, and if not, contact account support. - AccountDisabled, - /// The Cloud Storage JSON API is restricted by law from operating with certain countries. - CountryBlocked, - /// According to access control policy, the current user does not have access to perform the requested action. This code applies even if the resource being acted on doesn't exist. - Forbidden, - /// According to access control policy, the current user does not have access to perform the requested action. This code applies even if the resource being acted on doesn't exist. - InsufficientPermissions, - /// Object overwrite or deletion is not allowed due to an active hold on the object. - ObjectUnderActiveHold, - /// The Cloud Storage rate limit was exceeded. Retry using exponential backoff. 
- RateLimitExceeded, - /// Object overwrite or deletion is not allowed until the object meets the retention period set by the retention policy on the bucket. - RetentionPolicyNotMet, - /// Requests to this API require SSL. - SslRequired, - /// Calls to storage.channels.stop require that the caller own the channel. - StopChannelCallerNotOwner, - /// This error implies that for the project associated with the OAuth token or the developer key provided, access to Cloud Storage JSON API is not enabled. This is most commonly because Cloud Storage JSON API is not enabled in the Google Cloud Console, though there are other cases where the project is blocked or has been deleted when this can occur. - #[serde(rename = "UsageLimits.accessNotConfigured")] - UsageLimitsAccessNotConfigured, - /// The requester is not authorized to use the project specified in their request. The - /// requester must have either the serviceusage.services.use permission or the Editor role for - /// the specified project. - #[serde(rename = "UserProjectAccessDenied")] - UserProjectAccessDenied, - /// There is a problem with the project used in the request that prevents the operation from - /// completing successfully. One issue could be billing. Check the billing page to see if you - /// have a past due balance or if the credit card (or other payment mechanism) on your account is expired. For project creation, see the Projects page in the Google Cloud Console. For other problems, see the Resources and Support page. - #[serde(rename = "UserProjectAccountProblem")] - UserProjectAccountProblem, - /// The developer-specified per-user rate quota was exceeded. If you are the developer, then - /// you can view these quotas at Quotas pane in the Google Cloud Console. 
- UserRateLimitExceeded, - /// Seems to indicate the same thing - // NONEXHAUST - QuotaExceeded, - // - /// Either there is no API method associated with the URL path of the request, or the request - /// refers to one or more resources that were not found. - NotFound, - /// Either there is no API method associated with the URL path of the request, or the request - /// refers to one or more resources that were not found. - MethodNotAllowed, - /// The request timed out. Please try again using truncated exponential backoff. - UploadBrokenConnection, - /// A request to change a resource, usually a storage.*.update or storage.*.patch method, failed - /// to commit the change due to a conflicting concurrent change to the same resource. The - /// request can be retried, though care should be taken to consider the new state of the - /// resource to avoid blind overwriting of other agent's changes. - Conflict, - /// You have attempted to use a resumable upload session that is no longer available. If the - /// reported status code was not successful and you still wish to upload the file, you must - /// start a new session. - Gone, - // /// You must provide the Content-Length HTTP header. This error has no response body. - // LengthRequired, - - // - /// At least one of the pre-conditions you specified did not hold. - ConditionNotMet, - /// Request violates an OrgPolicy constraint. - OrgPolicyConstraintFailed, - // - /// The Cloud Storage JSON API supports up to 5 TB objects. - /// - /// This error may, alternatively, arise if copying objects between locations and/or storage - /// classes can not complete within 30 seconds. In this case, use the `Object::rewrite` method - /// instead. - UploadTooLarge, - /// The requested Range cannot be satisfied. - RequestedRangeNotSatisfiable, - /// A [Cloud Storage JSON API usage limit](https://cloud.google.com/storage/quotas) was - /// exceeded. If your application tries to use more than its limit, additional requests will - /// fail. 
Throttle your client's requests, and/or use truncated exponential backoff. - #[serde(rename = "usageLimits.rateLimitExceeded")] - UsageLimitsRateLimitExceeded, - - // - /// We encountered an internal error. Please try again using truncated exponential backoff. - BackendError, - /// We encountered an internal error. Please try again using truncated exponential backoff. - InternalError, - // - /// May be returned by Google, meaning undocumented. - // NONEXHAUST - GatewayTimeout, -} - -#[derive(Debug, serde::Deserialize)] -#[serde(rename = "camelCase")] -enum BadRequest {} - -#[derive(Debug, serde::Deserialize)] -#[serde(rename = "camelCase")] -enum Unauthorized {} - -#[derive(Debug, serde::Deserialize)] -#[serde(rename = "camelCase")] -enum Forbidden {} - -#[derive(Debug, serde::Deserialize)] -#[serde(rename = "camelCase")] -enum PreconditionFailed {} - -#[derive(Debug, serde::Deserialize)] -#[serde(rename = "camelCase")] -enum InternalServerError {} +} \ No newline at end of file diff --git a/src/global_client/bucket.rs b/src/global_client/bucket.rs new file mode 100644 index 0000000..4b76781 --- /dev/null +++ b/src/global_client/bucket.rs @@ -0,0 +1,480 @@ +use crate::{Bucket, models::{create, IamPolicy, TestIamPermission}, Error} +impl Bucket { + /// Creates a new `Bucket`. There are many options that you can provide for creating a new + /// bucket, so the `NewBucket` resource contains all of them. Note that `NewBucket` implements + /// `Default`, so you don't have to specify the fields you're not using. An error is returned + /// if that bucket name is already taken. 
+ /// ### Example + /// ``` + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::bucket::{Bucket, create::Bucket}; + /// use cloud_storage::bucket::{Location, MultiRegion}; + /// + /// let new_bucket = create::Bucket { + /// name: "cloud-storage-rs-doc-1".to_string(), // this is the only mandatory field + /// location: Location::Multi(MultiRegion::Eu), + /// ..Default::default() + /// }; + /// let bucket = Bucket::create(&new_bucket).await?; + /// # bucket.delete().await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn create(new_bucket: &create::Bucket) -> Result { + crate::CLOUD_CLIENT.bucket().create(new_bucket).await + } + + /// The synchronous equivalent of `Bucket::create`. + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn create_sync(new_bucket: &create::Bucket) -> Result { + crate::runtime()?.block_on(Self::create(new_bucket)) + } + + /// Returns all `Bucket`s within this project. + /// + /// ### Note + /// When using incorrect permissions, this function fails silently and returns an empty list. + /// + /// ### Example + /// ``` + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::Bucket; + /// + /// let buckets = Bucket::list().await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn list() -> Result, Error> { + crate::CLOUD_CLIENT.bucket().list().await + } + + /// The synchronous equivalent of `Bucket::list`. + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn list_sync() -> Result, Error> { + crate::runtime()?.block_on(Self::list()) + } + + /// Returns a single `Bucket` by its name. If the Bucket does not exist, an error is returned. 
+ /// ### Example + /// ``` + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::Bucket; + /// # use cloud_storage::bucket::NewBucket; + /// # let new_bucket = create::Bucket { + /// # name: "cloud-storage-rs-doc-2".to_string(), + /// # ..Default::default() + /// # }; + /// # let _ = Bucket::create(&new_bucket).await?; + /// + /// let bucket = Bucket::read("cloud-storage-rs-doc-2").await?; + /// # bucket.delete().await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn read(name: &str) -> Result { + crate::CLOUD_CLIENT.bucket().read(name).await + } + + /// The synchronous equivalent of `Bucket::read`. + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn read_sync(name: &str) -> Result { + crate::runtime()?.block_on(Self::read(name)) + } + + /// Update an existing `Bucket`. If you declare your bucket as mutable, you can edit its fields. + /// You can then flush your changes to Google Cloud Storage using this method. + /// ### Example + /// ``` + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::bucket::{Bucket, RetentionPolicy}; + /// # use cloud_storage::bucket::NewBucket; + /// # let new_bucket = create::Bucket { + /// # name: "cloud-storage-rs-doc-3".to_string(), + /// # ..Default::default() + /// # }; + /// # let _ = Bucket::create(&new_bucket).await?; + /// + /// let mut bucket = Bucket::read("cloud-storage-rs-doc-3").await?; + /// bucket.retention_policy = Some(RetentionPolicy { + /// retention_period: 50, + /// effective_time: time::OffsetDateTime::now_utc() + std::time::Duration::from_secs(50), + /// is_locked: Some(false), + /// }); + /// bucket.update().await?; + /// # bucket.delete().await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn update(&self) -> Result { + crate::CLOUD_CLIENT.bucket().update(self).await + } + + /// The synchronous equivalent of `Bucket::update`. 
+ /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn update_sync(&self) -> Result { + crate::runtime()?.block_on(self.update()) + } + + /// Delete an existing `Bucket`. This permanently removes a bucket from Google Cloud Storage. + /// An error is returned when you don't have sufficient permissions, or when the + /// `retention_policy` prevents you from deleting your Bucket. + /// ### Example + /// ```no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::Bucket; + /// # use cloud_storage::bucket::NewBucket; + /// # let new_bucket = create::Bucket { + /// # name: "unnecessary-bucket".to_string(), + /// # ..Default::default() + /// # }; + /// # let _ = Bucket::create(&new_bucket).await?; + /// + /// let bucket = Bucket::read("unnecessary-bucket").await?; + /// bucket.delete().await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn delete(self) -> Result<(), Error> { + crate::CLOUD_CLIENT.bucket().delete(self).await + } + + /// The synchronous equivalent of `Bucket::delete`. + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn delete_sync(self) -> Result<(), Error> { + crate::runtime()?.block_on(self.delete()) + } + + /// Returns the [IAM Policy](https://cloud.google.com/iam/docs/) for this bucket. 
+ /// ### Example + /// ``` + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::Bucket; + /// # use cloud_storage::bucket::NewBucket; + /// # let new_bucket = create::Bucket { + /// # name: "cloud-storage-rs-doc-4".to_string(), + /// # ..Default::default() + /// # }; + /// # let _ = Bucket::create(&new_bucket).await?; + /// + /// let bucket = Bucket::read("cloud-storage-rs-doc-4").await?; + /// let policy = bucket.get_iam_policy().await?; + /// # bucket.delete().await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn get_iam_policy(&self) -> Result { + crate::CLOUD_CLIENT.bucket().get_iam_policy(self).await + } + + /// The synchronous equivalent of `Bucket::get_iam_policy`. + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn get_iam_policy_sync(&self) -> Result { + crate::runtime()?.block_on(self.get_iam_policy()) + } + + /// Updates the [IAM Policy](https://cloud.google.com/iam/docs/) for this bucket. 
+ /// ### Example + /// ``` + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::Bucket; + /// use cloud_storage::bucket::{IamPolicy, Binding, IamRole, StandardIamRole, Entity}; + /// # use cloud_storage::bucket::NewBucket; + /// # let new_bucket = create::Bucket { + /// # name: "cloud-storage-rs-doc-5".to_string(), + /// # ..Default::default() + /// # }; + /// # let _ = Bucket::create(&new_bucket).await?; + /// + /// let bucket = Bucket::read("cloud-storage-rs-doc-5").await?; + /// let iam_policy = IamPolicy { + /// version: 1, + /// bindings: vec![ + /// Binding { + /// role: IamRole::Standard(StandardIamRole::ObjectViewer), + /// members: vec!["allUsers".to_string()], + /// condition: None, + /// } + /// ], + /// ..Default::default() + /// }; + /// let policy = bucket.set_iam_policy(&iam_policy).await?; + /// # bucket.delete().await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn set_iam_policy(&self, iam: &IamPolicy) -> Result { + crate::CLOUD_CLIENT.bucket().set_iam_policy(self, iam).await + } + + /// The synchronous equivalent of `Bucket::set_iam_policy`. + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn set_iam_policy_sync(&self, iam: &IamPolicy) -> Result { + crate::runtime()?.block_on(self.set_iam_policy(iam)) + } + + /// Checks whether the user provided in the service account has this permission. + /// ### Example + /// ```no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::Bucket; + /// + /// let bucket = Bucket::read("my_bucket").await?; + /// bucket.test_iam_permission("storage.buckets.get").await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn test_iam_permission(&self, permission: &str) -> Result { + crate::CLOUD_CLIENT + .bucket() + .test_iam_permission(self, permission) + .await + } + + /// The synchronous equivalent of `Bucket::test_iam_policy`. 
+ /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn test_iam_permission_sync(&self, permission: &str) -> Result { + crate::runtime()?.block_on(self.test_iam_permission(permission)) + } +} + + +#[cfg(test)] +mod tests { + use crate::{models::{create, Entity, Role, IamConfiguration, UniformBucketLevelAccess, RetentionPolicy, StandardIamRole, IamPolicy, Binding, IamRole}, Bucket}; + + + + #[tokio::test] + async fn create() -> Result<(), Box> { + #[cfg(feature = "dotenv")] + dotenv::dotenv().ok(); + let base_name = std::env::var("TEST_BUCKET")?; + // use a more complex bucket in this test. + let new_bucket = create::Bucket { + name: format!("{}-test-create", base_name), + default_event_based_hold: Some(true), + acl: Some(vec![create::BucketAccessControl { + entity: Entity::AllUsers, + role: Role::Reader, + }]), + default_object_acl: Some(vec![create::DefaultObjectAccessControl { + entity: Entity::AllUsers, + role: Role::Reader, + }]), + iam_configuration: Some(IamConfiguration { + uniform_bucket_level_access: UniformBucketLevelAccess { + enabled: false, + locked_time: None, + }, + }), + ..Default::default() + }; + let bucket = Bucket::create(&new_bucket).await?; + bucket.delete().await?; + Ok(()) + } + + #[tokio::test] + async fn list() -> Result<(), Box> { + Bucket::list().await?; + Ok(()) + } + + #[tokio::test] + async fn update() -> Result<(), Box> { + let mut bucket = crate::global_client::create_test_bucket("test-update").await; + bucket.retention_policy = Some(RetentionPolicy { + retention_period: 50, + effective_time: time::OffsetDateTime::now_utc() + std::time::Duration::from_secs(50), + is_locked: Some(false), + }); + bucket.update().await?; + let updated = Bucket::read(&bucket.name).await?; + assert_eq!(updated.retention_policy.unwrap().retention_period, 50); + bucket.delete().await?; + Ok(()) + } + + // used a lot throughout the other tests, but included for 
completeness + #[tokio::test] + async fn delete() -> Result<(), Box> { + let bucket = crate::global_client::create_test_bucket("test-delete").await; + bucket.delete().await?; + Ok(()) + } + + #[tokio::test] + async fn get_iam_policy() -> Result<(), Box> { + let bucket = crate::global_client::create_test_bucket("test-get-iam-policy").await; + bucket.get_iam_policy().await?; + bucket.delete().await?; + Ok(()) + } + + #[tokio::test] + async fn set_iam_policy() -> Result<(), Box> { + let bucket = crate::global_client::create_test_bucket("test-set-iam-policy").await; + let iam_policy = IamPolicy { + bindings: vec![Binding { + role: IamRole::Standard(StandardIamRole::ObjectViewer), + members: vec!["allUsers".to_string()], + condition: None, + }], + ..Default::default() + }; + bucket.set_iam_policy(&iam_policy).await?; + assert_eq!(bucket.get_iam_policy().await?.bindings, iam_policy.bindings); + bucket.delete().await?; + Ok(()) + } + + #[tokio::test] + async fn test_iam_permission() -> Result<(), Box> { + let bucket = crate::global_client::create_test_bucket("test-test-ia-permission").await; + bucket.test_iam_permission("storage.buckets.get").await?; + bucket.delete().await?; + Ok(()) + } + + #[cfg(feature = "sync")] + mod sync { + use super::*; + + #[test] + fn create() -> Result<(), Box> { + #[cfg(feature = "dotenv")] + dotenv::dotenv().ok(); + let base_name = std::env::var("TEST_BUCKET")?; + // use a more complex bucket in this test. 
+ let new_bucket = create::Bucket { + name: format!("{}-test-create", base_name), + default_event_based_hold: Some(true), + acl: Some(vec![create::BucketAccessControl { + entity: Entity::AllUsers, + role: Role::Reader, + }]), + default_object_acl: Some(vec![create::DefaultObjectAccessControl { + entity: Entity::AllUsers, + role: Role::Reader, + }]), + iam_configuration: Some(IamConfiguration { + uniform_bucket_level_access: UniformBucketLevelAccess { + enabled: false, + locked_time: None, + }, + }), + ..Default::default() + }; + let bucket = Bucket::create_sync(&new_bucket)?; + bucket.delete_sync()?; + Ok(()) + } + + #[test] + fn list() -> Result<(), Box> { + Bucket::list_sync()?; + Ok(()) + } + + #[test] + fn read() -> Result<(), Box> { + let bucket = crate::global_client::create_test_bucket_sync("test-read"); + let also_bucket = Bucket::read_sync(&bucket.name)?; + assert_eq!(bucket, also_bucket); + bucket.delete_sync()?; + assert!(also_bucket.delete_sync().is_err()); + Ok(()) + } + + #[test] + fn update() -> Result<(), Box> { + let mut bucket = crate::global_client::create_test_bucket_sync("test-update"); + bucket.retention_policy = Some(RetentionPolicy { + retention_period: 50, + effective_time: time::OffsetDateTime::now_utc() + std::time::Duration::from_secs(50), + is_locked: Some(false), + }); + bucket.update_sync()?; + let updated = Bucket::read_sync(&bucket.name)?; + assert_eq!(updated.retention_policy.unwrap().retention_period, 50); + bucket.delete_sync()?; + Ok(()) + } + + // used a lot throughout the other tests, but included for completeness + #[test] + fn delete() -> Result<(), Box> { + let bucket = crate::global_client::create_test_bucket_sync("test-delete"); + bucket.delete_sync()?; + Ok(()) + } + + #[test] + fn get_iam_policy() -> Result<(), Box> { + let bucket = crate::global_client::create_test_bucket_sync("test-get-iam-policy"); + bucket.get_iam_policy_sync()?; + bucket.delete_sync()?; + Ok(()) + } + + #[test] + fn set_iam_policy() -> Result<(), 
Box> { + // use crate::resources::iam_policy::{Binding, IamRole, StandardIamRole}; + + let bucket = crate::global_client::create_test_bucket_sync("test-set-iam-policy"); + let iam_policy = IamPolicy { + bindings: vec![Binding { + role: IamRole::Standard(StandardIamRole::ObjectViewer), + members: vec!["allUsers".to_string()], + condition: None, + }], + ..Default::default() + }; + bucket.set_iam_policy_sync(&iam_policy)?; + assert_eq!(bucket.get_iam_policy_sync()?.bindings, iam_policy.bindings); + bucket.delete_sync()?; + Ok(()) + } + + #[test] + fn test_iam_permission() -> Result<(), Box> { + let bucket = crate::global_client::create_test_bucket_sync("test-test-ia-permission"); + bucket.test_iam_permission_sync("storage.buckets.get")?; + bucket.delete_sync()?; + Ok(()) + } + } +} \ No newline at end of file diff --git a/src/resources/bucket_access_control.rs b/src/global_client/bucket_access_control.rs similarity index 53% rename from src/resources/bucket_access_control.rs rename to src/global_client/bucket_access_control.rs index fdf072a..8eeab7f 100644 --- a/src/resources/bucket_access_control.rs +++ b/src/global_client/bucket_access_control.rs @@ -1,94 +1,7 @@ -pub use crate::resources::common::{Entity, ProjectTeam, Role}; - -/// The BucketAccessControl resource represents the Access Control Lists (ACLs) for buckets within -/// Google Cloud Storage. ACLs let you specify who has access to your data and to what extent. -/// -/// ```text,ignore -/// Important: This method fails with a 400 Bad Request response for buckets with uniform -/// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to -/// control access instead. -/// ``` -/// -/// There are three roles that can be assigned to an entity: -/// -/// * READERs can get the bucket, though no acl property will be returned, and list the bucket's -/// objects. -/// * WRITERs are READERs, and they can insert objects into the bucket and delete the bucket's -/// objects. 
-/// * OWNERs are WRITERs, and they can get the acl property of a bucket, update a bucket, and call -/// all BucketAccessControl methods on the bucket. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct BucketAccessControl { - /// The kind of item this is. For bucket access control entries, this is always - /// `storage#bucketAccessControl`. - pub kind: String, - /// The ID of the access-control entry. - pub id: String, - /// The link to this access-control entry. - pub self_link: String, - /// The name of the bucket. - pub bucket: String, - /// The entity holding the permission, in one of the following forms: - /// - /// * `user-userId` - /// * `user-email` - /// * `group-groupId` - /// * `group-email` - /// * `domain-domain` - /// * `project-team-projectId` - /// * `allUsers` - /// * `allAuthenticatedUsers` - /// - /// Examples: - /// - /// * The user liz@example.com would be user-liz@example.com. - /// * The group example@googlegroups.com would be group-example@googlegroups.com. - /// * To refer to all members of the G Suite for Business domain example.com, the entity would - /// be domain-example.com. - pub entity: Entity, - /// The access permission for the entity. - pub role: Role, - /// The email address associated with the entity, if any. - pub email: Option, - /// The ID for the entity, if any. - pub entity_id: Option, - /// The domain associated with the entity, if any. - pub domain: Option, - /// The project team associated with the entity, if any. - pub project_team: Option, - /// HTTP 1.1 Entity tag for the access-control entry. - pub etag: String, -} - -/// Model that can be used to create a new BucketAccessControl object. 
-#[derive(Debug, PartialEq, serde::Serialize)] -#[serde(rename_all = "camelCase")] -pub struct NewBucketAccessControl { - /// The entity holding the permission, in one of the following forms: - /// - /// * `user-userId` - /// * `user-email` - /// * `group-groupId` - /// * `group-email` - /// * `domain-domain` - /// * `project-team-projectId` - /// * `allUsers` - /// * `allAuthenticatedUsers` - /// - /// Examples: - /// - /// * The user liz@example.com would be user-liz@example.com. - /// * The group example@googlegroups.com would be group-example@googlegroups.com. - /// * To refer to all members of the G Suite for Business domain example.com, the entity would - /// be domain-example.com. - pub entity: Entity, - /// The access permission for the entity. - pub role: Role, -} +use crate::{models::{BucketAccessControl, create, Entity}, Error}; impl BucketAccessControl { - /// Create a new `BucketAccessControl` using the provided `NewBucketAccessControl`, related to + /// Create a new `BucketAccessControl` using the provided `create::BucketAccessControl`, related to /// the `Bucket` provided by the `bucket_name` argument. 
/// 
/// ### Important 
@@ -99,25 +12,24 @@ impl BucketAccessControl { 
/// ```rust,no_run 
/// # #[tokio::main] 
/// # async fn main() -> Result<(), Box> { 
- /// use cloud_storage::bucket_access_control::{BucketAccessControl, NewBucketAccessControl}; 
+ /// use cloud_storage::bucket_access_control::{BucketAccessControl, create}; 
/// use cloud_storage::bucket_access_control::{Role, Entity}; 
/// 
- /// let new_bucket_access_control = NewBucketAccessControl { 
+ /// let new_bucket_access_control = create::BucketAccessControl { 
/// entity: Entity::AllUsers, 
/// role: Role::Reader, 
/// }; 
- /// BucketAccessControl::create("mybucket", &new_bucket_access_control).await?; 
+ /// BucketAccessControl::create_using("my_bucket", &new_bucket_access_control).await?; 
/// # Ok(()) 
/// # } 
/// ``` 
- #[cfg(feature = "global-client")] 
- pub async fn create( 
+ pub async fn create_using( 
bucket: &str, 
- new_bucket_access_control: &NewBucketAccessControl, 
- ) -> crate::Result { 
+ new_bucket_access_control: &create::BucketAccessControl, 
+ ) -> Result { 
crate::CLOUD_CLIENT 
- .bucket_access_control() 
- .create(bucket, new_bucket_access_control) 
+ .bucket_access_control(bucket) 
+ .create_using(new_bucket_access_control) 
.await 
} 
@@ -125,12 +37,12 @@ impl BucketAccessControl { 
/// 
/// ### Features 
/// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
- #[cfg(all(feature = "global-client", feature = "sync"))] 
+ #[cfg(feature = "sync")] 
pub fn create_sync( 
bucket: &str, 
- new_bucket_access_control: &NewBucketAccessControl, 
- ) -> crate::Result { 
+ new_bucket_access_control: &create::BucketAccessControl, 
+ ) -> Result { 
- crate::runtime()?.block_on(Self::create(bucket, new_bucket_access_control)) 
+ crate::runtime()?.block_on(Self::create_using(bucket, new_bucket_access_control)) 
} 
/// Returns all `BucketAccessControl`s related to this bucket. 
@@ -145,15 +57,14 @@ impl BucketAccessControl { /// # async fn main() -> Result<(), Box> { /// use cloud_storage::bucket_access_control::BucketAccessControl; /// - /// let acls = BucketAccessControl::list("mybucket").await?; + /// let acls = BucketAccessControl::list("my_bucket").await?; /// # Ok(()) /// # } /// ``` - #[cfg(feature = "global-client")] - pub async fn list(bucket: &str) -> crate::Result> { + pub async fn list(bucket: &str) -> Result, Error> { crate::CLOUD_CLIENT - .bucket_access_control() - .list(bucket) + .bucket_access_control(bucket) + .list() .await } @@ -161,8 +72,8 @@ impl BucketAccessControl { /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn list_sync(bucket: &str) -> crate::Result> { + #[cfg(feature = "sync")] + pub fn list_sync(bucket: &str) -> Result, Error> { crate::runtime()?.block_on(Self::list(bucket)) } @@ -178,15 +89,14 @@ impl BucketAccessControl { /// # async fn main() -> Result<(), Box> { /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; /// - /// let controls = BucketAccessControl::read("mybucket", &Entity::AllUsers).await?; + /// let controls = BucketAccessControl::read("my_bucket", &Entity::AllUsers).await?; /// # Ok(()) /// # } /// ``` - #[cfg(feature = "global-client")] - pub async fn read(bucket: &str, entity: &Entity) -> crate::Result { + pub async fn read(bucket: &str, entity: &Entity) -> Result { crate::CLOUD_CLIENT - .bucket_access_control() - .read(bucket, entity) + .bucket_access_control(bucket) + .read(entity) .await } @@ -194,8 +104,8 @@ impl BucketAccessControl { /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
- #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn read_sync(bucket: &str, entity: &Entity) -> crate::Result { + #[cfg(feature = "sync")] + pub fn read_sync(bucket: &str, entity: &Entity) -> Result { crate::runtime()?.block_on(Self::read(bucket, entity)) } @@ -211,16 +121,15 @@ impl BucketAccessControl { /// # async fn main() -> Result<(), Box> { /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; /// - /// let mut acl = BucketAccessControl::read("mybucket", &Entity::AllUsers).await?; + /// let mut acl = BucketAccessControl::read("my_bucket", &Entity::AllUsers).await?; /// acl.entity = Entity::AllAuthenticatedUsers; /// acl.update().await?; /// # Ok(()) /// # } /// ``` - #[cfg(feature = "global-client")] - pub async fn update(&self) -> crate::Result { + pub async fn update(&self) -> Result { crate::CLOUD_CLIENT - .bucket_access_control() + .bucket_access_control(&self.bucket) .update(self) .await } @@ -229,8 +138,8 @@ impl BucketAccessControl { /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
- #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn update_sync(&self) -> crate::Result { + #[cfg(feature = "sync")] + pub fn update_sync(&self) -> Result { crate::runtime()?.block_on(self.update()) } @@ -246,15 +155,14 @@ impl BucketAccessControl { /// # async fn main() -> Result<(), Box> { /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; /// - /// let controls = BucketAccessControl::read("mybucket", &Entity::AllUsers).await?; + /// let controls = BucketAccessControl::read("my_bucket", &Entity::AllUsers).await?; /// controls.delete().await?; /// # Ok(()) /// # } /// ``` - #[cfg(feature = "global-client")] - pub async fn delete(self) -> crate::Result<()> { + pub async fn delete(self) -> Result<(), Error> { crate::CLOUD_CLIENT - .bucket_access_control() + .bucket_access_control(&self.bucket) .delete(self) .await } @@ -263,24 +171,25 @@ impl BucketAccessControl { /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
- #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn delete_sync(self) -> crate::Result<()> { + #[cfg(feature = "sync")] + pub fn delete_sync(self) -> Result<(), Error> { crate::runtime()?.block_on(self.delete()) } } -#[cfg(all(test, feature = "global-client"))] +#[cfg(test)] mod tests { - use super::*; + use crate::models::{create, Entity, Role, BucketAccessControl}; + #[tokio::test] async fn create() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - let new_bucket_access_control = NewBucketAccessControl { + let bucket = crate::global_client::read_test_bucket().await; + let new_bucket_access_control = create::BucketAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; - BucketAccessControl::create(&bucket.name, &new_bucket_access_control) + BucketAccessControl::create_using(&bucket.name, &new_bucket_access_control) .await .unwrap(); Ok(()) @@ -288,14 +197,14 @@ mod tests { #[tokio::test] async fn list() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; + let bucket = crate::global_client::read_test_bucket().await; BucketAccessControl::list(&bucket.name).await?; Ok(()) } #[tokio::test] async fn read() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; + let bucket = crate::global_client::read_test_bucket().await; BucketAccessControl::read(&bucket.name, &Entity::AllUsers).await?; Ok(()) } @@ -303,12 +212,12 @@ mod tests { #[tokio::test] async fn update() -> Result<(), Box> { // use a seperate bucket to prevent synchronization issues - let bucket = crate::create_test_bucket("test-update-bucket-access-controls").await; - let new_bucket_access_control = NewBucketAccessControl { + let bucket = crate::global_client::create_test_bucket("test-update-bucket-access-controls").await; + let new_bucket_access_control = create::BucketAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; - BucketAccessControl::create(&bucket.name, &new_bucket_access_control).await?; + 
BucketAccessControl::create_using(&bucket.name, &new_bucket_access_control).await?; let mut acl = BucketAccessControl::read(&bucket.name, &Entity::AllUsers).await?; acl.entity = Entity::AllAuthenticatedUsers; acl.update().await?; @@ -319,26 +228,26 @@ mod tests { #[tokio::test] async fn delete() -> Result<(), Box> { // use a seperate bucket to prevent synchronization issues - let bucket = crate::create_test_bucket("test-delete-bucket-access-controls").await; - let new_bucket_access_control = NewBucketAccessControl { + let bucket = crate::global_client::create_test_bucket("test-delete-bucket-access-controls").await; + let new_bucket_access_control = create::BucketAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; - BucketAccessControl::create(&bucket.name, &new_bucket_access_control).await?; + BucketAccessControl::create_using(&bucket.name, &new_bucket_access_control).await?; let acl = BucketAccessControl::read(&bucket.name, &Entity::AllUsers).await?; acl.delete().await?; bucket.delete().await?; Ok(()) } - #[cfg(all(feature = "global-client", feature = "sync"))] + #[cfg(feature = "sync")] mod sync { use super::*; #[test] fn create() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); - let new_bucket_access_control = NewBucketAccessControl { + let bucket = crate::global_client::read_test_bucket_sync(); + let new_bucket_access_control = create::BucketAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; @@ -348,14 +257,14 @@ mod tests { #[test] fn list() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); + let bucket = crate::global_client::read_test_bucket_sync(); BucketAccessControl::list_sync(&bucket.name)?; Ok(()) } #[test] fn read() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); + let bucket = crate::global_client::read_test_bucket_sync(); BucketAccessControl::read_sync(&bucket.name, &Entity::AllUsers)?; Ok(()) } @@ -363,8 +272,8 @@ mod tests { #[test] fn update() -> Result<(), Box> { 
// use a seperate bucket to prevent synchronization issues - let bucket = crate::create_test_bucket_sync("test-update-bucket-access-controls"); - let new_bucket_access_control = NewBucketAccessControl { + let bucket = crate::global_client::create_test_bucket_sync("test-update-bucket-access-controls"); + let new_bucket_access_control = create::BucketAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; @@ -379,8 +288,8 @@ mod tests { #[test] fn delete() -> Result<(), Box> { // use a seperate bucket to prevent synchronization issues - let bucket = crate::create_test_bucket_sync("test-delete-bucket-access-controls"); - let new_bucket_access_control = NewBucketAccessControl { + let bucket = crate::global_client::create_test_bucket_sync("test-delete-bucket-access-controls"); + let new_bucket_access_control = create::BucketAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; diff --git a/src/resources/default_object_access_control.rs b/src/global_client/default_object_access_control.rs similarity index 58% rename from src/resources/default_object_access_control.rs rename to src/global_client/default_object_access_control.rs index 3bd4885..0235a52 100644 --- a/src/resources/default_object_access_control.rs +++ b/src/global_client/default_object_access_control.rs @@ -1,78 +1,4 @@ -#![allow(unused_imports)] - -pub use crate::resources::common::{Entity, ProjectTeam, Role}; -use crate::{error::GoogleResponse, resources::common::ListResponse}; - -/// The DefaultObjectAccessControls resources represent the Access Control Lists (ACLs) applied to a -/// new object within Google Cloud Storage when no ACL was provided for that object. ACLs let you -/// specify who has access to your data and to what extent. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct DefaultObjectAccessControl { - /// The kind of item this is. 
For object access control entries, this is always - /// storage#objectAccessControl. - pub kind: String, - /// The entity holding the permission, in one of the following forms: - /// - /// * `user-userId` - /// * `user-email` - /// * `group-groupId` - /// * `group-email` - /// * `domain-domain` - /// * `project-team-projectId` - /// * `allUsers` - /// * `allAuthenticatedUsers` - /// - /// Examples: - /// - /// * The user liz@example.com would be user-liz@example.com. - /// * The group example@googlegroups.com would be group-example@googlegroups.com. - /// * To refer to all members of the G Suite for Business domain example.com, the entity would - /// be domain-example.com. - pub entity: Entity, - /// The access permission for the entity. - pub role: Role, - /// The email address associated with the entity, if any. - pub email: Option, - /// The ID for the entity, if any. - pub entity_id: Option, - /// The domain associated with the entity, if any. - pub domain: Option, - /// The project team associated with the entity, if any. - pub project_team: Option, - /// HTTP 1.1 Entity tag for the access-control entry. - pub etag: String, - /// The bucket this resource belongs to. - #[serde(default)] - pub bucket: String, // this field is not returned by Google, but we populate it manually for the - // convenience of the end user. -} - -/// Model that can be used to create a new DefaultObjectAccessControl object. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct NewDefaultObjectAccessControl { - /// The entity holding the permission, in one of the following forms: - /// - /// * `user-userId` - /// * `user-email` - /// * `group-groupId` - /// * `group-email` - /// * `domain-domain` - /// * `project-team-projectId` - /// * `allUsers` - /// * `allAuthenticatedUsers` - /// - /// Examples: - /// - /// * The user liz@example.com would be user-liz@example.com. 
- /// * The group example@googlegroups.com would be group-example@googlegroups.com. - /// * To refer to all members of the G Suite for Business domain example.com, the entity would - /// be domain-example.com. - pub entity: Entity, - /// The access permission for the entity. - pub role: Role, -} +use crate::{models::{DefaultObjectAccessControl, create, Entity}, Error}; impl DefaultObjectAccessControl { /// Create a new `DefaultObjectAccessControl` entry on the specified bucket. @@ -85,38 +11,34 @@ impl DefaultObjectAccessControl { /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { /// use cloud_storage::default_object_access_control::{ - /// DefaultObjectAccessControl, NewDefaultObjectAccessControl, Role, Entity, + /// DefaultObjectAccessControl, create::DefaultObjectAccessControl, Role, Entity, /// }; /// - /// let new_acl = NewDefaultObjectAccessControl { + /// let new_acl = create::DefaultObjectAccessControl { /// entity: Entity::AllAuthenticatedUsers, /// role: Role::Reader, /// }; - /// let default_acl = DefaultObjectAccessControl::create("mybucket", &new_acl).await?; + /// let default_acl = DefaultObjectAccessControl::create("my_bucket", &new_acl).await?; /// # default_acl.delete().await?; /// # Ok(()) /// # } /// ``` - #[cfg(feature = "global-client")] pub async fn create( bucket: &str, - new_acl: &NewDefaultObjectAccessControl, - ) -> crate::Result { - crate::CLOUD_CLIENT - .default_object_access_control() - .create(bucket, new_acl) - .await + new_acl: &create::DefaultObjectAccessControl, + ) -> Result { + crate::CLOUD_CLIENT.default_object_access_control(bucket).create(new_acl).await } /// The synchronous equivalent of `DefautObjectAccessControl::create`. /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
- #[cfg(all(feature = "global-client", feature = "sync"))] + #[cfg(feature = "sync")] pub fn create_sync( bucket: &str, - new_acl: &NewDefaultObjectAccessControl, - ) -> crate::Result { + new_acl: &create::DefaultObjectAccessControl, + ) -> Result { crate::runtime()?.block_on(Self::create(bucket, new_acl)) } @@ -131,24 +53,20 @@ impl DefaultObjectAccessControl { /// # async fn main() -> Result<(), Box> { /// use cloud_storage::default_object_access_control::DefaultObjectAccessControl; /// - /// let default_acls = DefaultObjectAccessControl::list("mybucket").await?; + /// let default_acls = DefaultObjectAccessControl::list("my_bucket").await?; /// # Ok(()) /// # } /// ``` - #[cfg(feature = "global-client")] - pub async fn list(bucket: &str) -> crate::Result> { - crate::CLOUD_CLIENT - .default_object_access_control() - .list(bucket) - .await + pub async fn list(bucket: &str) -> Result, Error> { + crate::CLOUD_CLIENT.default_object_access_control(bucket).list().await } /// The synchronous equivalent of `DefautObjectAccessControl::list`. /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
- #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn list_sync(bucket: &str) -> crate::Result> { + #[cfg(feature = "sync")] + pub fn list_sync(bucket: &str) -> Result, Error> { crate::runtime()?.block_on(Self::list(bucket)) } @@ -167,15 +85,12 @@ impl DefaultObjectAccessControl { /// # async fn main() -> Result<(), Box> { /// use cloud_storage::default_object_access_control::{DefaultObjectAccessControl, Entity}; /// - /// let default_acl = DefaultObjectAccessControl::read("mybucket", &Entity::AllUsers).await?; + /// let default_acl = DefaultObjectAccessControl::read("my_bucket", &Entity::AllUsers).await?; /// # Ok(()) /// # } /// ``` - #[cfg(feature = "global-client")] - pub async fn read(bucket: &str, entity: &Entity) -> crate::Result { - crate::CLOUD_CLIENT - .default_object_access_control() - .read(bucket, entity) + pub async fn read(bucket: &str, entity: &Entity) -> Result { + crate::CLOUD_CLIENT.default_object_access_control(bucket).read(entity) .await } @@ -183,8 +98,8 @@ impl DefaultObjectAccessControl { /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn read_sync(bucket: &str, entity: &Entity) -> crate::Result { + #[cfg(feature = "sync")] + pub fn read_sync(bucket: &str, entity: &Entity) -> Result { crate::runtime()?.block_on(Self::read(bucket, entity)) } @@ -205,20 +120,16 @@ impl DefaultObjectAccessControl { /// # Ok(()) /// # } /// ``` - #[cfg(feature = "global-client")] - pub async fn update(&self) -> crate::Result { - crate::CLOUD_CLIENT - .default_object_access_control() - .update(self) - .await + pub async fn update(&self) -> Result { + crate::CLOUD_CLIENT.default_object_access_control(&self.bucket).update(self).await } /// The synchronous equivalent of `DefautObjectAccessControl::update`. /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
- #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn update_sync(&self) -> crate::Result { + #[cfg(feature = "sync")] + pub fn update_sync(&self) -> Result { crate::runtime()?.block_on(self.update()) } @@ -238,32 +149,30 @@ impl DefaultObjectAccessControl { /// # Ok(()) /// # } /// ``` - #[cfg(feature = "global-client")] pub async fn delete(self) -> Result<(), crate::Error> { - crate::CLOUD_CLIENT - .default_object_access_control() - .delete(self) - .await + crate::CLOUD_CLIENT.default_object_access_control(&self.bucket).delete(self).await } /// The synchronous equivalent of `DefautObjectAccessControl::delete`. /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] + #[cfg(feature = "sync")] pub fn delete_sync(self) -> Result<(), crate::Error> { crate::runtime()?.block_on(self.delete()) } } -#[cfg(all(test, feature = "global-client"))] +#[cfg(test)] mod tests { + use crate::models::Role; + use super::*; #[tokio::test] async fn create() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - let new_acl = NewDefaultObjectAccessControl { + let bucket = crate::global_client::read_test_bucket().await; + let new_acl = create::DefaultObjectAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; @@ -273,8 +182,8 @@ mod tests { #[tokio::test] async fn read() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - NewDefaultObjectAccessControl { + let bucket = crate::global_client::read_test_bucket().await; + create::DefaultObjectAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; @@ -284,15 +193,15 @@ mod tests { #[tokio::test] async fn list() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; + let bucket = crate::global_client::read_test_bucket().await; DefaultObjectAccessControl::list(&bucket.name).await?; Ok(()) } #[tokio::test] async fn update() -> Result<(), Box> { - let 
bucket = crate::read_test_bucket().await; - let new_acl = NewDefaultObjectAccessControl { + let bucket = crate::global_client::read_test_bucket().await; + let new_acl = create::DefaultObjectAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; @@ -304,21 +213,21 @@ mod tests { #[tokio::test] async fn delete() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; + let bucket = crate::global_client::read_test_bucket().await; let default_acl = DefaultObjectAccessControl::read(&bucket.name, &Entity::AllAuthenticatedUsers).await?; default_acl.delete().await?; Ok(()) } - #[cfg(all(feature = "global-client", feature = "sync"))] + #[cfg(feature = "sync")] mod sync { use super::*; #[test] fn create() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); - let new_acl = NewDefaultObjectAccessControl { + let bucket = crate::global_client::read_test_bucket_sync(); + let new_acl = create::DefaultObjectAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; @@ -328,8 +237,8 @@ mod tests { #[test] fn read() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); - let new_acl = NewDefaultObjectAccessControl { + let bucket = crate::global_client::read_test_bucket_sync(); + let new_acl = create::DefaultObjectAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; @@ -340,15 +249,15 @@ mod tests { #[test] fn list() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); + let bucket = crate::global_client::read_test_bucket_sync(); DefaultObjectAccessControl::list_sync(&bucket.name)?; Ok(()) } #[test] fn update() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); - let new_acl = NewDefaultObjectAccessControl { + let bucket = crate::global_client::read_test_bucket_sync(); + let new_acl = create::DefaultObjectAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; @@ -360,8 +269,8 @@ mod tests { #[test] fn delete() -> Result<(), Box> { - let bucket = 
crate::read_test_bucket_sync(); - let new_acl = NewDefaultObjectAccessControl { + let bucket = crate::global_client::read_test_bucket_sync(); + let new_acl = create::DefaultObjectAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; diff --git a/src/resources/hmac_key.rs b/src/global_client/hmac_key.rs similarity index 69% rename from src/resources/hmac_key.rs rename to src/global_client/hmac_key.rs index 32fb3bb..0965f30 100644 --- a/src/resources/hmac_key.rs +++ b/src/global_client/hmac_key.rs @@ -1,80 +1,4 @@ -#![allow(unused_imports)] -#![allow(dead_code)] - -use crate::error::GoogleResponse; - -/// The `HmacKey` resource represents an HMAC key within Cloud Storage. The resource consists of a -/// secret and `HmacMeta`. HMAC keys can be used as credentials for service accounts. For more -/// information, see HMAC Keys. -/// -/// Note that the `HmacKey` resource is only returned when you use `HmacKey::create`. Other -/// methods, such as `HmacKey::read`, return the metadata portion of the HMAC key resource. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct HmacKey { - /// The kind of item this is. For HMAC keys, this is always `storage#hmacKey`. - pub kind: String, - /// HMAC key metadata. - pub metadata: HmacMeta, - /// HMAC secret key material. - pub secret: String, -} - -/// Contains information about an Hmac Key. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct HmacMeta { - /// The kind of item this is. For HMAC key metadata, this is always `storage#hmacKeyMetadata`. - pub kind: String, - /// The ID of the HMAC key, including the Project ID and the Access ID. - pub id: String, - /// The link to this resource. - pub self_link: String, - /// The access ID of the HMAC Key. - pub access_id: String, - /// The Project ID of the project that owns the service account to which the key authenticates. 
- pub project_id: String, - /// The email address of the key's associated service account. - pub service_account_email: String, - /// The state of the key. - pub state: HmacState, - /// The creation time of the HMAC key. - #[serde(with = "time::serde::rfc3339")] - pub time_created: time::OffsetDateTime, - /// The last modification time of the HMAC key metadata. - #[serde(with = "time::serde::rfc3339")] - pub updated: time::OffsetDateTime, - /// HTTP 1.1 Entity tag for the HMAC key. - pub etag: String, -} - -/// The state of an Hmac Key. -#[derive(Debug, Clone, Copy, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "UPPERCASE")] -pub enum HmacState { - /// This Hmac key is currently used. - Active, - /// This Hmac key has been set to inactive. - Inactive, - /// This Hmac key has been permanently deleted. - Deleted, -} - -#[derive(Debug, serde::Deserialize)] -pub(crate) struct ListResponse { - pub(crate) items: Vec, -} - -#[derive(serde::Serialize)] -struct UpdateRequest { - secret: String, - metadata: UpdateMeta, -} - -#[derive(serde::Serialize)] -pub(crate) struct UpdateMeta { - pub(crate) state: HmacState, -} +use crate::{models::{HmacKey, HmacMeta, HmacState}, Error}; impl HmacKey { /// Creates a new HMAC key for the specified service account. @@ -97,8 +21,7 @@ impl HmacKey { /// # Ok(()) /// # } /// ``` - #[cfg(feature = "global-client")] - pub async fn create() -> crate::Result { + pub async fn create() -> Result { crate::CLOUD_CLIENT.hmac_key().create().await } @@ -106,8 +29,8 @@ impl HmacKey { /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
- #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn create_sync() -> crate::Result { + #[cfg(feature = "sync")] + pub fn create_sync() -> Result { crate::runtime()?.block_on(Self::create()) } @@ -130,8 +53,7 @@ impl HmacKey { /// # Ok(()) /// # } /// ``` - #[cfg(feature = "global-client")] - pub async fn list() -> crate::Result> { + pub async fn list() -> Result, Error> { crate::CLOUD_CLIENT.hmac_key().list().await } @@ -139,8 +61,8 @@ impl HmacKey { /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn list_sync() -> crate::Result> { + #[cfg(feature = "sync")] + pub fn list_sync() -> Result, Error> { crate::runtime()?.block_on(Self::list()) } @@ -162,8 +84,7 @@ impl HmacKey { /// let key = HmacKey::read("some identifier").await?; /// # Ok(()) /// # } - #[cfg(feature = "global-client")] - pub async fn read(access_id: &str) -> crate::Result { + pub async fn read(access_id: &str) -> Result { crate::CLOUD_CLIENT.hmac_key().read(access_id).await } @@ -171,8 +92,8 @@ impl HmacKey { /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn read_sync(access_id: &str) -> crate::Result { + #[cfg(feature = "sync")] + pub fn read_sync(access_id: &str) -> Result { crate::runtime()?.block_on(Self::read(access_id)) } @@ -194,8 +115,7 @@ impl HmacKey { /// let key = HmacKey::update("your key", HmacState::Active).await?; /// # Ok(()) /// # } - #[cfg(feature = "global-client")] - pub async fn update(access_id: &str, state: HmacState) -> crate::Result { + pub async fn update(access_id: &str, state: HmacState) -> Result { crate::CLOUD_CLIENT .hmac_key() .update(access_id, state) @@ -206,8 +126,8 @@ impl HmacKey { /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
- #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn update_sync(access_id: &str, state: HmacState) -> crate::Result { + #[cfg(feature = "sync")] + pub fn update_sync(access_id: &str, state: HmacState) -> Result { crate::runtime()?.block_on(Self::update(access_id, state)) } @@ -228,19 +148,18 @@ impl HmacKey { /// HmacKey::delete(&key.access_id).await?; /// # Ok(()) /// # } - #[cfg(feature = "global-client")] - pub async fn delete(access_id: &str) -> crate::Result<()> { + pub async fn delete(access_id: &str) -> Result<(), Error> { crate::CLOUD_CLIENT.hmac_key().delete(access_id).await } /// The synchronous equivalent of `HmacKey::delete`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn delete_sync(access_id: &str) -> crate::Result<()> { + #[cfg(feature = "sync")] + pub fn delete_sync(access_id: &str) -> Result<(), Error> { crate::runtime()?.block_on(Self::delete(access_id)) } } -#[cfg(all(test, feature = "global-client"))] +#[cfg(test)] mod tests { use super::*; @@ -307,7 +226,7 @@ mod tests { Ok(()) } - #[cfg(all(feature = "global-client", feature = "sync"))] + #[cfg(feature = "sync")] mod sync { use super::*; diff --git a/src/global_client/mod.rs b/src/global_client/mod.rs new file mode 100644 index 0000000..458484e --- /dev/null +++ b/src/global_client/mod.rs @@ -0,0 +1,63 @@ +mod bucket; +mod bucket_access_control; +mod default_object_access_control; +mod hmac_key; +mod object_access_control; +mod object; +use once_cell::sync::Lazy; + +pub(crate) static CLOUD_CLIENT: Lazy = Lazy::new(crate::client::Client::default); + +#[cfg(test)] +pub(crate) use self::test_helpers::*; + +#[cfg(test)] +mod test_helpers { + use crate::{Bucket, models::create}; + + pub(crate) async fn read_test_bucket() -> Bucket { + #[cfg(feature = "dotenv")] + dotenv::dotenv().ok(); + let name = std::env::var("TEST_BUCKET").unwrap(); + match Bucket::read(&name).await { + Ok(bucket) => bucket, + Err(_not_found) => Bucket::create(&create::Bucket { + name, + 
..create::Bucket::default() 
+ }) 
+ .await 
+ .unwrap(), 
+ } 
+ } 
+ 
+ #[cfg(feature = "sync")] 
+ pub(crate) fn read_test_bucket_sync() -> Bucket { 
+ crate::runtime().unwrap().block_on(read_test_bucket()) 
+ } 
+ 
+ // since all tests run in parallel, we need to make sure we do not create multiple buckets with 
+ // the same name in each test. 
+ #[cfg(feature = "sync")] 
+ pub(crate) fn create_test_bucket_sync(name: &str) -> Bucket { 
+ crate::runtime().unwrap().block_on(create_test_bucket(name)) 
+ } 
+ 
+ // since all tests run in parallel, we need to make sure we do not create multiple buckets with 
+ // the same name in each test. 
+ pub(crate) async fn create_test_bucket(name: &str) -> Bucket { 
+ std::thread::sleep(std::time::Duration::from_millis(1500)); // avoid getting rate limited 
+ 
+ #[cfg(feature = "dotenv")] 
+ dotenv::dotenv().ok(); 
+ let base_name = std::env::var("TEST_BUCKET").unwrap(); 
+ let name = format!("{}-{}", base_name, name); 
+ let new_bucket = create::Bucket { 
+ name, 
+ ..create::Bucket::default() 
+ }; 
+ match Bucket::create(&new_bucket).await { 
+ Ok(bucket) => bucket, 
+ Err(_already_exists) => Bucket::read(&new_bucket.name).await.unwrap(), 
+ } 
+ } 
+} 
\ No newline at end of file 
diff --git a/src/global_client/object.rs b/src/global_client/object.rs 
new file mode 100644 
index 0000000..dec3603 
--- /dev/null 
+++ b/src/global_client/object.rs 
@@ -0,0 +1,1137 @@ 
+use futures_util::{TryStream, Stream}; 
+ 
+use crate::{Object, models::{CreateParameters, ObjectList, ReadParameters, UpdateParameters, DeleteParameters, ComposeRequest, ComposeParameters, CopyParameters, RewriteParameters}, Error, ListRequest}; 
+ 
+impl Object { 
+ /// Create a new object. 
+ /// Upload a file that is loaded in memory to google cloud storage, where it will be 
+ /// interpreted according to the mime type you specified. 
+ /// ## Example + /// ```rust,no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # fn read_cute_cat(_in: &str) -> Vec { vec![0, 1] } + /// use cloud_storage::Object; + /// + /// let file: Vec = read_cute_cat("cat.png"); + /// Object::create("cat-photos", file, "recently read cat.png", "image/png", None).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn create( + bucket: &str, + file: Vec, + filename: &str, + mime_type: &str, + parameters: Option, + ) -> Result { + crate::CLOUD_CLIENT + .object() + .create(bucket, file, filename, mime_type, parameters) + .await + } + + /// The synchronous equivalent of `Object::create`. + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn create_sync( + bucket: &str, + file: Vec, + filename: &str, + mime_type: &str, + parameters: Option, + ) -> Result { + crate::runtime()?.block_on(Self::create(bucket, file, filename, mime_type, parameters)) + } + + /// Create a new object with metadata. + /// Upload a file as that is loaded in memory to google cloud storage, where it will be + /// interpreted according to the mime type you specified. 
+ /// ## Example + /// ```rust,no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # fn read_cute_cat(_in: &str) -> Vec { vec![0, 1] } + /// use cloud_storage::Object; + /// + /// let file: Vec = read_cute_cat("cat.png"); + /// let metadata = serde_json::json!({ + /// "metadata": { + /// "custom_id": "1234" + /// } + /// }); + /// Object::create("cat-photos", file, "recently read cat.png", "image/png", &metadata).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn create_with( + bucket: &str, + file: Vec, + filename: &str, + mime_type: &str, + metadata: &serde_json::Value, + ) -> Result { + crate::CLOUD_CLIENT + .object() + .create_with(bucket, file, filename, mime_type, metadata) + .await + } + + /// Synchronous equivalent of `Object::create_with` + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn create_with_sync( + bucket: &str, + file: Vec, + filename: &str, + mime_type: &str, + metadata: &serde_json::Value, + ) -> Result { + crate::runtime()?.block_on(Self::create_with( + bucket, file, filename, mime_type, metadata, + )) + } + + /// Create a new object. This works in the same way as `Object::create`, except it does not need + /// to load the entire file in ram. + /// ## Example + /// ```rust,no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::Object; + /// + /// let file = reqwest::Client::new() + /// .get("https://my_domain.rs/nice_cat_photo.png") + /// .send() + /// .await? 
+ /// .bytes_stream(); + /// Object::create_streamed("cat-photos", file, 10, "recently read cat.png", "image/png", None).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn create_streamed( + bucket: &str, + stream: S, + length: impl Into>, + filename: &str, + mime_type: &str, + parameters: Option, + ) -> Result + where + S: TryStream + Send + Sync + 'static, + S::Error: Into>, + bytes::Bytes: From, + { + crate::CLOUD_CLIENT + .object() + .create_streamed(bucket, stream, length, filename, mime_type, parameters) + .await + } + + /// The synchronous equivalent of `Object::create_streamed`. + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn create_streamed_sync( + bucket: &str, + mut file: R, + length: impl Into>, + filename: &str, + mime_type: &str, + parameters: Option, + ) -> Result { + let mut buffer = Vec::new(); + file.read_to_end(&mut buffer) + .map_err(|e| crate::Error::Other(e.to_string()))?; + + let stream = futures_util::stream::once(async { Ok::<_, crate::Error>(buffer) }); + + crate::runtime()?.block_on(Self::create_streamed( + bucket, stream, length, filename, mime_type, parameters, + )) + } + + /// Obtain a list of objects within this Bucket. This function will repeatedly query Google and + /// merge the responses into one. Google responds with 1000 Objects at a time, so if you want to + /// make sure only one http call is performed, make sure to set `list_request.max_results` to + /// 1000. 
+ /// ### Example + /// ```no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::{Object, ListRequest}; + /// + /// let all_objects = Object::list("my_bucket", ListRequest::default()).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn list<'a>( + bucket: &'a str, + list_request: ListRequest, + ) -> Result> + '_, Error> { + let object_client : crate::client::ObjectClient<'a> = crate::CLOUD_CLIENT.object(); + object_client.list(bucket.clone(), list_request).await + } + + /// The synchronous equivalent of `Object::list`. + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn list_sync(bucket: &str, list_request: ListRequest) -> Result, Error> { + use futures_util::TryStreamExt; + + let rt = crate::runtime()?; + let listed = rt.block_on(Self::list(bucket, list_request))?; + rt.block_on(listed.try_collect()) + } + + /// Obtains a single object with the specified name in the specified bucket. + /// ### Example + /// ```no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::Object; + /// + /// let object = Object::read("my_bucket", "path/to/my/file.png", None).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn read( + bucket: &str, + file_name: &str, + parameters: Option, + ) -> Result { + crate::CLOUD_CLIENT + .object() + .read(bucket, file_name, parameters) + .await + } + + /// The synchronous equivalent of `Object::read`. + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn read_sync( + bucket: &str, + file_name: &str, + parameters: Option, + ) -> Result { + crate::runtime()?.block_on(Self::read(bucket, file_name, parameters)) + } + + /// Download the content of the object with the specified name in the specified bucket. 
+ /// ### Example + /// ```no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::Object; + /// + /// let bytes = Object::download("my_bucket", "path/to/my/file.png", None).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn download( + bucket: &str, + file_name: &str, + parameters: Option, + ) -> Result, Error> { + crate::CLOUD_CLIENT + .object() + .download(bucket, file_name, parameters) + .await + } + + /// The synchronous equivalent of `Object::download`. + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn download_sync( + bucket: &str, + file_name: &str, + parameters: Option, + ) -> Result, Error> { + crate::runtime()?.block_on(Self::download(bucket, file_name, parameters)) + } + + /// Download the content of the object with the specified name in the specified bucket, without + /// allocating the whole file into a vector. + /// ### Example + /// ```no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::Object; + /// use futures_util::stream::StreamExt; + /// use std::fs::File; + /// use std::io::{BufWriter, Write}; + /// + /// let mut stream = Object::download_streamed("my_bucket", "path/to/my/file.png", None).await?; + /// let mut file = BufWriter::new(File::create("file.png").unwrap()); + /// while let Some(byte) = stream.next().await { + /// file.write_all(&[byte.unwrap()]).unwrap(); + /// } + /// # Ok(()) + /// # } + /// ``` + pub async fn download_streamed<'a>( + bucket: &str, + file_name: &str, + parameters: Option, + ) -> Result> + Unpin, Error> { + crate::CLOUD_CLIENT + .object() + .download_streamed(bucket, file_name, parameters) + .await + } + + /// Obtains a single object with the specified name in the specified bucket. 
+ /// ### Example + /// ```no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::Object; + /// + /// let mut object = Object::read("my_bucket", "path/to/my/file.png", None).await?; + /// object.content_type = Some("application/xml".to_string()); + /// object.update(None).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn update(&self, parameters: Option) -> Result { + crate::CLOUD_CLIENT.object().update(self, parameters).await + } + + /// The synchronous equivalent of `Object::download`. + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn update_sync(&self, parameters: Option) -> Result { + crate::runtime()?.block_on(self.update(parameters)) + } + + /// Deletes a single object with the specified name in the specified bucket. + /// ### Example + /// ```no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::Object; + /// + /// Object::delete("my_bucket", "path/to/my/file.png", None).await?; + /// # Ok(()) + /// # } + /// ``` + pub async fn delete( + bucket: &str, + file_name: &str, + parameters: Option, + ) -> Result<(), Error> { + crate::CLOUD_CLIENT + .object() + .delete(bucket, file_name, parameters) + .await + } + + /// The synchronous equivalent of `Object::delete`. + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn delete_sync( + bucket: &str, + file_name: &str, + parameters: Option, + ) -> Result<(), Error> { + crate::runtime()?.block_on(Self::delete(bucket, file_name, parameters)) + } + + /// Obtains a single object with the specified name in the specified bucket. 
+ /// ### Example + /// ```no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::object::{Object, ComposeRequest, SourceObject}; + /// + /// let obj1 = Object::read("my_bucket", "file1", None).await?; + /// let obj2 = Object::read("my_bucket", "file2", None).await?; + /// let compose_request = ComposeRequest { + /// kind: "storage#composeRequest".to_string(), + /// source_objects: vec![ + /// SourceObject { + /// name: obj1.name.clone(), + /// generation: None, + /// object_preconditions: None, + /// }, + /// SourceObject { + /// name: obj2.name.clone(), + /// generation: None, + /// object_preconditions: None, + /// }, + /// ], + /// destination: None, + /// }; + /// let obj3 = Object::compose("my_bucket", &compose_request, "test-concatted-file", None).await?; + /// // obj3 is now a file with the content of obj1 and obj2 concatted together. + /// # Ok(()) + /// # } + /// ``` + pub async fn compose( + bucket: &str, + req: &ComposeRequest, + destination_object: &str, + parameters: Option, + ) -> Result { + crate::CLOUD_CLIENT + .object() + .compose(bucket, req, destination_object, parameters) + .await + } + + /// The synchronous equivalent of `Object::compose`. + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn compose_sync( + bucket: &str, + req: &ComposeRequest, + destination_object: &str, + parameters: Option, + ) -> Result { + + crate::runtime()?.block_on(Self::compose(bucket, req, destination_object, parameters)) + } + + /// Copy this object to the target bucket and path + /// ### Example + /// ```no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::object::{Object, ComposeRequest}; + /// + /// let obj1 = Object::read("my_bucket", "file1", None).await?; + /// let obj2 = obj1.copy("my_other_bucket", "file2", None).await?; + /// // obj2 is now a copy of obj1. 
+ /// # Ok(()) + /// # } + /// ``` + pub async fn copy( + &self, + destination_bucket: &str, + path: &str, + parameters: Option, + ) -> Result { + crate::CLOUD_CLIENT + .object() + .copy(self, destination_bucket, path, parameters) + .await + } + + /// The synchronous equivalent of `Object::copy`. + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. + #[cfg(feature = "sync")] + pub fn copy_sync( + &self, + destination_bucket: &str, + path: &str, + parameters: Option, + ) -> Result { + crate::runtime()?.block_on(self.copy(destination_bucket, path, parameters)) + } + + /// Moves a file from the current location to the target bucket and path. + /// + /// ## Limitations + /// This function does not yet support rewriting objects to another + /// * Geographical Location, + /// * Encryption, + /// * Storage class. + /// These limitations mean that for now, the rewrite and the copy methods do the same thing. + /// ### Example + /// ```no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::object::Object; + /// + /// let obj1 = Object::read("my_bucket", "file1", None).await?; + /// let obj2 = obj1.rewrite("my_other_bucket", "file2", None).await?; + /// // obj2 is now a copy of obj1. + /// # Ok(()) + /// # } + /// ``` + pub async fn rewrite( + &self, + destination_bucket: &str, + path: &str, + parameters: Option, + ) -> Result { + crate::CLOUD_CLIENT + .object() + .rewrite(self, destination_bucket, path, parameters) + .await + } + + /// The synchronous equivalent of `Object::rewrite`. + /// + /// ### Features + /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
+ #[cfg(feature = "sync")] + pub fn rewrite_sync( + &self, + destination_bucket: &str, + path: &str, + parameters: Option, + ) -> Result { + crate::runtime()?.block_on(self.rewrite(destination_bucket, path, parameters)) + } +} + +#[cfg(test)] +mod tests { + use std::{collections::HashMap, io::Write}; + + use super::*; + use crate::{Error, models::{ComposeRequest, SourceObject}}; + use bytes::Buf; + use futures_util::{stream, StreamExt, TryStreamExt}; + + #[tokio::test] + async fn create() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + Object::create(&bucket.name, vec![0, 1], "test-create", "text/plain", None).await?; + Ok(()) + } + + #[tokio::test] + async fn create_with() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + let metadata = serde_json::json!({ + "metadata": { + "object_id": "1234" + } + }); + let obj = Object::create_with( + &bucket.name, + vec![0, 1], + "test-create-meta", + "text/plain", + &metadata, + ) + .await?; + assert_eq!( + obj.metadata.unwrap().get("object_id"), + Some(&String::from("1234")) + ); + Ok(()) + } + + #[tokio::test] + async fn create_streamed() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + let stream = stream::iter([0u8, 1].iter()) + .map(Ok::<_, Box>) + .map_ok(|&b| bytes::BytesMut::from(&[b][..])); + Object::create_streamed( + &bucket.name, + stream, + 2, + "test-create-streamed", + "text/plain", + None, + ) + .await?; + Ok(()) + } + + #[tokio::test] + async fn list() -> Result<(), Box> { + let test_bucket = crate::global_client::read_test_bucket().await; + let _v: Vec = Object::list(&test_bucket.name, ListRequest::default()) + .await? + .try_collect() + .await?; + Ok(()) + } + + async fn flattened_list_prefix_stream( + bucket: &str, + prefix: &str, + ) -> Result, Box> { + let request = ListRequest { + prefix: Some(prefix.into()), + ..Default::default() + }; + + Ok(Object::list(bucket, request) + .await? 
+ .map_ok(|object_list| object_list.items) + .try_concat() + .await?) + } + + #[tokio::test] + async fn list_prefix() -> Result<(), Box> { + let test_bucket = crate::global_client::read_test_bucket().await; + + let prefix_names = [ + "test-list-prefix/1", + "test-list-prefix/2", + "test-list-prefix/sub/1", + "test-list-prefix/sub/2", + ]; + + for name in &prefix_names { + Object::create(&test_bucket.name, vec![0, 1], name, "text/plain", None).await?; + } + + let list = flattened_list_prefix_stream(&test_bucket.name, "test-list-prefix/").await?; + assert_eq!(list.len(), 4); + let list = flattened_list_prefix_stream(&test_bucket.name, "test-list-prefix/sub").await?; + assert_eq!(list.len(), 2); + Ok(()) + } + + #[tokio::test] + async fn read() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + Object::create(&bucket.name, vec![0, 1], "test-read", "text/plain", None).await?; + Object::read(&bucket.name, "test-read", None).await?; + Ok(()) + } + + #[tokio::test] + async fn download() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + let content = b"hello world"; + Object::create( + &bucket.name, + content.to_vec(), + "test-download", + "application/octet-stream", + None, + ) + .await?; + + let data = Object::download(&bucket.name, "test-download", None).await?; + assert_eq!(data, content); + + Ok(()) + } + + #[tokio::test] + async fn download_streamed() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + let content = b"hello world"; + Object::create( + &bucket.name, + content.to_vec(), + "test-download", + "application/octet-stream", + None, + ) + .await?; + + let mut result = Object::download_streamed(&bucket.name, "test-download", None).await?; + let mut data: Vec = Vec::new(); + while let Some(part) = result.next().await { + data.write_all(part?.chunk()); + } + assert_eq!(data, content); + + Ok(()) + } + + #[tokio::test] + async fn download_streamed_large() -> 
Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + let content = vec![5u8; 1_000_000]; + Object::create( + &bucket.name, + content.to_vec(), + "test-download-large", + "application/octet-stream", + None, + ) + .await?; + + let mut result = + Object::download_streamed(&bucket.name, "test-download-large", None).await?; + let mut data: Vec = Vec::new(); + while let Some(part) = result.next().await { + data.write_all(part?.chunk()); + } + assert_eq!(data, content); + + Ok(()) + } + + #[tokio::test] + async fn update() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + let mut obj = + Object::create(&bucket.name, vec![0, 1], "test-update", "text/plain", None).await?; + obj.content_type = Some("application/xml".to_string()); + obj.update(None).await?; + Ok(()) + } + + #[tokio::test] + async fn delete() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + Object::create(&bucket.name, vec![0, 1], "test-delete", "text/plain", None).await?; + + Object::delete(&bucket.name, "test-delete", None).await?; + + let list: Vec<_> = flattened_list_prefix_stream(&bucket.name, "test-delete").await?; + assert!(list.is_empty()); + + Ok(()) + } + + #[tokio::test] + async fn delete_nonexistent() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + + let nonexistent_object = "test-delete-nonexistent"; + + let delete_result = Object::delete(&bucket.name, nonexistent_object, None).await; + + if let Err(Error::Google(google_error_response)) = delete_result { + assert!(google_error_response.to_string().contains(&format!( + "No such object: {}/{}", + bucket.name, nonexistent_object + ))); + } else { + panic!("Expected a Google error, instead got {:?}", delete_result); + } + + Ok(()) + } + + #[tokio::test] + async fn compose() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + let obj1 = Object::create( + &bucket.name, + vec![0, 
1], + "test-compose-1", + "text/plain", + None, + ) + .await?; + let obj2 = Object::create( + &bucket.name, + vec![2, 3], + "test-compose-2", + "text/plain", + None, + ) + .await?; + let compose_request = ComposeRequest { + kind: "storage#composeRequest".to_string(), + source_objects: vec![ + SourceObject { + name: obj1.name.clone(), + generation: None, + object_preconditions: None, + }, + SourceObject { + name: obj2.name.clone(), + generation: None, + object_preconditions: None, + }, + ], + destination: None, + }; + let obj3 = + Object::compose(&bucket.name, &compose_request, "test-concatted-file", None).await?; + let url = obj3.download_url(100)?; + let content = reqwest::get(&url).await?.text().await?; + assert_eq!(content.as_bytes(), &[0, 1, 2, 3]); + Ok(()) + } + + #[tokio::test] + async fn copy() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + let original = + Object::create(&bucket.name, vec![2, 3], "test-copy", "text/plain", None).await?; + original + .copy(&bucket.name, "test-copy - copy", None) + .await?; + Ok(()) + } + + #[tokio::test] + async fn rewrite() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + let obj = + Object::create(&bucket.name, vec![0, 1], "test-rewrite", "text/plain", None).await?; + let obj = obj.rewrite(&bucket.name, "test-rewritten", None).await?; + let url = obj.download_url(100)?; + let client = reqwest::Client::default(); + let download = client.head(&url).send().await?; + assert_eq!(download.status().as_u16(), 200); + Ok(()) + } + + #[tokio::test] + async fn test_url_encoding() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + let complicated_names = [ + "asdf", + "asdf+1", + "asdf&&+1?=3,,-_()*&^%$#@!`~{}[]\\|:;\"'<>,.?/äöüëß", + "https://www.google.com", + "परिक्षण फाईल", + "测试很重要", + ]; + for name in &complicated_names { + let _obj = Object::create(&bucket.name, vec![0, 1], name, "text/plain", None).await?; + let obj 
= Object::read(&bucket.name, &name, None).await.unwrap(); + let url = obj.download_url(100)?; + let client = reqwest::Client::default(); + let download = client.head(&url).send().await?; + assert_eq!(download.status().as_u16(), 200); + } + Ok(()) + } + + #[tokio::test] + async fn test_download_url_with() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + let client = reqwest::Client::new(); + let obj = + Object::create(&bucket.name, vec![0, 1], "test-rewrite", "text/plain", None).await?; + + let opts1 = crate::DownloadOptions::new().content_disposition("attachment"); + let download_url1 = obj.download_url_with(100, opts1)?; + let download1 = client.head(&download_url1).send().await?; + assert_eq!(download1.headers()["content-disposition"], "attachment"); + Ok(()) + } + + #[tokio::test] + async fn test_upload_url() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + let client = reqwest::Client::new(); + let blob_name = "test-upload-url"; + let obj = Object::create(&bucket.name, vec![0, 1], blob_name, "text/plain", None).await?; + + let url = obj.upload_url(100).unwrap(); + let updated_content = vec![2, 3]; + let response = client + .put(&url) + .body(updated_content.clone()) + .send() + .await?; + assert!(response.status().is_success()); + let data = Object::download(&bucket.name, blob_name, None).await?; + assert_eq!(data, updated_content); + Ok(()) + } + + #[tokio::test] + async fn test_upload_url_with() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket().await; + let client = reqwest::Client::new(); + let blob_name = "test-upload-url"; + let obj = Object::create(&bucket.name, vec![0, 1], blob_name, "text/plain", None).await?; + let mut custom_metadata = HashMap::new(); + custom_metadata.insert(String::from("field"), String::from("value")); + + let (url, headers) = obj.upload_url_with(100, custom_metadata).unwrap(); + let updated_content = vec![2, 3]; + let mut request = 
client.put(&url).body(updated_content); + for (metadata_field, metadata_value) in headers.iter() { + request = request.header(metadata_field, metadata_value); + } + let response = request.send().await?; + assert!(response.status().is_success()); + let updated_obj = Object::read(&bucket.name, blob_name, None).await?; + let obj_metadata = updated_obj.metadata.unwrap(); + assert_eq!(obj_metadata.get("field").unwrap(), "value"); + Ok(()) + } + + #[cfg(feature = "sync")] + mod sync { + use super::*; + + #[test] + fn create() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket_sync(); + Object::create_sync(&bucket.name, vec![0, 1], "test-create", "text/plain", None)?; + Ok(()) + } + + #[test] + fn create_with() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket_sync(); + let metadata = serde_json::json!({ + "metadata": { + "object_id": "1234" + } + }); + let obj = Object::create_with_sync( + &bucket.name, + vec![0, 1], + "test-create-meta", + "text/plain", + &metadata, + )?; + assert_eq!( + obj.metadata.unwrap().get("object_id"), + Some(&String::from("1234")) + ); + Ok(()) + } + + #[test] + fn create_streamed() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket_sync(); + let cursor = std::io::Cursor::new([0, 1]); + Object::create_streamed_sync( + &bucket.name, + cursor, + 2, + "test-create-streamed", + "text/plain", + None, + )?; + Ok(()) + } + + #[test] + fn list() -> Result<(), Box> { + let test_bucket = crate::global_client::read_test_bucket_sync(); + Object::list_sync(&test_bucket.name, ListRequest::default())?; + Ok(()) + } + + #[test] + fn list_prefix() -> Result<(), Box> { + let test_bucket = crate::global_client::read_test_bucket_sync(); + + let prefix_names = [ + "test-list-prefix/1", + "test-list-prefix/2", + "test-list-prefix/sub/1", + "test-list-prefix/sub/2", + ]; + + for name in &prefix_names { + Object::create_sync(&test_bucket.name, vec![0, 1], name, "text/plain", None)?; + } + + let 
request = ListRequest { + prefix: Some("test-list-prefix/".into()), + ..Default::default() + }; + let list = Object::list_sync(&test_bucket.name, request)?; + assert_eq!(list[0].items.len(), 4); + + let request = ListRequest { + prefix: Some("test-list-prefix/sub".into()), + ..Default::default() + }; + let list = Object::list_sync(&test_bucket.name, request)?; + assert_eq!(list[0].items.len(), 2); + Ok(()) + } + + #[test] + fn list_prefix_delimiter() -> Result<(), Box> { + let test_bucket = crate::global_client::read_test_bucket_sync(); + + let prefix_names = [ + "test-list-prefix/1", + "test-list-prefix/2", + "test-list-prefix/sub/1", + "test-list-prefix/sub/2", + ]; + + for name in &prefix_names { + Object::create_sync(&test_bucket.name, vec![0, 1], name, "text/plain", None)?; + } + + let request = ListRequest { + prefix: Some("test-list-prefix/".into()), + delimiter: Some("/".into()), + ..Default::default() + }; + let list = Object::list_sync(&test_bucket.name, request)?; + assert_eq!(list[0].items.len(), 2); + assert_eq!(list[0].prefixes.len(), 1); + Ok(()) + } + + #[test] + fn read() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket_sync(); + Object::create_sync(&bucket.name, vec![0, 1], "test-read", "text/plain", None)?; + Object::read_sync(&bucket.name, "test-read", None)?; + Ok(()) + } + + #[test] + fn download() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket_sync(); + let content = b"hello world"; + Object::create_sync( + &bucket.name, + content.to_vec(), + "test-download", + "application/octet-stream", + None, + )?; + + let data = Object::download_sync(&bucket.name, "test-download", None)?; + assert_eq!(data, content); + + Ok(()) + } + + #[test] + fn update() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket_sync(); + let mut obj = + Object::create_sync(&bucket.name, vec![0, 1], "test-update", "text/plain", None)?; + obj.content_type = Some("application/xml".to_string()); + 
obj.update_sync(None)?; + Ok(()) + } + + #[test] + fn delete() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket_sync(); + Object::create_sync(&bucket.name, vec![0, 1], "test-delete", "text/plain", None)?; + + Object::delete_sync(&bucket.name, "test-delete", None)?; + + let request = ListRequest { + prefix: Some("test-delete".into()), + ..Default::default() + }; + + let list = Object::list_sync(&bucket.name, request)?; + assert!(list[0].items.is_empty()); + + Ok(()) + } + + #[test] + fn delete_nonexistent() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket_sync(); + + let nonexistent_object = "test-delete-nonexistent"; + + let delete_result = Object::delete_sync(&bucket.name, nonexistent_object, None); + + if let Err(Error::Google(google_error_response)) = delete_result { + assert!(google_error_response.to_string().contains(&format!( + "No such object: {}/{}", + bucket.name, nonexistent_object + ))); + } else { + panic!("Expected a Google error, instead got {:?}", delete_result); + } + + Ok(()) + } + + #[test] + fn compose() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket_sync(); + let obj1 = Object::create_sync( + &bucket.name, + vec![0, 1], + "test-compose-1", + "text/plain", + None, + )?; + let obj2 = Object::create_sync( + &bucket.name, + vec![2, 3], + "test-compose-2", + "text/plain", + None, + )?; + let compose_request = ComposeRequest { + kind: "storage#composeRequest".to_string(), + source_objects: vec![ + SourceObject { + name: obj1.name.clone(), + generation: None, + object_preconditions: None, + }, + SourceObject { + name: obj2.name.clone(), + generation: None, + object_preconditions: None, + }, + ], + destination: None, + }; + let obj3 = + Object::compose_sync(&bucket.name, &compose_request, "test-concatted-file", None)?; + let url = obj3.download_url(100)?; + let content = reqwest::blocking::get(&url)?.text()?; + assert_eq!(content.as_bytes(), &[0, 1, 2, 3]); + Ok(()) + } + + 
#[test] + fn copy() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket_sync(); + let original = + Object::create_sync(&bucket.name, vec![2, 3], "test-copy", "text/plain", None)?; + original.copy_sync(&bucket.name, "test-copy - copy", None)?; + Ok(()) + } + + #[test] + fn rewrite() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket_sync(); + let obj = + Object::create_sync(&bucket.name, vec![0, 1], "test-rewrite", "text/plain", None)?; + let obj = obj.rewrite_sync(&bucket.name, "test-rewritten", None)?; + let url = obj.download_url(100)?; + let client = reqwest::blocking::Client::new(); + let download = client.head(&url).send()?; + assert_eq!(download.status().as_u16(), 200); + Ok(()) + } + + #[test] + fn test_url_encoding() -> Result<(), Box> { + let bucket = crate::global_client::read_test_bucket_sync(); + let complicated_names = [ + "asdf", + "asdf+1", + "asdf&&+1?=3,,-_()*&^%$#@!`~{}[]\\|:;\"'<>,.?/äöüëß", + "https://www.google.com", + "परिक्षण फाईल", + "测试很重要", + ]; + for name in &complicated_names { + let _obj = Object::create_sync(&bucket.name, vec![0, 1], name, "text/plain", None)?; + let obj = Object::read_sync(&bucket.name, &name, None).unwrap(); + let url = obj.download_url(100)?; + let client = reqwest::blocking::Client::new(); + let download = client.head(&url).send()?; + assert_eq!(download.status().as_u16(), 200); + } + Ok(()) + } + } +} \ No newline at end of file diff --git a/src/resources/object_access_control.rs b/src/global_client/object_access_control.rs similarity index 57% rename from src/resources/object_access_control.rs rename to src/global_client/object_access_control.rs index 47ec3f0..370f673 100644 --- a/src/resources/object_access_control.rs +++ b/src/global_client/object_access_control.rs @@ -1,106 +1,4 @@ -#![allow(unused_imports)] - -pub use crate::resources::common::{Entity, ProjectTeam, Role}; -use crate::{error::GoogleResponse, resources::common::ListResponse}; - -/// The 
ObjectAccessControls resources represent the Access Control Lists (ACLs) for objects within -/// Google Cloud Storage. ACLs let you specify who has access to your data and to what extent. -/// -/// ```text,ignore -/// Important: The methods for this resource fail with a 400 Bad Request response for buckets with -/// uniform bucket-level access enabled. Use storage.buckets.getIamPolicy and -/// storage.buckets.setIamPolicy to control access instead. -/// ``` -/// -/// There are two roles that can be assigned to an entity: -/// -/// READERs can get an object, though the acl property will not be revealed. -/// OWNERs are READERs, and they can get the acl property, update an object, and call all -/// objectAccessControls methods on the object. The owner of an object is always an OWNER. -/// -/// For more information, see Access Control, with the caveat that this API uses READER and OWNER -/// instead of READ and FULL_CONTROL. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct ObjectAccessControl { - /// The kind of item this is. For object access control entries, this is always - /// `storage#objectAccessControl`. - pub kind: String, - /// The ID of the access-control entry. - pub id: String, - /// The link to this access-control entry. - pub self_link: String, - /// The name of the bucket. - pub bucket: String, - /// The name of the object, if applied to an object. - pub object: String, - /// The content generation of the object, if applied to an object. - pub generation: Option, - /// The entity holding the permission, in one of the following forms: - /// - /// user-userId - /// user-email - /// group-groupId - /// group-email - /// domain-domain - /// project-team-projectId - /// allUsers - /// allAuthenticatedUsers - /// - /// Examples: - /// - /// The user liz@example.com would be user-liz@example.com. - /// The group example@googlegroups.com would be group-example@googlegroups.com. 
- /// To refer to all members of the G Suite for Business domain example.com, the entity would be - /// domain-example.com. - pub entity: Entity, - /// The access permission for the entity. - pub role: Role, - /// The email address associated with the entity, if any. - pub email: Option, - /// The ID for the entity, if any. - pub entity_id: Option, - /// The domain associated with the entity, if any. - pub domain: Option, - /// The project team associated with the entity, if any. - pub project_team: Option, - /// HTTP 1.1 Entity tag for the access-control entry. - pub etag: String, -} - -/// Used to create a new `ObjectAccessControl` object. -#[derive(Debug, PartialEq, serde::Serialize)] -#[serde(rename_all = "camelCase")] -pub struct NewObjectAccessControl { - /// The entity holding the permission, in one of the following forms: - /// - /// user-userId - /// user-email - /// group-groupId - /// group-email - /// domain-domain - /// project-team-projectId - /// allUsers - /// allAuthenticatedUsers - /// - /// Examples: - /// - /// The user liz@example.com would be user-liz@example.com. - /// The group example@googlegroups.com would be group-example@googlegroups.com. - /// To refer to all members of the G Suite for Business domain example.com, the entity would be - /// domain-example.com. - pub entity: Entity, - /// The access permission for the entity. - pub role: Role, -} - -#[allow(unused)] -#[derive(Debug, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -struct ObjectAccessControlList { - kind: String, - items: Vec, -} +use crate::{models::{create, ObjectAccessControl, Entity}, Error}; impl ObjectAccessControl { /// Creates a new ACL entry on the specified `object`. @@ -109,15 +7,14 @@ impl ObjectAccessControl { /// This method fails with a 400 Bad Request response for buckets with uniform /// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to /// control access instead. 
- #[cfg(feature = "global-client")] pub async fn create( bucket: &str, object: &str, - new_object_access_control: &NewObjectAccessControl, - ) -> crate::Result { + new_object_access_control: &create::ObjectAccessControl, + ) -> Result { crate::CLOUD_CLIENT - .object_access_control() - .create(bucket, object, new_object_access_control) + .object_access_control(bucket, object) + .create(new_object_access_control) .await } @@ -125,12 +22,12 @@ impl ObjectAccessControl { /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] + #[cfg(feature = "sync")] pub fn create_sync( bucket: &str, object: &str, - new_object_access_control: &NewObjectAccessControl, - ) -> crate::Result { + new_object_access_control: &create::ObjectAccessControl, + ) -> Result { crate::runtime()?.block_on(Self::create(bucket, object, new_object_access_control)) } @@ -140,11 +37,10 @@ impl ObjectAccessControl { /// Important: This method fails with a 400 Bad Request response for buckets with uniform /// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to /// control access instead. - #[cfg(feature = "global-client")] - pub async fn list(bucket: &str, object: &str) -> crate::Result> { + pub async fn list(bucket: &str, object: &str) -> Result, Error> { crate::CLOUD_CLIENT - .object_access_control() - .list(bucket, object) + .object_access_control(bucket, object) + .list() .await } @@ -152,8 +48,8 @@ impl ObjectAccessControl { /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
- #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn list_sync(bucket: &str, object: &str) -> crate::Result> { + #[cfg(feature = "sync")] + pub fn list_sync(bucket: &str, object: &str) -> Result, Error> { crate::runtime()?.block_on(Self::list(bucket, object)) } @@ -163,11 +59,10 @@ impl ObjectAccessControl { /// Important: This method fails with a 400 Bad Request response for buckets with uniform /// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to /// control access instead. - #[cfg(feature = "global-client")] - pub async fn read(bucket: &str, object: &str, entity: &Entity) -> crate::Result { + pub async fn read(bucket: &str, object: &str, entity: &Entity) -> Result { crate::CLOUD_CLIENT - .object_access_control() - .read(bucket, object, entity) + .object_access_control(bucket, object) + .read(entity) .await } @@ -175,8 +70,8 @@ impl ObjectAccessControl { /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn read_sync(bucket: &str, object: &str, entity: &Entity) -> crate::Result { + #[cfg(feature = "sync")] + pub fn read_sync(bucket: &str, object: &str, entity: &Entity) -> Result { crate::runtime()?.block_on(Self::read(bucket, object, entity)) } @@ -186,10 +81,9 @@ impl ObjectAccessControl { /// Important: This method fails with a 400 Bad Request response for buckets with uniform /// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to /// control access instead. - #[cfg(feature = "global-client")] - pub async fn update(&self) -> crate::Result { + pub async fn update(&self) -> Result { crate::CLOUD_CLIENT - .object_access_control() + .object_access_control(&self.bucket, &self.object) .update(self) .await } @@ -198,8 +92,8 @@ impl ObjectAccessControl { /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
- #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn update_sync(&self) -> crate::Result { + #[cfg(feature = "sync")] + pub fn update_sync(&self) -> Result { crate::runtime()?.block_on(self.update()) } @@ -209,10 +103,9 @@ impl ObjectAccessControl { /// Important: This method fails with a 400 Bad Request response for buckets with uniform /// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to /// control access instead. - #[cfg(feature = "global-client")] - pub async fn delete(self) -> crate::Result<()> { + pub async fn delete(self) -> Result<(), Error> { crate::CLOUD_CLIENT - .object_access_control() + .object_access_control(&self.bucket, &self.object) .delete(self) .await } @@ -221,29 +114,30 @@ impl ObjectAccessControl { /// /// ### Features /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn delete_sync(self) -> crate::Result<()> { + #[cfg(feature = "sync")] + pub fn delete_sync(self) -> Result<(), Error> { crate::runtime()?.block_on(self.delete()) } } -#[cfg(all(test, feature = "global-client"))] +#[cfg(test)] mod tests { use super::*; - use crate::Object; + use crate::{Object, models::Role}; #[tokio::test] async fn create() { - let bucket = crate::read_test_bucket().await; + let bucket = crate::global_client::read_test_bucket().await; Object::create( &bucket.name, vec![0, 1], "test-object-access-controls-create", "text/plain", + None ) .await .unwrap(); - let new_bucket_access_control = NewObjectAccessControl { + let new_bucket_access_control = create::ObjectAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; @@ -258,12 +152,13 @@ mod tests { #[tokio::test] async fn list() { - let bucket = crate::read_test_bucket().await; + let bucket = crate::global_client::read_test_bucket().await; Object::create( &bucket.name, vec![0, 1], "test-object-access-controls-list", "text/plain", + None ) .await 
.unwrap(); @@ -274,16 +169,17 @@ mod tests { #[tokio::test] async fn read() { - let bucket = crate::read_test_bucket().await; + let bucket = crate::global_client::read_test_bucket().await; Object::create( &bucket.name, vec![0, 1], "test-object-access-controls-read", "text/plain", + None ) .await .unwrap(); - let new_bucket_access_control = NewObjectAccessControl { + let new_bucket_access_control = create::ObjectAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; @@ -306,12 +202,12 @@ mod tests { #[tokio::test] async fn update() { // use a seperate bucket to prevent synchronization issues - let bucket = crate::create_test_bucket("test-object-access-controls-update").await; - let new_bucket_access_control = NewObjectAccessControl { + let bucket = crate::global_client::create_test_bucket("test-object-access-controls-update").await; + let new_bucket_access_control = create::ObjectAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; - Object::create(&bucket.name, vec![0, 1], "test-update", "text/plain") + Object::create(&bucket.name, vec![0, 1], "test-update", "text/plain", None) .await .unwrap(); ObjectAccessControl::create(&bucket.name, "test-update", &new_bucket_access_control) @@ -322,19 +218,19 @@ mod tests { .unwrap(); acl.entity = Entity::AllAuthenticatedUsers; acl.update().await.unwrap(); - Object::delete(&bucket.name, "test-update").await.unwrap(); + Object::delete(&bucket.name, "test-update", None).await.unwrap(); bucket.delete().await.unwrap(); } #[tokio::test] async fn delete() { // use a seperate bucket to prevent synchronization issues - let bucket = crate::create_test_bucket("test-object-access-controls-delete").await; - let new_bucket_access_control = NewObjectAccessControl { + let bucket = crate::global_client::create_test_bucket("test-object-access-controls-delete").await; + let new_bucket_access_control = create::ObjectAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; - Object::create(&bucket.name, vec![0, 1], 
"test-delete", "text/plain") + Object::create(&bucket.name, vec![0, 1], "test-delete", "text/plain", None) .await .unwrap(); ObjectAccessControl::create(&bucket.name, "test-delete", &new_bucket_access_control) @@ -344,25 +240,26 @@ mod tests { .await .unwrap(); acl.delete().await.unwrap(); - Object::delete(&bucket.name, "test-delete").await.unwrap(); + Object::delete(&bucket.name, "test-delete", None).await.unwrap(); bucket.delete().await.unwrap(); } - #[cfg(all(feature = "global-client", feature = "sync"))] + #[cfg(feature = "sync")] mod sync { use super::*; #[test] fn create() { - let bucket = crate::read_test_bucket_sync(); + let bucket = crate::global_client::read_test_bucket_sync(); Object::create_sync( &bucket.name, vec![0, 1], "test-object-access-controls-create", "text/plain", + None ) .unwrap(); - let new_bucket_access_control = NewObjectAccessControl { + let new_bucket_access_control = create::ObjectAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; @@ -376,12 +273,13 @@ mod tests { #[test] fn list() { - let bucket = crate::read_test_bucket_sync(); + let bucket = crate::global_client::read_test_bucket_sync(); Object::create_sync( &bucket.name, vec![0, 1], "test-object-access-controls-list", "text/plain", + None ) .unwrap(); ObjectAccessControl::list_sync(&bucket.name, "test-object-access-controls-list") @@ -390,15 +288,16 @@ mod tests { #[test] fn read() { - let bucket = crate::read_test_bucket_sync(); + let bucket = crate::global_client::read_test_bucket_sync(); Object::create_sync( &bucket.name, vec![0, 1], "test-object-access-controls-read", "text/plain", + None ) .unwrap(); - let new_bucket_access_control = NewObjectAccessControl { + let new_bucket_access_control = create::ObjectAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; @@ -419,12 +318,12 @@ mod tests { #[test] fn update() { // use a seperate bucket to prevent synchronization issues - let bucket = 
crate::create_test_bucket_sync("test-object-access-controls-update"); - let new_bucket_access_control = NewObjectAccessControl { + let bucket = crate::global_client::create_test_bucket_sync("test-object-access-controls-update"); + let new_bucket_access_control = create::ObjectAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; - Object::create_sync(&bucket.name, vec![0, 1], "test-update", "text/plain").unwrap(); + Object::create_sync(&bucket.name, vec![0, 1], "test-update", "text/plain", None).unwrap(); ObjectAccessControl::create_sync( &bucket.name, "test-update", @@ -436,19 +335,19 @@ mod tests { .unwrap(); acl.entity = Entity::AllAuthenticatedUsers; acl.update_sync().unwrap(); - Object::delete_sync(&bucket.name, "test-update").unwrap(); + Object::delete_sync(&bucket.name, "test-update", None).unwrap(); bucket.delete_sync().unwrap(); } #[test] fn delete() { // use a seperate bucket to prevent synchronization issues - let bucket = crate::create_test_bucket_sync("test-object-access-controls-delete"); - let new_bucket_access_control = NewObjectAccessControl { + let bucket = crate::global_client::create_test_bucket_sync("test-object-access-controls-delete"); + let new_bucket_access_control = create::ObjectAccessControl { entity: Entity::AllUsers, role: Role::Reader, }; - Object::create_sync(&bucket.name, vec![0, 1], "test-delete", "text/plain").unwrap(); + Object::create_sync(&bucket.name, vec![0, 1], "test-delete", "text/plain", None).unwrap(); ObjectAccessControl::create_sync( &bucket.name, "test-delete", @@ -459,7 +358,7 @@ mod tests { ObjectAccessControl::read_sync(&bucket.name, "test-delete", &Entity::AllUsers) .unwrap(); acl.delete_sync().unwrap(); - Object::delete_sync(&bucket.name, "test-delete").unwrap(); + Object::delete_sync(&bucket.name, "test-delete", None).unwrap(); bucket.delete_sync().unwrap(); } } diff --git a/src/lib.rs b/src/lib.rs index 0bc3e64..23f1fed 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,4 @@ 
+#![feature(try_trait_v2)] //! This crate aims to simplify interacting with the Google Cloud Storage JSON API. Use it until //! Google releases a Cloud Storage Client Library for Rust. Shoutout to //! [MyEmma](https://myemma.io/) for funding this free and open source project. @@ -26,11 +27,11 @@ //! ## Examples: //! Creating a new Bucket in Google Cloud Storage: //! ```rust -//! # use cloud_storage::{Client, Bucket, NewBucket}; +//! # use cloud_storage::{Client, Bucket, create::Bucket}; //! # #[tokio::main] //! # async fn main() -> Result<(), Box> { //! let client = Client::default(); -//! let bucket = client.bucket().create(&NewBucket { +//! let bucket = client.bucket().create(&create::Bucket { //! name: "doctest-bucket".to_string(), //! ..Default::default() //! }).await?; @@ -44,7 +45,7 @@ //! # #[tokio::main] //! # async fn main() -> Result<(), Box> { //! let client = Client::default(); -//! let bucket = client.bucket().read("mybucket").await?; +//! let bucket = client.bucket().read("my_bucket").await?; //! # Ok(()) //! # } //! ``` @@ -60,7 +61,7 @@ //! bytes.push(byte?) //! } //! let client = Client::default(); -//! client.object().create("mybucket", bytes, "myfile.txt", "text/plain").await?; +//! client.object("my_bucket").create(bytes, "myfile.txt", "text/plain").await?; //! # Ok(()) //! # } //! ``` @@ -70,7 +71,7 @@ //! # #[tokio::main] //! # async fn main() -> Result<(), Box> { //! let client = Client::default(); -//! let mut object = client.object().read("mybucket", "myfile").await?; +//! let mut object = client.object("my_bucket").read("myfile").await?; //! object.content_type = Some("application/xml".to_string()); //! client.object().update(&object).await?; //! # Ok(()) @@ -82,65 +83,44 @@ //! # #[tokio::main] //! # async fn main() -> Result<(), Box> { //! let client = Client::default(); -//! client.object().delete("mybucket", "myfile").await?; +//! client.object("my_bucket").delete("myfile").await?; //! # Ok(()) //! # } //! 
``` #![forbid(unsafe_code, missing_docs)] - pub mod client; #[cfg(feature = "sync")] pub mod sync; + + +mod configuration; +mod models; mod download_options; mod error; -/// Contains objects as represented by Google, to be used for serialization and deserialization. -mod resources; mod token; -use crate::resources::service_account::ServiceAccount; +#[cfg(feature = "global-client")] +mod global_client; +mod crypto; +mod sized_byte_stream; +#[cfg(feature = "global-client")] +use crate::global_client::CLOUD_CLIENT; + pub use crate::{ client::Client, - error::*, - resources::{ - bucket::{Bucket, NewBucket}, - object::{ListRequest, Object}, - *, - }, + download_options::DownloadOptions, + error::Error, + models::{Bucket, ListRequest, Object}, + configuration::ServiceAccount, token::{Token, TokenCache}, }; -pub use download_options::DownloadOptions; -use tokio::sync::Mutex; - -lazy_static::lazy_static! { - static ref IAM_TOKEN_CACHE: Mutex = Mutex::new(Token::new( - "https://www.googleapis.com/auth/iam" - )); - - /// The struct is the parsed service account json file. It is publicly exported to enable easier - /// debugging of which service account is currently used. It is of the type - /// [ServiceAccount](service_account/struct.ServiceAccount.html). - pub static ref SERVICE_ACCOUNT: ServiceAccount = ServiceAccount::get(); -} - -#[cfg(feature = "global-client")] -lazy_static::lazy_static! { - static ref CLOUD_CLIENT: client::Client = client::Client::default(); -} - -/// A type alias where the error is set to be `cloud_storage::Error`. -pub type Result = std::result::Result; - -const BASE_URL: &str = "https://storage.googleapis.com/storage/v1"; const ISO_8601_BASIC_FORMAT: &[::time::format_description::FormatItem<'_>] = time::macros::format_description!("[year][month][day]T[hour][minute][second]Z"); +// todo: may or may not do stuff? 
time::serde::format_description!(rfc3339_date, Date, "[year]-[month]-[day]"); -fn from_str<'de, T, D>(deserializer: D) -> std::result::Result -where - T: std::str::FromStr, - T::Err: std::fmt::Display, - D: serde::Deserializer<'de>, +fn from_str<'de, T, D>(deserializer: D) -> std::result::Result where T: std::str::FromStr, T::Err: std::fmt::Display, D: serde::Deserializer<'de> { use serde::de::Deserialize; let s = String::deserialize(deserializer)?; @@ -148,13 +128,9 @@ where } fn from_str_opt<'de, T, D>(deserializer: D) -> std::result::Result, D::Error> -where - T: std::str::FromStr, - T::Err: std::fmt::Display, - D: serde::Deserializer<'de>, +where T: std::str::FromStr, T::Err: std::fmt::Display, D: serde::Deserializer<'de>, { - let s: std::result::Result = - serde::Deserialize::deserialize(deserializer); + let s: std::result::Result = serde::Deserialize::deserialize(deserializer); match s { Ok(serde_json::Value::String(s)) => T::from_str(&s) .map_err(serde::de::Error::custom) @@ -167,57 +143,22 @@ where } } -#[cfg(all(test, feature = "global-client", feature = "sync"))] -fn read_test_bucket_sync() -> Bucket { - crate::runtime().unwrap().block_on(read_test_bucket()) -} +use percent_encoding::{utf8_percent_encode, AsciiSet, NON_ALPHANUMERIC}; -#[cfg(all(test, feature = "global-client"))] -async fn read_test_bucket() -> Bucket { - dotenv::dotenv().ok(); - let name = std::env::var("TEST_BUCKET").unwrap(); - match Bucket::read(&name).await { - Ok(bucket) => bucket, - Err(_not_found) => Bucket::create(&NewBucket { - name, - ..NewBucket::default() - }) - .await - .unwrap(), - } -} +const ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC.remove(b'*').remove(b'-').remove(b'.').remove(b'_'); +const NOSLASH_ENCODE_SET: &AsciiSet = &ENCODE_SET.remove(b'/').remove(b'~'); -// since all tests run in parallel, we need to make sure we do not create multiple buckets with -// the same name in each test. 
-#[cfg(all(test, feature = "global-client", feature = "sync"))] -fn create_test_bucket_sync(name: &str) -> Bucket { - crate::runtime().unwrap().block_on(create_test_bucket(name)) +// We need to be able to percent encode stuff, but without touching the slashes in filenames. To +// this end we create an implementation that does this, without touching the slashes. +fn percent_encode_noslash(input: &str) -> String { + utf8_percent_encode(input, NOSLASH_ENCODE_SET).to_string() } -// since all tests run in parallel, we need to make sure we do not create multiple buckets with -// the same name in each test. -#[cfg(all(test, feature = "global-client"))] -async fn create_test_bucket(name: &str) -> Bucket { - std::thread::sleep(std::time::Duration::from_millis(1500)); // avoid getting rate limited - - dotenv::dotenv().ok(); - let base_name = std::env::var("TEST_BUCKET").unwrap(); - let name = format!("{}-{}", base_name, name); - let new_bucket = NewBucket { - name, - ..NewBucket::default() - }; - match Bucket::create(&new_bucket).await { - Ok(bucket) => bucket, - Err(_alread_exists) => Bucket::read(&new_bucket.name).await.unwrap(), - } +pub(crate) fn percent_encode(input: &str) -> String { + utf8_percent_encode(input, ENCODE_SET).to_string() } #[cfg(feature = "sync")] -fn runtime() -> Result { - Ok(tokio::runtime::Builder::new_current_thread() - .thread_name("cloud-storage-worker") - .enable_time() - .enable_io() - .build()?) -} +fn runtime() -> Result { + Ok(tokio::runtime::Builder::new_current_thread().thread_name("cloud-storage-worker").enable_time().enable_io().build()?) +} \ No newline at end of file diff --git a/src/models/action.rs b/src/models/action.rs new file mode 100644 index 0000000..04e1db6 --- /dev/null +++ b/src/models/action.rs @@ -0,0 +1,11 @@ +use super::{StorageClass, ActionType}; + +/// Represents an action that might be undertaken due to a `Condition`. 
+#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Action { + /// Type of the action. + pub r#type: ActionType, + /// Target storage class. Required iff the type of the action is SetStorageClass. + pub storage_class: Option, +} \ No newline at end of file diff --git a/src/models/action_type.rs b/src/models/action_type.rs new file mode 100644 index 0000000..e2d6b8a --- /dev/null +++ b/src/models/action_type.rs @@ -0,0 +1,8 @@ +/// Type of the action. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +pub enum ActionType { + /// Deletes a Bucket. + Delete, + /// Sets the `storage_class` of a Bucket. + SetStorageClass, +} \ No newline at end of file diff --git a/src/models/billing.rs b/src/models/billing.rs new file mode 100644 index 0000000..05b0ea5 --- /dev/null +++ b/src/models/billing.rs @@ -0,0 +1,7 @@ +/// Contains information about the payment structure of this bucket +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Billing { + /// When set to true, Requester Pays is enabled for this bucket. + pub requester_pays: bool, +} \ No newline at end of file diff --git a/src/models/binding.rs b/src/models/binding.rs new file mode 100644 index 0000000..935609b --- /dev/null +++ b/src/models/binding.rs @@ -0,0 +1,42 @@ +use super::{IamRole, IamCondition}; + +/// An association between a role, which comes with a set of permissions, and members who may assume +/// that role. +#[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct Binding { + /// The role to which members belong. Two types of roles are supported: standard IAM roles, + /// which grant permissions that do not map directly to those provided by ACLs, and legacy IAM + /// roles, which do map directly to ACL permissions. 
All roles are of the format + /// `roles/storage.specificRole.` + /// + /// See + /// [Cloud Storage IAM Roles](https://cloud.google.com/storage/docs/access-control/iam-roles) + /// for a list of available roles. + pub role: IamRole, + /// A collection of identifiers for members who may assume the provided role. Recognized + /// identifiers are as follows: + /// + /// * `allUsers` — A special identifier that represents anyone on the internet; with or without + /// a Google account. + /// * `allAuthenticatedUsers` — A special identifier that represents anyone who is authenticated + /// with a Google account or a service account. + /// * `user:emailid` — An email address that represents a specific account. For example, + /// user:alice@gmail.com or user:joe@example.com. + /// * `serviceAccount:emailid` — An email address that represents a service account. For + /// example, serviceAccount:my-other-app@appspot.gserviceaccount.com . + /// * `group:emailid` — An email address that represents a Google group. For example, + /// group:admins@example.com. + /// * `domain:domain` — A G Suite domain name that represents all the users of that domain. For + /// example, domain:google.com or domain:example.com. + /// * `projectOwner:projectid` — Owners of the given project. For example, + /// projectOwner:my-example-project + /// * `projectEditor:projectid` — Editors of the given project. For example, + /// projectEditor:my-example-project + /// * `projectViewer:projectid` — Viewers of the given project. For example, + /// projectViewer:my-example-project + pub members: Vec, + /// A condition object associated with this binding. Each role binding can only contain one + /// condition. 
+ pub condition: Option, +} \ No newline at end of file diff --git a/src/models/bucket.rs b/src/models/bucket.rs new file mode 100644 index 0000000..1072071 --- /dev/null +++ b/src/models/bucket.rs @@ -0,0 +1,87 @@ +use super::{RetentionPolicy, BucketAccessControl, DefaultObjectAccessControl, IamConfiguration, Encryption, Owner, Website, Logging, Versioning, Cors, Lifecycle, StorageClass, Billing, Location}; + +/// The Buckets resource represents a +/// [bucket](https://cloud.google.com/storage/docs/key-terms#buckets) in Google Cloud Storage. There +/// is a single global namespace shared by all buckets. For more information, see +/// [Bucket Name Requirements](https://cloud.google.com/storage/docs/naming#requirements). +/// +/// Buckets contain objects which can be accessed by their own methods. In addition to the +/// [ACL property](https://cloud.google.com/storage/docs/access-control/lists), buckets contain +/// `BucketAccessControls`, for use in fine-grained manipulation of an existing bucket's access +/// controls. +/// +/// A bucket is always owned by the project team owners group. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Bucket { + /// The kind of item this is. For buckets, this is always `storage#bucket`. + pub kind: String, + /// The ID of the bucket. For buckets, the `id` and `name` properties are the same. + pub id: String, // should be u64, mumble mumble + /// The URI of this bucket. + pub self_link: String, + /// The project number of the project the bucket belongs to. + #[serde(deserialize_with = "crate::from_str")] + pub project_number: u64, + /// The name of the bucket. + pub name: String, + /// The creation time of the bucket in RFC 3339 format. + #[serde(with = "time::serde::rfc3339")] + pub time_created: time::OffsetDateTime, + /// The modification time of the bucket in RFC 3339 format. 
+ #[serde(with = "time::serde::rfc3339")] + pub updated: time::OffsetDateTime, + /// Whether or not to automatically apply an eventBasedHold to new objects added to the bucket. + pub default_event_based_hold: Option, + /// The bucket's retention policy, which defines the minimum age an object in the bucket must + /// reach before it can be deleted or overwritten. + pub retention_policy: Option, + /// The metadata generation of this bucket. + #[serde(deserialize_with = "crate::from_str")] + pub metageneration: i64, + /// Access controls on the bucket, containing one or more bucketAccessControls Resources. If + /// iamConfiguration.uniformBucketLevelAccess.enabled is set to true, this field is omitted in + /// responses, and requests that specify this field fail with a 400 Bad Request response. + pub acl: Option>, + /// Default access controls to apply to new objects when no ACL is provided. This list contains + /// one or more defaultObjectAccessControls Resources. If + /// iamConfiguration.uniformBucketLevelAccess.enabled is set to true, this field is omitted in + /// responses, and requests that specify this field fail. + pub default_object_acl: Option>, + /// The bucket's IAM configuration. + pub iam_configuration: Option, + /// Encryption configuration for a bucket. + pub encryption: Option, + /// The owner of the bucket. This is always the project team's owner group. + pub owner: Option, + /// The location of the bucket. Object data for objects in the bucket resides in physical + /// storage within this region. Defaults to US. See Cloud Storage bucket locations for the + /// authoritative list. + pub location: Location, + /// The type of location that the bucket resides in, as determined by the location property. + pub location_type: String, + /// The bucket's website configuration, controlling how the service behaves when accessing + /// bucket contents as a web site. See the Static Website Examples for more information. 
+ pub website: Option, + /// The bucket's logging configuration, which defines the destination bucket and optional name + /// prefix for the current bucket's logs. + pub logging: Option, + /// The bucket's versioning configuration. + pub versioning: Option, + /// The bucket's Cross-Origin Resource Sharing (CORS) configuration. + pub cors: Option>, + /// The bucket's lifecycle configuration. See + /// [lifecycle management](https://cloud.google.com/storage/docs/lifecycle) for more + /// information. + pub lifecycle: Option, + /// User-provided bucket labels, in key/value pairs. + pub labels: Option>, + /// The bucket's default storage class, used whenever no storageClass is specified for a + /// newly-created object. If storageClass is not specified when the bucket + /// is created, it defaults to STANDARD. For more information, see storage classes. + pub storage_class: StorageClass, + /// The bucket's billing configuration. + pub billing: Option, + /// HTTP 1.1 [Entity tag](https://tools.ietf.org/html/rfc7232#section-2.3) for the bucket. + pub etag: String, +} \ No newline at end of file diff --git a/src/models/bucket_access_control.rs b/src/models/bucket_access_control.rs new file mode 100644 index 0000000..e678b7d --- /dev/null +++ b/src/models/bucket_access_control.rs @@ -0,0 +1,62 @@ +use super::{Entity, Role, ProjectTeam}; + +/// The BucketAccessControl resource represents the Access Control Lists (ACLs) for buckets within +/// Google Cloud Storage. ACLs let you specify who has access to your data and to what extent. +/// +/// ```text,ignore +/// Important: This method fails with a 400 Bad Request response for buckets with uniform +/// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to +/// control access instead. +/// ``` +/// +/// There are three roles that can be assigned to an entity: +/// +/// * READERs can get the bucket, though no acl property will be returned, and list the bucket's +/// objects. 
+/// * WRITERs are READERs, and they can insert objects into the bucket and delete the bucket's +/// objects. +/// * OWNERs are WRITERs, and they can get the acl property of a bucket, update a bucket, and call +/// all BucketAccessControl methods on the bucket. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct BucketAccessControl { + /// The kind of item this is. For bucket access control entries, this is always + /// `storage#bucketAccessControl`. + pub kind: String, + /// The ID of the access-control entry. + pub id: String, + /// The link to this access-control entry. + pub self_link: String, + /// The name of the bucket. + pub bucket: String, + /// The entity holding the permission, in one of the following forms: + /// + /// * `user-userId` + /// * `user-email` + /// * `group-groupId` + /// * `group-email` + /// * `domain-domain` + /// * `project-team-projectId` + /// * `allUsers` + /// * `allAuthenticatedUsers` + /// + /// Examples: + /// + /// * The user liz@example.com would be user-liz@example.com. + /// * The group example@googlegroups.com would be group-example@googlegroups.com. + /// * To refer to all members of the G Suite for Business domain example.com, the entity would + /// be domain-example.com. + pub entity: Entity, + /// The access permission for the entity. + pub role: Role, + /// The email address associated with the entity, if any. + pub email: Option, + /// The ID for the entity, if any. + pub entity_id: Option, + /// The domain associated with the entity, if any. + pub domain: Option, + /// The project team associated with the entity, if any. + pub project_team: Option, + /// HTTP 1.1 Entity tag for the access-control entry. 
+ pub etag: String, +} \ No newline at end of file diff --git a/src/models/compose_parameters.rs b/src/models/compose_parameters.rs new file mode 100644 index 0000000..df6aa76 --- /dev/null +++ b/src/models/compose_parameters.rs @@ -0,0 +1,27 @@ +/// The parameters that are optionally supplied when composing an object. +#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ComposeParameters { + /// Apply a predefined set of access controls to the destination object. + /// + /// Acceptable values are: + /// `authenticatedRead`: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. + /// `bucketOwnerFullControl`: Object owner gets OWNER access, and project team owners get OWNER access. + /// `bucketOwnerRead`: Object owner gets OWNER access, and project team owners get READER access. + /// `private`: Object owner gets OWNER access. + /// `projectPrivate`: Object owner gets OWNER access, and project team members get access according to their roles. + /// `publicRead`: Object owner gets OWNER access, and allUsers get READER access. + /// If `iamConfiguration.uniformBucketLevelAccess.enabled` is set to `true`, requests that include this parameter fail with a 400 Bad Request response. + pub destination_predefined_acl: Option, + + /// Makes the operation conditional on there being a live destination object with a generation number that matches the given value. + /// Setting `ifGenerationMatch` to 0 makes the operation succeed only if there is no live destination object. + pub if_generation_match: Option, + + /// Makes the operation conditional on there being a live destination object with a metageneration number that matches the given value. + pub if_metageneration_match: Option, + + /// Resource name of the Cloud KMS key that will be used to encrypt the composed object. + /// If not specified, the request uses the bucket's default Cloud KMS key, if any, or a Google-managed encryption key. 
+ pub kms_key_name: Option, +} \ No newline at end of file diff --git a/src/models/compose_request.rs b/src/models/compose_request.rs new file mode 100644 index 0000000..90c0f63 --- /dev/null +++ b/src/models/compose_request.rs @@ -0,0 +1,13 @@ +use super::{SourceObject, Object}; + +/// The request that is supplied to perform `Object::compose`. +#[derive(Debug, PartialEq, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ComposeRequest { + /// The kind of item this is. Will always be `storage#composeRequest`. + pub kind: String, + /// The list of source objects that will be concatenated into a single object. + pub source_objects: Vec, + /// Properties of the resulting object. + pub destination: Option, +} \ No newline at end of file diff --git a/src/models/condition.rs b/src/models/condition.rs new file mode 100644 index 0000000..b5df3b0 --- /dev/null +++ b/src/models/condition.rs @@ -0,0 +1,25 @@ +/// A rule that might induce an `Action` if met. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Condition { + /// Age of an object (in days). This condition is satisfied when an object reaches the specified + /// age. + pub age: Option, + /// A date in `RFC 3339` format with only the date part (for instance, "2013-01-15"). This + /// condition is satisfied when an object is created before midnight of the specified date in + /// UTC. + #[serde(default, with = "crate::rfc3339_date::option")] + pub created_before: Option, + /// Relevant only for versioned objects. If the value is true, this condition matches the live + /// version of objects; if the value is `false`, it matches noncurrent versions of objects. + pub is_live: Option, + /// Objects having any of the storage classes specified by this condition will be matched. + /// Values include STANDARD, NEARLINE, COLDLINE, MULTI_REGIONAL, REGIONAL, and + /// DURABLE_REDUCED_AVAILABILITY. 
+ pub matches_storage_class: Option>, + /// Relevant only for versioned objects. If the value is N, this condition is satisfied when + /// there are at least N versions (including the live version) newer than this version of the + /// object. + #[serde(default, deserialize_with = "crate::from_str_opt")] + pub num_newer_versions: Option, +} \ No newline at end of file diff --git a/src/models/copy_paramters.rs b/src/models/copy_paramters.rs new file mode 100644 index 0000000..24ab226 --- /dev/null +++ b/src/models/copy_paramters.rs @@ -0,0 +1,61 @@ +/// The parameters that are optionally supplied when copying an object. +#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct CopyParameters { + /// Resource name of the Cloud KMS key that will be used to encrypt the object. + /// The Cloud KMS key must be located in the same location as the object. + /// + /// If the parameter is not specified, the request uses the destination bucket's default encryption key, if any, or the Google-managed encryption key. + /// + /// If the object is large, re-encryption with the key may take too long and result in a Deadline exceeded error. + /// For large objects, consider using the rewrite method instead. + pub destination_kms_key_name: Option, + + /// Apply a predefined set of access controls to the destination object. + /// + /// Acceptable values are: + /// `authenticatedRead`: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. + /// `bucketOwnerFullControl`: Object owner gets OWNER access, and project team owners get OWNER access. + /// `bucketOwnerRead`: Object owner gets OWNER access, and project team owners get READER access. + /// `private`: Object owner gets OWNER access. + /// `projectPrivate`: Object owner gets OWNER access, and project team members get access according to their roles. + /// `publicRead`: Object owner gets OWNER access, and allUsers get READER access. 
+ /// If `iamConfiguration.uniformBucketLevelAccess.enabled` is set to `true`, requests that include this parameter fail with a 400 Bad Request response. + pub destination_predefined_acl: Option, + + /// Makes the operation conditional on there being a live destination object with a generation number that matches the given value. + /// Setting `ifGenerationMatch` to 0 makes the operation succeed only if there is no live destination object. + pub if_generation_match: Option, + + /// Makes the operation conditional on there being a live destination object with a generation number that does not match the given value. + /// If no live destination object exists, the precondition fails. + /// Setting `ifGenerationNotMatch` to 0 makes the operation succeed if there is a live version of the object. + pub if_generation_not_match: Option, + + /// Makes the operation conditional on there being a live destination object with a metageneration number that matches the given value. + pub if_metageneration_match: Option, + + /// Makes the operation conditional on there being a live destination object with a metageneration number that does not match the given value. + pub if_metageneration_not_match: Option, + + /// Makes the operation conditional on whether the source object's generation matches the given value. + pub if_source_generation_match: Option, + + /// Makes the operation conditional on whether the source object's generation does not match the given value. + pub if_source_generation_not_match: Option, + + /// Makes the operation conditional on whether the source object's current metageneration matches the given value. + pub if_source_metageneration_match: Option, + + /// Makes the operation conditional on whether the source object's current metageneration does not match the given value. + pub if_source_metageneration_not_match: Option, + + /// Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. 
+ /// Acceptable values are: + /// full: Include all properties. + /// noAcl: Omit the owner, acl property. + pub projection: Option, + + /// If present, selects a specific revision of the source object (as opposed to the latest version, the default). + pub source_generation: Option, +} \ No newline at end of file diff --git a/src/models/cors.rs b/src/models/cors.rs new file mode 100644 index 0000000..e02f71a --- /dev/null +++ b/src/models/cors.rs @@ -0,0 +1,20 @@ +/// Contains information about how OPTIONS requests for this Bucket are handled. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Cors { + /// The list of Origins eligible to receive CORS response headers. Note: "*" is permitted in the + /// list of origins, and means "any Origin". + #[serde(default)] + pub origin: Vec, + /// The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, + /// etc) Note: "*" is permitted in the list of methods, and means "any method". + #[serde(default)] + pub method: Vec, + /// The list of HTTP headers other than the simple response headers to give permission for the + /// user-agent to share across domains. + #[serde(default)] + pub response_header: Vec, + /// The value, in seconds, to return in the Access-Control-Max-Age header used in preflight + /// responses. + pub max_age_seconds: Option, +} \ No newline at end of file diff --git a/src/models/create/bucket.rs b/src/models/create/bucket.rs new file mode 100644 index 0000000..d8556ea --- /dev/null +++ b/src/models/create/bucket.rs @@ -0,0 +1,49 @@ +use crate::models::{IamConfiguration, Encryption, Website, Logging, Versioning, Cors, Lifecycle, StorageClass, Billing, Location}; +use super::{BucketAccessControl, DefaultObjectAccessControl}; + +/// A model that can be used to insert new buckets into Google Cloud Storage. 
+#[derive(Debug, PartialEq, Default, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct Bucket { + /// The name of the bucket. See the bucket naming guidelines for more information. + pub name: String, + /// Whether or not to automatically apply an eventBasedHold to new objects added to the bucket. + pub default_event_based_hold: Option, + /// Access controls on the bucket, containing one or more `BucketAccessControls` resources. If + /// `iamConfiguration.uniformBucketLevelAccess.enabled` is set to true, this field is omitted in + /// responses, and requests that specify this field fail with a `400 Bad Request` response. + pub acl: Option>, + /// Default access controls to apply to new objects when no ACL is provided. This list defines + /// an entity and role for one or more `DefaultObjectAccessControls` resources. If + /// `iamConfiguration.uniformBucketLevelAccess.enabled` is set to true, this field is omitted in + /// responses, and requests that specify this field fail with a `400 Bad Request` response. + pub default_object_acl: Option>, + /// The bucket's IAM configuration. + pub iam_configuration: Option, + /// Encryption configuration for a bucket. + pub encryption: Option, + /// The location of the bucket. Object data for objects in the bucket resides in physical + /// storage within this region. Defaults to US. See Cloud Storage bucket locations for the + /// authoritative list. + pub location: Location, + /// The bucket's website configuration, controlling how the service behaves when accessing + /// bucket contents as a web site. See the Static Website Examples for more information. + pub website: Option, + /// The bucket's logging configuration, which defines the destination bucket and optional name + /// prefix for the current bucket's logs. + pub logging: Option, + /// The bucket's versioning configuration. + pub versioning: Option, + /// The bucket's Cross-Origin Resource Sharing (CORS) configuration. 
+ pub cors: Option>, + /// The bucket's lifecycle configuration. See [lifecycle management](https://cloud.google.com/storage/docs/lifecycle) for more information. + pub lifecycle: Option, + /// User-provided bucket labels, in key/value pairs. + pub labels: Option>, + /// The bucket's default storage class, used whenever no storageClass is specified for a + /// newly-created object. If storageClass is not specified when the bucket + /// is created, it defaults to STANDARD. For more information, see storage classes. + pub storage_class: Option, + /// The bucket's billing configuration. + pub billing: Option, +} \ No newline at end of file diff --git a/src/models/create/bucket_access_control.rs b/src/models/create/bucket_access_control.rs new file mode 100644 index 0000000..8a8c7a4 --- /dev/null +++ b/src/models/create/bucket_access_control.rs @@ -0,0 +1,27 @@ +use crate::models::{Entity, Role}; + +/// Model that can be used to create a new BucketAccessControl object. +#[derive(Debug, PartialEq, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct BucketAccessControl { + /// The entity holding the permission, in one of the following forms: + /// + /// * `user-userId` + /// * `user-email` + /// * `group-groupId` + /// * `group-email` + /// * `domain-domain` + /// * `project-team-projectId` + /// * `allUsers` + /// * `allAuthenticatedUsers` + /// + /// Examples: + /// + /// * The user liz@example.com would be user-liz@example.com. + /// * The group example@googlegroups.com would be group-example@googlegroups.com. + /// * To refer to all members of the G Suite for Business domain example.com, the entity would + /// be domain-example.com. + pub entity: Entity, + /// The access permission for the entity. 
+ pub role: Role, +} \ No newline at end of file diff --git a/src/models/create/default_object_access_control.rs b/src/models/create/default_object_access_control.rs new file mode 100644 index 0000000..3505b21 --- /dev/null +++ b/src/models/create/default_object_access_control.rs @@ -0,0 +1,27 @@ +use crate::models::{Entity, Role}; + +/// Model that can be used to create a new DefaultObjectAccessControl object. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct DefaultObjectAccessControl { + /// The entity holding the permission, in one of the following forms: + /// + /// * `user-userId` + /// * `user-email` + /// * `group-groupId` + /// * `group-email` + /// * `domain-domain` + /// * `project-team-projectId` + /// * `allUsers` + /// * `allAuthenticatedUsers` + /// + /// Examples: + /// + /// * The user liz@example.com would be user-liz@example.com. + /// * The group example@googlegroups.com would be group-example@googlegroups.com. + /// * To refer to all members of the G Suite for Business domain example.com, the entity would + /// be domain-example.com. + pub entity: Entity, + /// The access permission for the entity. 
+ pub role: Role, +} \ No newline at end of file diff --git a/src/models/create/mod.rs b/src/models/create/mod.rs new file mode 100644 index 0000000..ad036b3 --- /dev/null +++ b/src/models/create/mod.rs @@ -0,0 +1,16 @@ +mod bucket; +mod bucket_access_control; +mod default_object_access_control; +//mod notification; +mod payload_format; +mod object_access_control; + +pub(crate) use self::{ + bucket::Bucket, + bucket_access_control::BucketAccessControl, + default_object_access_control::DefaultObjectAccessControl, + //notification::Notification, + payload_format::PayloadFormat, + object_access_control::ObjectAccessControl, + +}; \ No newline at end of file diff --git a/src/models/create/notification.rs b/src/models/create/notification.rs new file mode 100644 index 0000000..35d9284 --- /dev/null +++ b/src/models/create/notification.rs @@ -0,0 +1,22 @@ +use std::collections::HashMap; + +use super::PayloadFormat; + +/// Use this struct to create new notifications. +#[derive(Debug, PartialEq, Default, serde::Serialize)] +pub struct Notification { + /// The Pub/Sub topic to which this subscription publishes. Formatted as: + /// `'//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'`. + topic: String, + /// If present, only send notifications about listed event types. If empty, send notifications + /// for all event types. + event_types: Option>, + /// An optional list of additional attributes to attach to each Pub/Sub message published + /// for this notification subscription. + custom_attributes: Option>, + /// The desired content of the Payload. + payload_format: Option, + /// If present, only apply this notification configuration to object names that begin with this + /// prefix. 
+ object_name_prefix: Option, +} \ No newline at end of file diff --git a/src/models/create/object_access_control.rs b/src/models/create/object_access_control.rs new file mode 100644 index 0000000..e29fef8 --- /dev/null +++ b/src/models/create/object_access_control.rs @@ -0,0 +1,27 @@ +use crate::models::{Entity, Role}; + +/// Used to create a new `ObjectAccessControl` object. +#[derive(Debug, PartialEq, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ObjectAccessControl { + /// The entity holding the permission, in one of the following forms: + /// + /// user-userId + /// user-email + /// group-groupId + /// group-email + /// domain-domain + /// project-team-projectId + /// allUsers + /// allAuthenticatedUsers + /// + /// Examples: + /// + /// The user liz@example.com would be user-liz@example.com. + /// The group example@googlegroups.com would be group-example@googlegroups.com. + /// To refer to all members of the G Suite for Business domain example.com, the entity would be + /// domain-example.com. + pub entity: Entity, + /// The access permission for the entity. + pub role: Role, +} \ No newline at end of file diff --git a/src/models/create/payload_format.rs b/src/models/create/payload_format.rs new file mode 100644 index 0000000..19a5cef --- /dev/null +++ b/src/models/create/payload_format.rs @@ -0,0 +1,9 @@ +/// Various ways of having the response formatted. +#[derive(Debug, PartialEq, serde::Serialize)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +pub enum PayloadFormat { + /// Respond with a format as specified in the Json API V1 documentation. + JsonApiV1, + /// Do not respond. + None, +} \ No newline at end of file diff --git a/src/models/customer_encryption.rs b/src/models/customer_encryption.rs new file mode 100644 index 0000000..7216d64 --- /dev/null +++ b/src/models/customer_encryption.rs @@ -0,0 +1,9 @@ +/// Contains data about how a user might encrypt their files in Google Cloud Storage. 
+#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CustomerEncrypton { + /// The encryption algorithm. + pub encryption_algorithm: String, + /// SHA256 hash value of the encryption key. + pub key_sha256: String, +} \ No newline at end of file diff --git a/src/models/default_object_access_control.rs b/src/models/default_object_access_control.rs new file mode 100644 index 0000000..1c2633a --- /dev/null +++ b/src/models/default_object_access_control.rs @@ -0,0 +1,46 @@ +use super::{Entity, Role, ProjectTeam}; + +/// The DefaultObjectAccessControls resources represent the Access Control Lists (ACLs) applied to a +/// new object within Google Cloud Storage when no ACL was provided for that object. ACLs let you +/// specify who has access to your data and to what extent. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct DefaultObjectAccessControl { + /// The kind of item this is. For object access control entries, this is always + /// storage#objectAccessControl. + pub kind: String, + /// The entity holding the permission, in one of the following forms: + /// + /// * `user-userId` + /// * `user-email` + /// * `group-groupId` + /// * `group-email` + /// * `domain-domain` + /// * `project-team-projectId` + /// * `allUsers` + /// * `allAuthenticatedUsers` + /// + /// Examples: + /// + /// * The user liz@example.com would be user-liz@example.com. + /// * The group example@googlegroups.com would be group-example@googlegroups.com. + /// * To refer to all members of the G Suite for Business domain example.com, the entity would + /// be domain-example.com. + pub entity: Entity, + /// The access permission for the entity. + pub role: Role, + /// The email address associated with the entity, if any. + pub email: Option, + /// The ID for the entity, if any. + pub entity_id: Option, + /// The domain associated with the entity, if any. 
+ pub domain: Option, + /// The project team associated with the entity, if any. + pub project_team: Option, + /// HTTP 1.1 Entity tag for the access-control entry. + pub etag: String, + /// The bucket this resource belongs to. + #[serde(default)] + pub bucket: String, // this field is not returned by Google, but we populate it manually for the + // convenience of the end user. +} \ No newline at end of file diff --git a/src/models/delete_parameters.rs b/src/models/delete_parameters.rs new file mode 100644 index 0000000..229065b --- /dev/null +++ b/src/models/delete_parameters.rs @@ -0,0 +1,22 @@ +/// The parameters that are optionally supplied when deleting an object. +#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct DeleteParameters { + /// If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default). + pub generation: Option, + + /// Makes the operation conditional on whether the object's current generation matches the given value. + /// Setting to 0 makes the operation succeed only if there are no live versions of the object. + pub if_generation_match: Option, + + /// Makes the operation conditional on whether the object's current generation does not match the given value. + /// If no live object exists, the precondition fails. + /// Setting to 0 makes the operation succeed only if there is a live version of the object. + pub if_generation_not_match: Option, + + /// Makes the operation conditional on whether the object's current metageneration matches the given value. + pub if_metageneration_match: Option, + + /// Makes the operation conditional on whether the object's current metageneration does not match the given value. 
+ pub if_metageneration_not_match: Option, +} \ No newline at end of file diff --git a/src/models/encryption.rs b/src/models/encryption.rs new file mode 100644 index 0000000..571b1a1 --- /dev/null +++ b/src/models/encryption.rs @@ -0,0 +1,8 @@ +/// Contains information about the encryption used for data in this Bucket. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Encryption { + /// A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no + /// encryption method is specified. + pub default_kms_key_name: String, +} \ No newline at end of file diff --git a/src/resources/common.rs b/src/models/entity.rs similarity index 53% rename from src/resources/common.rs rename to src/models/entity.rs index df69d8c..689aafa 100644 --- a/src/resources/common.rs +++ b/src/models/entity.rs @@ -1,70 +1,6 @@ -use serde::Serializer; use std::str::FromStr; -/// Contains information about the team related to this `DefaultObjectAccessControls` -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct ProjectTeam { - /// The project number. - project_number: String, - /// The team. - team: Team, -} - -/// Any type of team we can encounter. -#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "lowercase")] -pub enum Team { - /// The team consists of `Editors`. - Editors, - /// The team consists of `Owners`. - Owners, - /// The team consists of `Viewers`. 
- Viewers, -} - -impl std::fmt::Display for Team { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - Team::Editors => write!(f, "editors"), - Team::Owners => write!(f, "owners"), - Team::Viewers => write!(f, "viewers"), - } - } -} - -impl FromStr for Team { - type Err = String; - - fn from_str(s: &str) -> Result { - match s { - "editors" => Ok(Self::Editors), - "owners" => Ok(Self::Owners), - "viewers" => Ok(Self::Viewers), - _ => Err(format!("Invalid `Team`: {}", s)), - } - } -} - -/// Any type of role we can encounter. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "UPPERCASE")] -pub enum Role { - /// Full access. - Owner, - /// Write, but not administer. - Writer, - /// Only read access. - Reader, -} - -#[derive(Debug, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub(crate) struct ListResponse { - #[serde(default = "Vec::new")] - pub items: Vec, - // pub next_page_token: Option, -} +use super::Team; /// An entity is used to represent a user or group of users that often have some kind of permission. 
#[derive(Debug, PartialEq, Clone)] @@ -88,32 +24,39 @@ pub enum Entity { AllAuthenticatedUsers, } -use Entity::*; - impl std::fmt::Display for Entity { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { - UserId(s) => write!(f, "user-{}", s), - UserEmail(s) => write!(f, "user-{}", s), - GroupId(s) => write!(f, "group-{}", s), - GroupEmail(s) => write!(f, "group-{}", s), - Domain(s) => write!(f, "domain-{}", s), - Project(team, project_id) => write!(f, "project-{}-{}", team, project_id), - AllUsers => write!(f, "allUsers"), - AllAuthenticatedUsers => write!(f, "allAuthenticatedUsers"), + Entity::UserId(s) => write!(f, "user-{}", s), + Entity::UserEmail(s) => write!(f, "user-{}", s), + Entity::GroupId(s) => write!(f, "group-{}", s), + Entity::GroupEmail(s) => write!(f, "group-{}", s), + Entity::Domain(s) => write!(f, "domain-{}", s), + Entity::Project(team, project_id) => write!(f, "project-{}-{}", team, project_id), + Entity::AllUsers => write!(f, "allUsers"), + Entity::AllAuthenticatedUsers => write!(f, "allAuthenticatedUsers"), } } } +// This uses Display to serialize a entity as a string based enum variant, rather than generating an object impl serde::Serialize for Entity { fn serialize(&self, serializer: S) -> Result where - S: Serializer, + S: serde::Serializer, { serializer.serialize_str(&format!("{}", self)) } } +impl<'de> serde::Deserialize<'de> for Entity { + fn deserialize(deserializer: D) -> Result + where D: serde::Deserializer<'de>, + { + deserializer.deserialize_str(EntityVisitor) + } +} + struct EntityVisitor; impl<'de> serde::de::Visitor<'de> for EntityVisitor { @@ -129,78 +72,70 @@ impl<'de> serde::de::Visitor<'de> for EntityVisitor { { let parts: Vec<&str> = value.split('-').collect(); let result = match &parts[..] { - ["user", rest @ ..] if is_email(rest) => UserEmail(rest.join("-")), - ["user", rest @ ..] => UserId(rest.join("-")), - ["group", rest @ ..] 
if is_email(rest) => GroupEmail(rest.join("-")), - ["group", rest @ ..] => GroupId(rest.join("-")), - ["domain", rest @ ..] => Domain(rest.join("-")), + ["user", rest @ ..] if is_email(rest) => Entity::UserEmail(rest.join("-")), + ["user", rest @ ..] => Entity::UserId(rest.join("-")), + ["group", rest @ ..] if is_email(rest) => Entity::GroupEmail(rest.join("-")), + ["group", rest @ ..] => Entity::GroupId(rest.join("-")), + ["domain", rest @ ..] => Entity::Domain(rest.join("-")), ["project", team, project_id] => { - Project(Team::from_str(team).unwrap(), project_id.to_string()) + Entity::Project(Team::from_str(team).unwrap(), project_id.to_string()) } - ["allUsers"] => AllUsers, - ["allAuthenticatedUsers"] => AllAuthenticatedUsers, + ["allUsers"] => Entity::AllUsers, + ["allAuthenticatedUsers"] => Entity::AllAuthenticatedUsers, _ => return Err(E::custom(format!("Unexpected `Entity`: {}", value))), }; Ok(result) } } +// Used for EntityVisitor fn is_email(pattern: &[&str]) -> bool { pattern.iter().any(|s| s.contains('@')) } -impl<'de> serde::Deserialize<'de> for Entity { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - deserializer.deserialize_str(EntityVisitor) - } -} - #[cfg(test)] mod tests { use super::*; #[test] fn serialize() { - let entity1 = UserId("some id".to_string()); + let entity1 = Entity::UserId("some id".to_string()); assert_eq!(serde_json::to_string(&entity1).unwrap(), "\"user-some id\""); - let entity2 = UserEmail("some@email".to_string()); + let entity2 = Entity::UserEmail("some@email".to_string()); assert_eq!( serde_json::to_string(&entity2).unwrap(), "\"user-some@email\"" ); - let entity3 = GroupId("some group id".to_string()); + let entity3 = Entity::GroupId("some group id".to_string()); assert_eq!( serde_json::to_string(&entity3).unwrap(), "\"group-some group id\"" ); - let entity4 = GroupEmail("some@group.email".to_string()); + let entity4 = Entity::GroupEmail("some@group.email".to_string()); assert_eq!( 
serde_json::to_string(&entity4).unwrap(), "\"group-some@group.email\"" ); - let entity5 = Domain("example.com".to_string()); + let entity5 = Entity::Domain("example.com".to_string()); assert_eq!( serde_json::to_string(&entity5).unwrap(), "\"domain-example.com\"" ); - let entity6 = Project(Team::Viewers, "project id".to_string()); + let entity6 = Entity::Project(Team::Viewers, "project id".to_string()); assert_eq!( serde_json::to_string(&entity6).unwrap(), "\"project-viewers-project id\"" ); - let entity7 = AllUsers; + let entity7 = Entity::AllUsers; assert_eq!(serde_json::to_string(&entity7).unwrap(), "\"allUsers\""); - let entity8 = AllAuthenticatedUsers; + let entity8 = Entity::AllAuthenticatedUsers; assert_eq!( serde_json::to_string(&entity8).unwrap(), "\"allAuthenticatedUsers\"" @@ -212,46 +147,46 @@ mod tests { let str1 = "\"user-some id\""; assert_eq!( serde_json::from_str::(str1).unwrap(), - UserId("some id".to_string()) + Entity::UserId("some id".to_string()) ); let str2 = "\"user-some@email\""; assert_eq!( serde_json::from_str::(str2).unwrap(), - UserEmail("some@email".to_string()) + Entity::UserEmail("some@email".to_string()) ); let str3 = "\"group-some group id\""; assert_eq!( serde_json::from_str::(str3).unwrap(), - GroupId("some group id".to_string()) + Entity::GroupId("some group id".to_string()) ); let str4 = "\"group-some@group.email\""; assert_eq!( serde_json::from_str::(str4).unwrap(), - GroupEmail("some@group.email".to_string()) + Entity::GroupEmail("some@group.email".to_string()) ); let str5 = "\"domain-example.com\""; assert_eq!( serde_json::from_str::(str5).unwrap(), - Domain("example.com".to_string()) + Entity::Domain("example.com".to_string()) ); let str6 = "\"project-viewers-project id\""; assert_eq!( serde_json::from_str::(str6).unwrap(), - Project(Team::Viewers, "project id".to_string()) + Entity::Project(Team::Viewers, "project id".to_string()) ); let str7 = "\"allUsers\""; - assert_eq!(serde_json::from_str::(str7).unwrap(), AllUsers); + 
assert_eq!(serde_json::from_str::(str7).unwrap(), Entity::AllUsers); let str8 = "\"allAuthenticatedUsers\""; assert_eq!( serde_json::from_str::(str8).unwrap(), - AllAuthenticatedUsers + Entity::AllAuthenticatedUsers ); } } diff --git a/src/models/error.rs b/src/models/error.rs new file mode 100644 index 0000000..f5a29f8 --- /dev/null +++ b/src/models/error.rs @@ -0,0 +1,42 @@ +use super::ErrorReason; + +/// Google Error structure +#[derive(Debug, serde::Deserialize)] +#[serde(rename = "camelCase")] +pub struct Error { + /// The scope of the error. Example values include: global and push. + pub domain: String, + /// Example values include `invalid`, `invalidParameter`, and `required`. + pub reason: ErrorReason, + /// Description of the error. + /// + /// Example values include `Invalid argument`, `Login required`, and `Required parameter: + /// project`. + pub message: String, + /// The location or part of the request that caused the error. Use with `location` to pinpoint + /// the error. For example, if you specify an invalid value for a parameter, the `locationType` + /// will be parameter and the location will be the name of the parameter. + /// + /// Example values include `header` and `parameter`. + pub location_type: Option, + /// The specific item within the `locationType` that caused the error. For example, if you + /// specify an invalid value for a parameter, the `location` will be the name of the parameter. + /// + /// Example values include: `Authorization`, `project`, and `projection`. 
+ pub location: Option, +} + +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.message) + } +} + +impl std::error::Error for Error {} + +impl Error { + /// Check what was the reason of error + pub fn is_reason(&self, reason: &ErrorReason) -> bool { + self.reason == *reason + } +} \ No newline at end of file diff --git a/src/models/error_list.rs b/src/models/error_list.rs new file mode 100644 index 0000000..f0ace43 --- /dev/null +++ b/src/models/error_list.rs @@ -0,0 +1,15 @@ +use super::Error; + +/// A container for the error information. +#[derive(Debug, serde::Deserialize)] +#[serde(rename = "camelCase")] +pub struct ErrorList { + /// A container for the error details. + pub errors: Vec, + /// An HTTP status code value, without the textual description. + /// + /// Example values include: 400 (Bad Request), 401 (Unauthorized), and 404 (Not Found). + pub code: u16, + /// Description of the error. Same as errors.message. + pub message: String, +} \ No newline at end of file diff --git a/src/models/error_reason.rs b/src/models/error_reason.rs new file mode 100644 index 0000000..544084c --- /dev/null +++ b/src/models/error_reason.rs @@ -0,0 +1,221 @@ +/// Google provides a list of codes, but testing indicates that this list is not exhaustive. +#[derive(Debug, PartialEq, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum ErrorReason { + /// When requesting a download using alt=media URL parameter, the direct URL path to use is + /// prefixed by /download. If this is omitted, the service will issue this redirect with the + /// appropriate media download path in the Location header. + MediaDownloadRedirect, + /// The conditional request would have been successful, but the condition was false, so no body + /// was sent. + NotModified, + /// Resource temporarily located elsewhere according to the Location header. 
Among other + /// reasons, this can occur when cookie-based authentication is being used, e.g., when using the + /// Storage Browser, and it receives a request to download content. + TemporaryRedirect, + // /// Indicates an incomplete resumable upload and provides the range of bytes already received by + // /// Cloud Storage. Responses with this status do not contain a body. + // ResumeIncomplete, + + // + /// Undocumented variant that is sometimes returned by Google. + Invalid, + /// The request cannot be completed based on your current Cloud Storage settings. For example, + /// you cannot lock a retention policy if the requested bucket doesn't have a retention policy, + /// and you cannot set ACLs if the requested bucket has Bucket Policy Only enabled. + BadRequest, + /// The retention period on a locked bucket cannot be reduced. + BadRequestException, + /// Bad Cloud KMS key. + CloudKmsBadKey, + /// Cloud KMS key name cannot be changed. + CloudKmsCannotChangeKeyName, + /// Resource's Cloud KMS decryption key not found. + CloudKmsDecryptionKeyNotFound, + /// Cloud KMS key is disabled, destroyed, or scheduled to be destroyed. + CloudKmsDisabledKey, + /// Cloud KMS encryption key not found. + CloudKmsEncryptionKeyNotFound, + /// Cloud KMS key location not allowed. + CloudKmsKeyLocationNotAllowed, + /// Missing an encryption algorithm, or the provided algorithm is not "AES256." + CustomerEncryptionAlgorithmIsInvalid, + /// Missing an encryption key, or it is not Base64 encoded, or it does not meet the required + /// length of the encryption algorithm. + CustomerEncryptionKeyFormatIsInvalid, + /// The provided encryption key is incorrect. + CustomerEncryptionKeyIsIncorrect, + /// Missing a SHA256 hash of the encryption key, or it is not Base64 encoded, or it does not + /// match the encryption key. + CustomerEncryptionKeySha256IsInvalid, + /// The value for the alt URL parameter was not recognized. 
+ InvalidAltValue, + /// The value for one of fields in the request body was invalid. + InvalidArgument, + /// The value for one of the URL parameters was invalid. In addition to normal URL parameter + /// validation, any URL parameters that have a corresponding value in provided JSON request + /// bodies must match if they are both specified. If using JSONP, you will get this error if you + /// provide an alt parameter that is not json. + InvalidParameter, + /// Uploads or normal API request was sent to a `/download/*` path. Use the same path, but + /// without the /download prefix. + NotDownload, + /// Downloads or normal API request was sent to an `/upload/*` path. Use the same path, but + /// without the `/upload` prefix. + NotUpload, + /// Could not parse the body of the request according to the provided Content-Type. + ParseError, + /// Channel id must match the following regular expression: `[A-Za-z0-9\\-_\\+/=]+`. + #[serde(rename = "push.channelIdInvalid")] + PushChannelIdInvalid, + /// `storage.objects.watchAll`'s id property must be unique across channels. + #[serde(rename = "push.channelIdNotUnique")] + PushChannelIdNotUnique, + /// `storage.objects.watchAll`'s address property must contain a valid URL. + #[serde(rename = "push.webhookUrlNoHostOrAddress")] + PushWebhookUrlNoHostOrAddress, + /// `storage.objects.watchAll`'s address property must be an HTTPS URL. + #[serde(rename = "push.webhookUrlNotHttps")] + PushWebhookUrlNotHttps, + /// A required URL parameter or required request body JSON property is missing. + Required, + /// The resource is encrypted with a customer-supplied encryption key, but the request did not + /// provide one. + ResourceIsEncryptedWithCustomerEncryptionKey, + /// The resource is not encrypted with a customer-supplied encryption key, but the request + /// provided one. + ResourceNotEncryptedWithCustomerEncryptionKey, + /// A request was made to an API version that has been turned down. 
Clients will need to update + /// to a supported version. + TurnedDown, + /// The user project specified in the request does not match the user project specified in an + /// earlier, related request. + UserProjectInconsistent, + /// The user project specified in the request is invalid, either because it is a malformed + /// project id or because it refers to a non-existent project. + UserProjectInvalid, + /// The requested bucket has Requester Pays enabled, the requester is not an owner of the + /// bucket, and no user project was present in the request. + UserProjectMissing, + /// storage.objects.insert must be invoked as an upload rather than a metadata. + WrongUrlForUpload, + // + + // + /// Access to a Requester Pays bucket requires authentication. + #[serde(rename = "AuthenticationRequiredRequesterPays")] + AuthenticationRequiredRequesterPays, + /// This error indicates a problem with the authorization provided in the request to Cloud + /// Storage. The following are some situations where that will occur: + /// + /// * The OAuth access token has expired and needs to be refreshed. This can be avoided by + /// refreshing the access token early, but code can also catch this error, refresh the token + /// and retry automatically. + /// * Multiple non-matching authorizations were provided; choose one mode only. + /// * The OAuth access token's bound project does not match the project associated with the + /// provided developer key. + /// * The Authorization header was of an unrecognized format or uses an unsupported credential + /// type. + AuthError, + /// When downloading content from a cookie-authenticated site, e.g., using the Storage Browser, + /// the response will redirect to a temporary domain. This error will occur if access to said + /// domain occurs after the domain expires. Issue the original request again, and receive a new + /// redirect. + LockedDomainExpired, + /// Requests to storage.objects.watchAll will fail unless you verify you own the domain. 
+ #[serde(rename = "push.webhookUrlUnauthorized")] + PushWebhookUrlUnauthorized, + // /// Access to a non-public method that requires authorization was made, but none was provided in + // /// the Authorization header or through other means. + // Required, + // + + // + /// The account associated with the project that owns the bucket or object has been disabled. Check the Google Cloud Console to see if there is a problem with billing, and if not, contact account support. + AccountDisabled, + /// The Cloud Storage JSON API is restricted by law from operating with certain countries. + CountryBlocked, + /// According to access control policy, the current user does not have access to perform the requested action. This code applies even if the resource being acted on doesn't exist. + Forbidden, + /// According to access control policy, the current user does not have access to perform the requested action. This code applies even if the resource being acted on doesn't exist. + InsufficientPermissions, + /// Object overwrite or deletion is not allowed due to an active hold on the object. + ObjectUnderActiveHold, + /// The Cloud Storage rate limit was exceeded. Retry using exponential backoff. + RateLimitExceeded, + /// Object overwrite or deletion is not allowed until the object meets the retention period set by the retention policy on the bucket. + RetentionPolicyNotMet, + /// Requests to this API require SSL. + SslRequired, + /// Calls to storage.channels.stop require that the caller own the channel. + StopChannelCallerNotOwner, + /// This error implies that for the project associated with the OAuth token or the developer key provided, access to Cloud Storage JSON API is not enabled. This is most commonly because Cloud Storage JSON API is not enabled in the Google Cloud Console, though there are other cases where the project is blocked or has been deleted when this can occur. 
+ #[serde(rename = "UsageLimits.accessNotConfigured")] + UsageLimitsAccessNotConfigured, + /// The requester is not authorized to use the project specified in their request. The + /// requester must have either the serviceusage.services.use permission or the Editor role for + /// the specified project. + #[serde(rename = "UserProjectAccessDenied")] + UserProjectAccessDenied, + /// There is a problem with the project used in the request that prevents the operation from + /// completing successfully. One issue could be billing. Check the billing page to see if you + /// have a past due balance or if the credit card (or other payment mechanism) on your account is expired. For project creation, see the Projects page in the Google Cloud Console. For other problems, see the Resources and Support page. + #[serde(rename = "UserProjectAccountProblem")] + UserProjectAccountProblem, + /// The developer-specified per-user rate quota was exceeded. If you are the developer, then + /// you can view these quotas at Quotas pane in the Google Cloud Console. + UserRateLimitExceeded, + /// Seems to indicate the same thing + // NONEXHAUST + QuotaExceeded, + // + /// Either there is no API method associated with the URL path of the request, or the request + /// refers to one or more resources that were not found. + NotFound, + /// Either there is no API method associated with the URL path of the request, or the request + /// refers to one or more resources that were not found. + MethodNotAllowed, + /// The request timed out. Please try again using truncated exponential backoff. + UploadBrokenConnection, + /// A request to change a resource, usually a storage.*.update or storage.*.patch method, failed + /// to commit the change due to a conflicting concurrent change to the same resource. The + /// request can be retried, though care should be taken to consider the new state of the + /// resource to avoid blind overwriting of other agent's changes. 
+ Conflict, + /// You have attempted to use a resumable upload session that is no longer available. If the + /// reported status code was not successful and you still wish to upload the file, you must + /// start a new session. + Gone, + // /// You must provide the Content-Length HTTP header. This error has no response body. + // LengthRequired, + + // + /// At least one of the pre-conditions you specified did not hold. + ConditionNotMet, + /// Request violates an OrgPolicy constraint. + OrgPolicyConstraintFailed, + // + /// The Cloud Storage JSON API supports up to 5 TB objects. + /// + /// This error may, alternatively, arise if copying objects between locations and/or storage + /// classes can not complete within 30 seconds. In this case, use the `Object::rewrite` method + /// instead. + UploadTooLarge, + /// The requested Range cannot be satisfied. + RequestedRangeNotSatisfiable, + /// A [Cloud Storage JSON API usage limit](https://cloud.google.com/storage/quotas) was + /// exceeded. If your application tries to use more than its limit, additional requests will + /// fail. Throttle your client's requests, and/or use truncated exponential backoff. + #[serde(rename = "usageLimits.rateLimitExceeded")] + UsageLimitsRateLimitExceeded, + + // + /// We encountered an internal error. Please try again using truncated exponential backoff. + BackendError, + /// We encountered an internal error. Please try again using truncated exponential backoff. + InternalError, + // + /// May be returned by Google, meaning undocumented. + // NONEXHAUST + GatewayTimeout, +} \ No newline at end of file diff --git a/src/models/error_response.rs b/src/models/error_response.rs new file mode 100644 index 0000000..0a94512 --- /dev/null +++ b/src/models/error_response.rs @@ -0,0 +1,30 @@ +use super::{ErrorList, Error, ErrorReason}; + +/// The structure of an error response returned by Google. 
+#[derive(Debug, serde::Deserialize)] +#[serde(rename = "camelCase")] +pub struct ErrorResponse { + /// A container for the error information. + pub error: ErrorList, +} + +impl ErrorResponse { + /// Return list of errors returned by Google + pub fn errors(&self) -> &[Error] { + &self.error.errors + } + + /// Check whether errors contain given reason + pub fn errors_has_reason(&self, reason: &ErrorReason) -> bool { + self.errors() + .iter() + .any(|google_error| google_error.is_reason(reason)) + } +} + +impl std::fmt::Display for ErrorResponse { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + writeln!(f, "{:?}", self) + } +} +impl std::error::Error for ErrorResponse {} \ No newline at end of file diff --git a/src/models/hmac_key.rs b/src/models/hmac_key.rs new file mode 100644 index 0000000..7a03b82 --- /dev/null +++ b/src/models/hmac_key.rs @@ -0,0 +1,18 @@ +use super::HmacMeta; + +/// The `HmacKey` resource represents an HMAC key within Cloud Storage. The resource consists of a +/// secret and `HmacMeta`. HMAC keys can be used as credentials for service accounts. For more +/// information, see HMAC Keys. +/// +/// Note that the `HmacKey` resource is only returned when you use `HmacKey::create`. Other +/// methods, such as `HmacKey::read`, return the metadata portion of the HMAC key resource. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct HmacKey { + /// The kind of item this is. For HMAC keys, this is always `storage#hmacKey`. + pub kind: String, + /// HMAC key metadata. + pub metadata: HmacMeta, + /// HMAC secret key material. + pub secret: String, +} \ No newline at end of file diff --git a/src/models/hmac_metadata.rs b/src/models/hmac_metadata.rs new file mode 100644 index 0000000..50ab504 --- /dev/null +++ b/src/models/hmac_metadata.rs @@ -0,0 +1,29 @@ +use super::HmacState; + +/// Contains information about an Hmac Key. 
+#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct HmacMeta { + /// The kind of item this is. For HMAC key metadata, this is always `storage#hmacKeyMetadata`. + pub kind: String, + /// The ID of the HMAC key, including the Project ID and the Access ID. + pub id: String, + /// The link to this resource. + pub self_link: String, + /// The access ID of the HMAC Key. + pub access_id: String, + /// The Project ID of the project that owns the service account to which the key authenticates. + pub project_id: String, + /// The email address of the key's associated service account. + pub service_account_email: String, + /// The state of the key. + pub state: HmacState, + /// The creation time of the HMAC key. + #[serde(with = "time::serde::rfc3339")] + pub time_created: time::OffsetDateTime, + /// The last modification time of the HMAC key metadata. + #[serde(with = "time::serde::rfc3339")] + pub updated: time::OffsetDateTime, + /// HTTP 1.1 Entity tag for the HMAC key. + pub etag: String, +} \ No newline at end of file diff --git a/src/models/hmac_state.rs b/src/models/hmac_state.rs new file mode 100644 index 0000000..9cca4f0 --- /dev/null +++ b/src/models/hmac_state.rs @@ -0,0 +1,11 @@ +/// The state of an Hmac Key. +#[derive(Debug, Clone, Copy, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "UPPERCASE")] +pub enum HmacState { + /// This Hmac key is currently used. + Active, + /// This Hmac key has been set to inactive. + Inactive, + /// This Hmac key has been permanently deleted. + Deleted, +} \ No newline at end of file diff --git a/src/models/iam_condition.rs b/src/models/iam_condition.rs new file mode 100644 index 0000000..883bc28 --- /dev/null +++ b/src/models/iam_condition.rs @@ -0,0 +1,13 @@ +/// A condition object associated with a binding. 
+#[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct IamCondition { + /// Title of the condition. For example, "expires_end_of_2018". + pub title: String, + /// Optional description of the condition. For example, "Expires at midnight on 2018-12-31". + pub description: Option, + /// [Attribute-based](https://cloud.google.com/iam/docs/conditions-overview#attributes) logic + /// expression using a subset of the Common Expression Language (CEL). For example, + /// "request.time < timestamp('2019-01-01T00:00:00Z')". + pub expression: String, +} \ No newline at end of file diff --git a/src/models/iam_configuration.rs b/src/models/iam_configuration.rs new file mode 100644 index 0000000..0c77396 --- /dev/null +++ b/src/models/iam_configuration.rs @@ -0,0 +1,13 @@ +use super::UniformBucketLevelAccess; + +/// Contains information about the Buckets IAM configuration. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct IamConfiguration { + /// The bucket's uniform bucket-level access configuration. + /// + /// Note: iamConfiguration also includes the bucketPolicyOnly field, which uses a legacy name + /// but has the same functionality as the uniformBucketLevelAccess field. We recommend only + /// using uniformBucketLevelAccess, as specifying both fields may result in unreliable behavior. + pub uniform_bucket_level_access: UniformBucketLevelAccess, +} \ No newline at end of file diff --git a/src/models/iam_policy.rs b/src/models/iam_policy.rs new file mode 100644 index 0000000..f7ee67c --- /dev/null +++ b/src/models/iam_policy.rs @@ -0,0 +1,19 @@ +use super::Binding; + +/// A representation of the IAM Policy for a certain bucket. +#[derive(Debug, PartialEq, Default, serde::Deserialize, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct IamPolicy { + /// The [Cloud IAM policy](https://cloud.google.com/iam/docs/policies#versions) version. 
+ pub version: i32, + /// The kind of item this is. For policies, this field is ignored in a request and is + /// `storage#policy` in a response. + pub kind: Option, + /// The ID of the resource to which this policy belongs. The response for this field is of the + /// form `projects/_/buckets/bucket`. This field is ignored in a request. + pub resource_id: Option, + /// A list of the bindings for this policy. + pub bindings: Vec, + /// HTTP 1.1 [Entity tag](https://tools.ietf.org/html/rfc7232#section-2.3) for this policy. + pub etag: String, +} \ No newline at end of file diff --git a/src/models/iam_role.rs b/src/models/iam_role.rs new file mode 100644 index 0000000..4877f0e --- /dev/null +++ b/src/models/iam_role.rs @@ -0,0 +1,14 @@ +use super::{StandardIamRole, PrimitiveIamRole, LegacyIamRole}; + +/// All possible roles that can exist in the IAM system. For a more comprehensive version, check +/// [Googles Documentation](https://cloud.google.com/storage/docs/access-control/iam-roles). +#[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize)] +#[serde(untagged)] +pub enum IamRole { + /// Standard roles can be applied to either buckets or projects. + Standard(StandardIamRole), + /// Primitive roles are roles that must be added on a per-project basis. + Primitive(PrimitiveIamRole), + /// Legacy roles are roles that can only be added to an individual bucket. + Legacy(LegacyIamRole), +} \ No newline at end of file diff --git a/src/models/legacy_iam_role.rs b/src/models/legacy_iam_role.rs new file mode 100644 index 0000000..89f3d48 --- /dev/null +++ b/src/models/legacy_iam_role.rs @@ -0,0 +1,38 @@ +/// The following enum contains Cloud IAM roles that are equivalent to Access Control List (ACL) +/// permissions. These Cloud IAM roles can only be applied to a bucket, not a project. +#[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize)] +pub enum LegacyIamRole { + /// Grants permission to view objects and their metadata, excluding ACLs. 
+ #[serde(rename = "roles/storage.legacyObjectReader")] + LegacyObjectReader, + /// Grants permission to view and edit objects and their metadata, including ACLs. + #[serde(rename = "roles/storage.legacyObjectOwner")] + LegacyObjectOwner, + /// Grants permission to list a bucket's contents and read bucket metadata, excluding Cloud IAM + /// policies. Also grants permission to read object metadata, excluding Cloud IAM policies, when + /// listing objects. + /// + /// Use of this role is also reflected in the bucket's ACLs. See + /// [Cloud IAM relation to ACLs](https://cloud.google.com/storage/docs/access-control/iam#acls) + /// for more information. + #[serde(rename = "roles/storage.legacyBucketReader")] + LegacyBucketReader, + /// Grants permission to create, overwrite, and delete objects; list objects in a bucket and + /// read object metadata, excluding Cloud IAM policies, when listing; and read bucket metadata, + /// excluding Cloud IAM policies. + /// + /// Use of this role is also reflected in the bucket's ACLs. See + /// [Cloud IAM relation to ACLs](https://cloud.google.com/storage/docs/access-control/iam#acls) + /// for more information. + #[serde(rename = "roles/storage.legacyBucketWriter")] + LegacyBucketWriter, + /// Grants permission to create, overwrite, and delete objects; list objects in a bucket and + /// read object metadata, excluding Cloud IAM policies, when listing; and read and edit bucket + /// metadata, including Cloud IAM policies. + /// + /// Use of this role is also reflected in the bucket's ACLs. See + /// [Cloud IAM relation to ACLs](https://cloud.google.com/storage/docs/access-control/iam#acls) + /// for more information. 
+ #[serde(rename = "roles/storage.legacyBucketOwner")] + LegacyBucketOwner, +} \ No newline at end of file diff --git a/src/models/lifecycle.rs b/src/models/lifecycle.rs new file mode 100644 index 0000000..ef33147 --- /dev/null +++ b/src/models/lifecycle.rs @@ -0,0 +1,10 @@ +use super::Rule; + +/// Contains a set of `Rule` Objects which together describe the way this lifecycle behaves +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Lifecycle { + /// A lifecycle management rule, which is made of an action to take and the condition(s) under + /// which the action will be taken. + pub rule: Vec, +} \ No newline at end of file diff --git a/src/models/list_response.rs b/src/models/list_response.rs new file mode 100644 index 0000000..a01afda --- /dev/null +++ b/src/models/list_response.rs @@ -0,0 +1,6 @@ +#[derive(Debug, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct ListResponse { + #[serde(default = "Vec::new")] + pub items: Vec, +} \ No newline at end of file diff --git a/src/resources/location.rs b/src/models/location.rs similarity index 100% rename from src/resources/location.rs rename to src/models/location.rs diff --git a/src/models/logging.rs b/src/models/logging.rs new file mode 100644 index 0000000..a30b858 --- /dev/null +++ b/src/models/logging.rs @@ -0,0 +1,9 @@ +/// Contains information of where and how access logs to this bucket are maintained. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Logging { + /// The destination bucket where the current bucket's logs should be placed. + pub log_bucket: String, + /// A prefix for log object names. The default prefix is the bucket name. 
+ pub log_object_prefix: String, +} \ No newline at end of file diff --git a/src/models/mod.rs b/src/models/mod.rs new file mode 100644 index 0000000..d035ace --- /dev/null +++ b/src/models/mod.rs @@ -0,0 +1,132 @@ +pub(crate)mod create; + +mod legacy_iam_role; +mod test_iam_permission; +mod primitive_iam_role; +mod standard_iam_role; +mod iam_role; +mod iam_condition; +mod binding; +mod iam_policy; +mod storage_class; +mod billing; +mod condition; +mod action_type; +mod action; +mod rule; +mod bucket_access_control; +mod bucket; +mod retention_policy; +mod iam_configuration; +mod uniform_bucket_level_access; +mod encryption; +mod owner; +mod website; +mod logging; +mod versioning; +mod cors; +mod lifecycle; +mod team; +mod project_team; +mod role; +pub(crate) mod list_response; +mod entity; +mod default_object_access_control; +mod hmac_key; +mod hmac_metadata; +mod hmac_state; +mod update_hmac_metadata; +mod update_hmac_request; +mod location; +mod customer_encryption; +mod compose_request; +mod source_object; +mod object_precondition; +mod object_list_request; +mod object_create_parameters; +mod object_read_parameters; +mod compose_parameters; +mod copy_paramters; +mod rewrite_parameters; +mod delete_parameters; +mod update_parameters; +mod projection; +mod object_list; +pub(crate) mod rewrite_response; +mod object; +// mod notification; +mod topic; +mod error; +mod error_list; +mod error_reason; +mod error_response; +mod response; +mod object_access_control; +mod object_access_control_list; + +pub use self::{ + legacy_iam_role::LegacyIamRole, + test_iam_permission::TestIamPermission, + primitive_iam_role::PrimitiveIamRole, + standard_iam_role::StandardIamRole, + iam_role::IamRole, + iam_condition::IamCondition, + binding::Binding, + iam_policy::IamPolicy, + storage_class::StorageClass, + billing::Billing, + condition::Condition, + action_type::ActionType, + action::Action, + rule::Rule, + bucket_access_control::BucketAccessControl, + bucket::Bucket, + 
retention_policy::RetentionPolicy, + iam_configuration::IamConfiguration, + uniform_bucket_level_access::UniformBucketLevelAccess, + encryption::Encryption, + owner::Owner, + website::Website, + logging::Logging, + versioning::Versioning, + cors::Cors, + lifecycle::Lifecycle, + team::Team, + project_team::ProjectTeam, + role::Role, + entity::Entity, + default_object_access_control::DefaultObjectAccessControl, + hmac_key::HmacKey, + hmac_metadata::HmacMeta, + hmac_state::HmacState, + location::{Location, AusLocation, AsiaLocation, EuropeLocation, NALocation, SALocation, DualRegion, MultiRegion, SingleRegion}, + customer_encryption::CustomerEncrypton, + compose_request::ComposeRequest, + source_object::SourceObject, + object_precondition::ObjectPrecondition, + object_list_request::ListRequest, + object_create_parameters::CreateParameters, + object_read_parameters::ReadParameters, + compose_parameters::ComposeParameters, + copy_paramters::CopyParameters, + rewrite_parameters::RewriteParameters, + delete_parameters::DeleteParameters, + update_parameters::UpdateParameters, + projection::Projection, + object_list::ObjectList, + object::Object, + //notification::Notification, + topic::Topic, + error::Error, + error_list::ErrorList, + error_reason::ErrorReason, + error_response::ErrorResponse, + object_access_control::ObjectAccessControl, +}; + +pub(crate) use self::{ + response::Response, + list_response::ListResponse, + update_hmac_metadata::UpdateHmacMetadata, + update_hmac_request::UpdateHmacRequest, +}; \ No newline at end of file diff --git a/src/resources/notification.rs b/src/models/notification.rs similarity index 59% rename from src/resources/notification.rs rename to src/models/notification.rs index 87bdd8b..f06b22c 100644 --- a/src/resources/notification.rs +++ b/src/models/notification.rs @@ -1,6 +1,5 @@ -use crate::error::GoogleResponse; -use crate::resources::common::ListResponse; -pub use crate::resources::topic::Topic; +use std::collections::HashMap; +use 
super::{create, Topic}; /// A subscription to receive /// [Pub/Sub notifications](https://cloud.google.com/storage/docs/pubsub-notifications). @@ -16,7 +15,7 @@ pub struct Notification { event_types: Option>, /// An optional list of additional attributes to attach to each Pub/Sub message published /// for this notification subscription. - custom_attributes: Option>, + custom_attributes: Option>, /// The desired content of the Payload. /// /// Acceptable values are: @@ -35,49 +34,17 @@ pub struct Notification { kind: String, } -/// Use this struct to create new notifications. -#[derive(Debug, PartialEq, Default, serde::Serialize)] -pub struct NewNotification { - /// The Pub/Sub topic to which this subscription publishes. Formatted as: - /// `'//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'`. - topic: String, - /// If present, only send notifications about listed event types. If empty, send notifications - /// for all event types. - event_types: Option>, - /// An optional list of additional attributes to attach to each Pub/Sub message published - /// for this notification subscription. - custom_attributes: Option>, - /// The desired content of the Payload. - payload_format: Option, - /// If present, only apply this notification configuration to object names that begin with this - /// prefix. - object_name_prefix: Option, -} - -/// Various ways of having the response formatted. -#[derive(Debug, PartialEq, serde::Serialize)] -#[serde(rename_all = "SCREAMING_SNAKE_CASE")] -pub enum PayloadFormat { - /// Respond with a format as specified in the Json API V1 documentation. - JsonApiV1, - /// Do not respond. - None, -} - impl Notification { /// Creates a notification subscription for a given bucket. 
- pub fn create(bucket: &str, new_notification: &NewNotification) -> Result { + pub fn create(bucket: &str, new_notification: &create::Notification) -> Result { let url = format!("{}/b/{}/notificationConfigs", crate::BASE_URL, bucket); - let result: GoogleResponse = crate::CLIENT + let result: crate::models::Response = crate::CLIENT .post(&url) .headers(crate::get_headers()?) .json(new_notification) .send()? .json()?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + result } /// View a notification configuration. @@ -88,28 +55,23 @@ impl Notification { bucket, notification ); - let result: GoogleResponse = crate::CLIENT; - .get(&url) - .headers(crate::get_headers()?) + let result: crate::models::Response = crate::CLIENT.get(&url).headers(crate::get_headers()?) .send()? .json()?; - match result { - GoogleResponse::Success(s) => Ok(s), - GoogleResponse::Error(e) => Err(e.into()), - } + result } /// Retrieves a list of notification subscriptions for a given bucket.} pub fn list(bucket: &str) -> Result, crate::Error> { let url = format!("{}/v1/b/{}/notificationConfigs", crate::BASE_URL, bucket); - let result: GoogleResponse> = crate::CLIENT + let result: crate::models::Response> = crate::CLIENT .get(&url) .headers(crate::get_headers()?) .send()? 
.json()?; match result { - GoogleResponse::Success(s) => Ok(s.items), - GoogleResponse::Error(e) => Err(e.into()), + crate::models::Response::Success(s) => Ok(s.items), + crate::models::Response::Error(e) => Err(e.into()), } } @@ -136,15 +98,18 @@ mod tests { #[test] fn create() { - let bucket = crate::read_test_bucket(); + let bucket = crate::global_client::read_test_bucket(); + #[cfg(feature = "dotenv")] + dotenv::dotenv().ok(); + let service_account = crate::ServiceAccount::default(); let topic = format!( "//pubsub.googleapis.com/projects/{}/topics/{}", - crate::SERVICE_ACCOUNT.project_id, + service_account.project_id, "testing-is-important", ); - let new_notification = NewNotification { + let new_notification = create::Notification { topic, - payload_format: Some(PayloadFormat::JsonApiV1), + payload_format: Some(create::PayloadFormat::JsonApiV1), ..Default::default() }; Notification::create(&bucket.name, &new_notification).unwrap(); @@ -152,30 +117,33 @@ mod tests { #[test] fn read() { - let bucket = crate::read_test_bucket(); + let bucket = crate::global_client::read_test_bucket(); Notification::read(&bucket.name, "testing-is-important").unwrap(); } #[test] fn list() { - let bucket = crate::read_test_bucket(); + let bucket = crate::global_client::read_test_bucket(); Notification::list(&bucket.name).unwrap(); } #[test] fn delete() { - let bucket = crate::read_test_bucket(); + let bucket = crate::global_client::read_test_bucket(); + #[cfg(feature = "dotenv")] + dotenv::dotenv().ok(); + let service_account = crate::ServiceAccount::default(); let topic = format!( "//pubsub.googleapis.com/projects/{}/topics/{}", - crate::SERVICE_ACCOUNT.project_id, + service_account.project_id, "testing-is-important", ); - let new_notification = NewNotification { + let new_notification = create::Notification { topic, - payload_format: Some(PayloadFormat::JsonApiV1), + payload_format: Some(create::PayloadFormat::JsonApiV1), ..Default::default() }; Notification::create(&bucket.name, 
&new_notification).unwrap(); Notification::delete(&bucket.name, "testing-is-important").unwrap(); } -} +} \ No newline at end of file diff --git a/src/models/object.rs b/src/models/object.rs new file mode 100644 index 0000000..539cbf8 --- /dev/null +++ b/src/models/object.rs @@ -0,0 +1,380 @@ +use std::collections::HashMap; + +use crate::Error; + +use super::{CustomerEncrypton, Owner, ObjectAccessControl}; + +/// A resource representing a file in Google Cloud Storage. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Object { + /// The kind of item this is. For objects, this is always `storage#object`. + pub kind: String, + /// The ID of the object, including the bucket name, object name, and generation number. + pub id: String, + /// The link to this object. + pub self_link: String, + /// The name of the object. Required if not specified by URL parameter. + pub name: String, + /// The name of the bucket containing this object. + pub bucket: String, + /// The content generation of this object. Used for object versioning. + #[serde(deserialize_with = "crate::from_str")] + pub generation: i64, + /// The version of the metadata for this object at this generation. Used for preconditions and + /// for detecting changes in metadata. A metageneration number is only meaningful in the context + /// of a particular generation of a particular object. + #[serde(deserialize_with = "crate::from_str")] + pub metageneration: i64, + /// Content-Type of the object data. If an object is stored without a Content-Type, it is served + /// as application/octet-stream. + pub content_type: Option, + /// The creation time of the object in RFC 3339 format. + #[serde(with = "time::serde::rfc3339")] + pub time_created: time::OffsetDateTime, + /// The modification time of the object metadata in RFC 3339 format. 
+ #[serde(with = "time::serde::rfc3339")] + pub updated: time::OffsetDateTime, + /// The deletion time of the object in RFC 3339 format. Returned if and only if this version of + /// the object is no longer a live version, but remains in the bucket as a noncurrent version. + #[serde(default, with = "time::serde::rfc3339::option")] + pub time_deleted: Option, + /// Whether or not the object is subject to a temporary hold. + pub temporary_hold: Option, + /// Whether or not the object is subject to an event-based hold. + pub event_based_hold: Option, + /// The earliest time that the object can be deleted, based on a bucket's retention policy, in + /// RFC 3339 format. + #[serde(default, with = "time::serde::rfc3339::option")] + pub retention_expiration_time: Option, + /// Storage class of the object. + pub storage_class: String, + /// The time at which the object's storage class was last changed. When the object is initially + /// created, it will be set to timeCreated. + #[serde(with = "time::serde::rfc3339")] + pub time_storage_class_updated: time::OffsetDateTime, + /// Content-Length of the data in bytes. + #[serde(deserialize_with = "crate::from_str")] + pub size: u64, + /// MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, + /// see Hashes and ETags: Best Practices. + pub md5_hash: Option, + /// Media download link. + pub media_link: String, + /// Content-Encoding of the object data. + pub content_encoding: Option, + /// Content-Disposition of the object data. + pub content_disposition: Option, + /// Content-Language of the object data. + pub content_language: Option, + /// Cache-Control directive for the object data. If omitted, and the object is accessible to all + /// anonymous users, the default will be public, max-age=3600. + pub cache_control: Option, + /// User-provided metadata, in key/value pairs. + pub metadata: Option>, + /// Access controls on the object, containing one or more objectAccessControls Resources. 
If + /// iamConfiguration.uniformBucketLevelAccess.enabled is set to true, this field is omitted in + /// responses, and requests that specify this field fail. + pub acl: Option>, + /// The owner of the object. This will always be the uploader of the object. If + /// `iamConfiguration.uniformBucketLevelAccess.enabled` is set to true, this field does not + /// apply, and is omitted in responses. + pub owner: Option, + /// CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian + /// byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best + /// Practices. + pub crc32c: String, + /// Number of underlying components that make up a composite object. Components are accumulated + /// by compose operations, counting 1 for each non-composite source object and componentCount + /// for each composite source object. Note: componentCount is included in the metadata for + /// composite objects only. + #[serde(default, deserialize_with = "crate::from_str_opt")] + pub component_count: Option, + /// HTTP 1.1 Entity tag for the object. + pub etag: String, + /// Metadata of customer-supplied encryption key, if the object is encrypted by such a key. + pub customer_encryption: Option, + /// Cloud KMS Key used to encrypt this object, if the object is encrypted by such a key. + pub kms_key_name: Option, + + #[serde(skip)] + private_key: Option, + #[serde(skip)] + client_email: Option, +} + +impl Object { + /// Creates a [Signed Url](https://cloud.google.com/storage/docs/access-control/signed-urls) + /// which is valid for `duration` seconds, and lets the possessor download the file contents + /// without any authentication. 
+ /// ### Example + /// ```no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::{Client, object::{Object, ComposeRequest}}; + /// + /// let client = Client::default(); + /// let obj1 = client.object("my_bucket").read("file1", None).await?; + /// let url = obj1.download_url(50)?; + /// // url is now a url to which an unauthenticated user can make a request to download a file + /// // for 50 seconds. + /// # Ok(()) + /// # } + /// ``` + pub fn download_url(&self, duration: u32) -> Result { + self.sign(&self.name, duration, "GET", None, &HashMap::new()) + } + + /// Creates a [Signed Url](https://cloud.google.com/storage/docs/access-control/signed-urls) + /// which is valid for `duration` seconds, and lets the possessor download the file contents + /// without any authentication. + /// ### Example + /// ```no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::{Client, object::{Object, ComposeRequest}}; + /// + /// let client = Client::default(); + /// let obj1 = client.object("my_bucket").read("file1", None).await?; + /// let url = obj1.download_url(50)?; + /// // url is now a url to which an unauthenticated user can make a request to download a file + /// // for 50 seconds. + /// # Ok(()) + /// # } + /// ``` + pub fn download_url_with( + &self, + duration: u32, + opts: crate::DownloadOptions, + ) -> Result { + self.sign( + &self.name, + duration, + "GET", + opts.content_disposition, + &HashMap::new(), + ) + } + + /// Creates a [Signed Url](https://cloud.google.com/storage/docs/access-control/signed-urls) + /// which is valid for `duration` seconds, and lets the possessor upload data to a blob + /// without any authentication. 
+ /// ### Example + /// ```no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::{Client, object::{Object, ComposeRequest}}; + /// + /// let client = Client::default(); + /// let obj1 = client.object("my_bucket").read("file1", None).await?; + /// let url = obj1.upload_url(50)?; + /// // url is now a url to which an unauthenticated user can make a PUT request to upload a file + /// // for 50 seconds. + /// # Ok(()) + /// # } + /// ``` + pub fn upload_url(&self, duration: u32) -> Result { + self.sign(&self.name, duration, "PUT", None, &HashMap::new()) + } + + /// Creates a [Signed Url](https://cloud.google.com/storage/docs/access-control/signed-urls) + /// which is valid for `duration` seconds, and lets the posessor upload data and custom metadata + /// to a blob without any authentication. + /// ### Example + /// ```no_run + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// use cloud_storage::{Client, object::{Object, ComposeRequest}}; + /// use std::collections::HashMap; + /// + /// let client = Client::default(); + /// let obj1 = client.object("my_bucket").read("file1", None).await?; + /// let mut custom_metadata = HashMap::new(); + /// custom_metadata.insert(String::from("field"), String::from("value")); + /// let (url, headers) = obj1.upload_url_with(50, custom_metadata)?; + /// // url is now a url to which an unauthenticated user can make a PUT request to upload a file + /// // for 50 seconds. 
Note that the user must also include the returned headers in the PUT request + /// # Ok(()) + /// # } + /// ``` + pub fn upload_url_with(&self, duration: u32, custom_metadata: HashMap) -> Result<(String, HashMap), Error> { + let url = self.sign(&self.name, duration, "PUT", None, &custom_metadata)?; + let mut headers = HashMap::new(); + for (k, v) in custom_metadata.iter() { + headers.insert(format!("x-goog-meta-{}", k), v.to_string()); + } + Ok((url, headers)) + } + + /// Creates a [Signed Url](https://cloud.google.com/storage/docs/access-control/signed-urls) + /// which is valid for `duration` seconds, and lets the posessor upload new file contents. + /// without any authentication. + /// ### Example + /// ```no_run + /// pub fn upload_url(&self, duration: u32) -> Result { + /// self.sign(&self.name, duration, "POST", None, &HashMap::new()) + /// } + /// ``` + #[inline(always)] + fn sign( + &self, + file_path: &str, + duration: u32, + http_verb: &str, + content_disposition: Option, + custom_metadata: &HashMap, + ) -> Result { + let client_email = self.client_email.clone().expect("client_email not configured"); + if duration > 604800 { + let msg = format!( + "duration may not be greater than 604800, but was {}", + duration + ); + return Err(crate::Error::Other(msg)); + } + + // 0 Sort and construct the canonical headers + let mut headers = vec![("host".to_string(), "storage.googleapis.com".to_string())]; + // Add custom metadata headers, guaranteed unique by HashMap input + for (k, v) in custom_metadata.iter() { + headers.push((format!("x-goog-meta-{}", k), v.to_string())); + } + headers.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); + let canonical_headers: String = headers + .iter() + .map(|(k, v)| format!("{}:{}", k.to_lowercase(), v.to_lowercase())) + .collect::>() + .join("\n"); + let signed_headers = headers + .iter() + .map(|(k, _)| k.to_lowercase()) + .collect::>() + .join(";"); + + // 1 construct the canonical request + let issue_date = 
time::OffsetDateTime::now_utc(); + let file_path = self.path_to_resource(file_path); + let query_string = Self::get_canonical_query_string( + &issue_date, + duration, + &signed_headers, + content_disposition, + &client_email + ); + let canonical_request = self.get_canonical_request( + &file_path, + &query_string, + http_verb, + &canonical_headers, + &signed_headers, + ); + + // 2 get hex encoded SHA256 hash the canonical request + let hex_hash = hex::encode(crate::crypto::sha256(canonical_request.as_bytes()).as_ref()); + + // 3 construct the string to sign + let string_to_sign = format!( + "{signing_algorithm}\n\ + {current_datetime}\n\ + {credential_scope}\n\ + {hashed_canonical_request}", + signing_algorithm = "GOOG4-RSA-SHA256", + current_datetime = issue_date.format(crate::ISO_8601_BASIC_FORMAT).unwrap(), + credential_scope = Self::get_credential_scope(&issue_date), + hashed_canonical_request = hex_hash, + ); + + // 4 sign the string to sign with RSA - SHA256 + let signature = hex::encode(crate::crypto::rsa_pkcs1_sha256(&string_to_sign, self.private_key.clone().expect("No Private Key in Object").as_bytes())?); + + // 5 construct the signed url + Ok(format!( + "https://storage.googleapis.com{path_to_resource}?\ + {query_string}&\ + X-Goog-Signature={request_signature}", + path_to_resource = file_path, + query_string = query_string, + request_signature = signature, + )) + } + + #[inline(always)] + fn get_canonical_request( + &self, + path: &str, + query_string: &str, + http_verb: &str, + headers: &str, + signed_headers: &str, + ) -> String { + format!( + "{http_verb}\n\ + {path_to_resource}\n\ + {canonical_query_string}\n\ + {canonical_headers}\n\ + \n\ + {signed_headers}\n\ + {payload}", + http_verb = http_verb, + path_to_resource = path, + canonical_query_string = query_string, + canonical_headers = headers, + signed_headers = signed_headers, + payload = "UNSIGNED-PAYLOAD", + ) + } + + #[inline(always)] + fn get_canonical_query_string( + date: 
&time::OffsetDateTime, + exp: u32, + headers: &str, + content_disposition: Option, + client_email: &str + ) -> String { + let credential = format!( + "{authorizer}/{scope}", + authorizer = client_email, + scope = Self::get_credential_scope(date), + ); + + let disposition = match content_disposition { + Some(cd) => format!("&response-content-disposition={}", cd), + None => "".to_string() + }; + + let s = format!( + "X-Goog-Algorithm={algo}&\ + X-Goog-Credential={cred}&\ + X-Goog-Date={date}&\ + X-Goog-Expires={exp}&\ + X-Goog-SignedHeaders={signed}\ + {disposition}", + algo = "GOOG4-RSA-SHA256", + cred = crate::percent_encode(&credential), + date = date.format(crate::ISO_8601_BASIC_FORMAT).unwrap(), + exp = exp, + disposition = disposition, + signed = crate::percent_encode(headers), + ); + s + } + + #[inline(always)] + fn path_to_resource(&self, path: &str) -> String { + format!( + "/{bucket}/{file_path}", + bucket = self.bucket, + file_path = crate::percent_encode_noslash(path), + ) + } + + #[inline(always)] + fn get_credential_scope(date: &time::OffsetDateTime) -> String { + format!( + "{}/henk/storage/goog4_request", + date.format(time::macros::format_description!("[year][month][day]")) + .unwrap() + ) + } +} \ No newline at end of file diff --git a/src/models/object_access_control.rs b/src/models/object_access_control.rs new file mode 100644 index 0000000..c395973 --- /dev/null +++ b/src/models/object_access_control.rs @@ -0,0 +1,66 @@ +use super::{Entity, ProjectTeam, Role}; + +/// The ObjectAccessControls resources represent the Access Control Lists (ACLs) for objects within +/// Google Cloud Storage. ACLs let you specify who has access to your data and to what extent. +/// +/// ```text,ignore +/// Important: The methods for this resource fail with a 400 Bad Request response for buckets with +/// uniform bucket-level access enabled. Use storage.buckets.getIamPolicy and +/// storage.buckets.setIamPolicy to control access instead. 
+/// ``` +/// +/// There are two roles that can be assigned to an entity: +/// +/// READERs can get an object, though the acl property will not be revealed. +/// OWNERs are READERs, and they can get the acl property, update an object, and call all +/// objectAccessControls methods on the object. The owner of an object is always an OWNER. +/// +/// For more information, see Access Control, with the caveat that this API uses READER and OWNER +/// instead of READ and FULL_CONTROL. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ObjectAccessControl { + /// The kind of item this is. For object access control entries, this is always + /// `storage#objectAccessControl`. + pub kind: String, + /// The ID of the access-control entry. + pub id: String, + /// The link to this access-control entry. + pub self_link: String, + /// The name of the bucket. + pub bucket: String, + /// The name of the object, if applied to an object. + pub object: String, + /// The content generation of the object, if applied to an object. + pub generation: Option, + /// The entity holding the permission, in one of the following forms: + /// + /// user-userId + /// user-email + /// group-groupId + /// group-email + /// domain-domain + /// project-team-projectId + /// allUsers + /// allAuthenticatedUsers + /// + /// Examples: + /// + /// The user liz@example.com would be user-liz@example.com. + /// The group example@googlegroups.com would be group-example@googlegroups.com. + /// To refer to all members of the G Suite for Business domain example.com, the entity would be + /// domain-example.com. + pub entity: Entity, + /// The access permission for the entity. + pub role: Role, + /// The email address associated with the entity, if any. + pub email: Option, + /// The ID for the entity, if any. + pub entity_id: Option, + /// The domain associated with the entity, if any. 
+ pub domain: Option, + /// The project team associated with the entity, if any. + pub project_team: Option, + /// HTTP 1.1 Entity tag for the access-control entry. + pub etag: String, +} \ No newline at end of file diff --git a/src/models/object_access_control_list.rs b/src/models/object_access_control_list.rs new file mode 100644 index 0000000..e72c890 --- /dev/null +++ b/src/models/object_access_control_list.rs @@ -0,0 +1,9 @@ +use super::ObjectAccessControl; + +#[allow(unused)] +#[derive(Debug, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +struct ObjectAccessControlList { + kind: String, + items: Vec, +} \ No newline at end of file diff --git a/src/models/object_create_parameters.rs b/src/models/object_create_parameters.rs new file mode 100644 index 0000000..69af36d --- /dev/null +++ b/src/models/object_create_parameters.rs @@ -0,0 +1,45 @@ +/// The parameters that are optionally supplied when creating an object. +#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct CreateParameters { + /// Setting this value is equivalent of setting the `contentEncoding` metadata property of the object. + /// This can be useful when uploading an object with `uploadType=media` to indicate the encoding of the content being uploaded. + pub content_encoding: Option, + + /// Makes the operation conditional on whether the object's current generation matches the given value. + /// Setting to 0 makes the operation succeed only if there are no live versions of the object. + pub if_generation_match: Option, + + /// Makes the operation conditional on whether the object's current generation does not match the given value. + /// If no live object exists, the precondition fails. + /// Setting to 0 makes the operation succeed only if there is a live version of the object. + pub if_generation_not_match: Option, + + /// Makes the operation conditional on whether the object's current metageneration matches the given value. 
+ pub if_metageneration_match: Option, + + /// Makes the operation conditional on whether the object's current metageneration does not match the given value. + pub if_metageneration_not_match: Option, + + /// Resource name of the Cloud KMS key that will be used to encrypt the object. + /// If not specified, the request uses the bucket's default Cloud KMS key, if any, or a Google-managed encryption key. + pub kms_key_name: Option, + + /// Apply a predefined set of access controls to this object. + /// + /// Acceptable values are: + /// `authenticatedRead`: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. + /// `bucketOwnerFullControl`: Object owner gets OWNER access, and project team owners get OWNER access. + /// `bucketOwnerRead`: Object owner gets OWNER access, and project team owners get READER access. + /// `private`: Object owner gets OWNER access. + /// `projectPrivate`: Object owner gets OWNER access, and project team members get access according to their roles. + /// `publicRead`: Object owner gets OWNER access, and allUsers get READER access. + /// If `iamConfiguration.uniformBucketLevelAccess.enabled` is set to `true`, requests that include this parameter fail with a 400 Bad Request response. + pub predefined_acl: Option, + + /// Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. + /// Acceptable values are: + /// `full`: Include all properties. + /// `noAcl`: Omit the owner, acl property. + pub projection: Option, +} \ No newline at end of file diff --git a/src/models/object_list.rs b/src/models/object_list.rs new file mode 100644 index 0000000..7b9258b --- /dev/null +++ b/src/models/object_list.rs @@ -0,0 +1,24 @@ +use super::Object; + +/// Response from `Object::list`. +#[derive(Debug, serde::Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct ObjectList { + /// The kind of item this is. 
For lists of objects, this is always `storage#objects`. + pub kind: String, + + /// The list of objects, ordered lexicographically by name. + #[serde(default = "Vec::new")] + pub items: Vec, + + /// Object name prefixes for objects that matched the listing request but were excluded + /// from `items` because of a delimiter. Values in this list are object names up to and + /// including the requested delimiter. Duplicate entries are omitted from this list. + #[serde(default = "Vec::new")] + pub prefixes: Vec, + + /// The continuation token, included only if there are more items to return. Provide + /// this value as the `page_token` of a subsequent request in order to return the next + /// page of results. + pub next_page_token: Option, +} \ No newline at end of file diff --git a/src/models/object_list_request.rs b/src/models/object_list_request.rs new file mode 100644 index 0000000..c8efd20 --- /dev/null +++ b/src/models/object_list_request.rs @@ -0,0 +1,53 @@ +use super::Projection; + +/// The request that is supplied to perform `Object::list`. +/// See [the Google Cloud Storage API +/// reference](https://cloud.google.com/storage/docs/json_api/v1/objects/list) +/// for more details. +#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ListRequest { + /// When specified, allows the `list` to operate like a directory listing by splitting the + /// object location on this delimiter. + pub delimiter: Option, + + /// Filter results to objects whose names are lexicographically before `end_offset`. + /// If `start_offset` is also set, the objects listed have names between `start_offset` + /// (inclusive) and `end_offset` (exclusive). + pub end_offset: Option, + + /// If true, objects that end in exactly one instance of `delimiter` have their metadata + /// included in `items` in addition to the relevant part of the object name appearing in + /// `prefixes`. 
+ pub include_trailing_delimiter: Option, + + /// Maximum combined number of entries in `items` and `prefixes` to return in a single + /// page of responses. Because duplicate entries in `prefixes` are omitted, fewer total + /// results may be returned than requested. The service uses this parameter or 1,000 + /// items, whichever is smaller. + pub max_results: Option, + + /// A previously-returned page token representing part of the larger set of results to view. + /// The `page_token` is an encoded field that marks the name and generation of the last object + /// in the returned list. In a subsequent request using the `page_token`, items that come after + /// the `page_token` are shown (up to `max_results`). + /// + /// If the page token is provided, all objects starting at that page token are queried + pub page_token: Option, + + /// Filter results to include only objects whose names begin with this prefix. + pub prefix: Option, + + /// Set of properties to return. Defaults to `NoAcl`. + pub projection: Option, + + /// Filter results to objects whose names are lexicographically equal to or after + /// `start_offset`. If `end_offset` is also set, the objects listed have names between + /// `start_offset` (inclusive) and `end_offset` (exclusive). + pub start_offset: Option, + + /// If true, lists all versions of an object as distinct results in order of increasing + /// generation number. The default value for versions is false. For more information, see + /// Object Versioning. + pub versions: Option, +} \ No newline at end of file diff --git a/src/models/object_precondition.rs b/src/models/object_precondition.rs new file mode 100644 index 0000000..7a390c5 --- /dev/null +++ b/src/models/object_precondition.rs @@ -0,0 +1,9 @@ +/// Allows conditional copying of this file. 
+#[derive(Debug, PartialEq, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ObjectPrecondition { + /// Only perform the composition if the generation of the source object that would be used + /// matches this value. If this value and a generation are both specified, they must be the same + /// value or the call will fail. + pub if_generation_match: i64, +} \ No newline at end of file diff --git a/src/models/object_read_parameters.rs b/src/models/object_read_parameters.rs new file mode 100644 index 0000000..06c91d2 --- /dev/null +++ b/src/models/object_read_parameters.rs @@ -0,0 +1,27 @@ +/// The parameters that are optionally supplied when reading an object. +#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ReadParameters { + /// If present, selects a specific revision of this object (as opposed to the latest version, the default). + pub generation: Option, + + /// Makes the operation conditional on whether the object's current generation matches the given value. + /// Setting to 0 makes the operation succeed only if there are no live versions of the object. + pub if_generation_match: Option, + + /// Makes the operation conditional on whether the object's current generation does not match the given value. + /// If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object. + pub if_generation_not_match: Option, + + /// Makes the operation conditional on whether the object's current metageneration matches the given value. + pub if_metageneration_match: Option, + + /// Makes the operation conditional on whether the object's current metageneration does not match the given value. + pub if_metageneration_not_match: Option, + + /// Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. 
+ /// Acceptable values are: + /// `full`: Include all properties. + /// `noAcl`: Omit the owner, acl property. + pub projection: Option, +} \ No newline at end of file diff --git a/src/models/owner.rs b/src/models/owner.rs new file mode 100644 index 0000000..0b03c58 --- /dev/null +++ b/src/models/owner.rs @@ -0,0 +1,11 @@ +use super::Entity; + +/// Contains information about an entity that is able to own a `Bucket`. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Owner { + /// The entity, in the form project-owner-projectId. + pub entity: Entity, + /// The ID for the entity. + pub entity_id: Option, +} \ No newline at end of file diff --git a/src/models/primitive_iam_role.rs b/src/models/primitive_iam_role.rs new file mode 100644 index 0000000..c62df0c --- /dev/null +++ b/src/models/primitive_iam_role.rs @@ -0,0 +1,17 @@ +/// The following enum contains primitive roles and the Cloud Storage permissions that these roles +/// contain. Primitive roles cannot be added at the bucket-level. +#[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize)] +pub enum PrimitiveIamRole { + /// Grants permission to list buckets as well as view bucket metadata, excluding ACLs, when + /// listing. Also grants permission to list and get HMAC keys in the project. + #[serde(rename = "role/viewer")] + Viewer, + /// Grants permission to create, list, and delete buckets. Grants permission to view bucket + /// metadata, excluding ACLs, when listing. Grants full control over HMAC keys in a project. + #[serde(rename = "role/editor")] + Editor, + /// Grants permission to create, list, and delete buckets. Also grants permission to view bucket + /// metadata, excluding ACLs, when listing. Grants full control over HMAC keys in a project. 
+ #[serde(rename = "role/owner")] + Owner, +} \ No newline at end of file diff --git a/src/models/project_team.rs b/src/models/project_team.rs new file mode 100644 index 0000000..d02c73d --- /dev/null +++ b/src/models/project_team.rs @@ -0,0 +1,11 @@ +use super::Team; + +/// Contains information about the team related to this `DefaultObjectAccessControls` +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ProjectTeam { + /// The project number. + project_number: String, + /// The team. + team: Team, +} \ No newline at end of file diff --git a/src/models/projection.rs b/src/models/projection.rs new file mode 100644 index 0000000..ebe1149 --- /dev/null +++ b/src/models/projection.rs @@ -0,0 +1,9 @@ +/// Acceptable values of `projection` properties to return from `Object::list` requests. +#[derive(Debug, PartialEq, serde::Serialize, Clone)] +#[serde(rename_all = "camelCase")] +pub enum Projection { + /// Include all properties. + Full, + /// Omit the owner, acl property. + NoAcl, +} \ No newline at end of file diff --git a/src/models/response.rs b/src/models/response.rs new file mode 100644 index 0000000..85623d0 --- /dev/null +++ b/src/models/response.rs @@ -0,0 +1,81 @@ +use std::ops::ControlFlow; + +use super::{ErrorResponse}; +use crate::Error; + +#[derive(Debug, serde::Deserialize)] +#[serde(rename = "camelCase")] +#[serde(untagged)] +pub(crate) enum Response { + Success(T), + Error(ErrorResponse), +} + +/// Enable desugaring for `Response`, e.g. 
the use of the `?` on an object of type `Response` +/// ```no_run +/// if let Response::Error(error) = my_response { +/// return error; +/// } +/// let my_response = my_response.unwrap(); +/// ``` +/// becomes: +/// ```no_run +/// my_response?; +/// ``` +impl std::ops::Try for Response { + type Output = T; + type Residual = Result; + #[inline] + fn from_output(output: Self::Output) -> Self { + Response::Success(output) + } + #[inline] + fn branch(self) -> ControlFlow { + match self { + Response::Success(t) => ControlFlow::Continue(t), + Response::Error(error) => ControlFlow::Break(Err(Error::Google(error))), + } + } +} + + +impl std::ops::FromResidual> for Response { + #[inline] + #[track_caller] + fn from_residual(residual: ::Residual) -> Self { + if let Err(Error::Google(err)) = residual { + Response::Error(err) + } else { + panic!("Non expected residual type encountered") + } + } +} + +#[cfg(test)] +mod tests { + use crate::{models::{ErrorResponse, ErrorList}, Error}; + + use super::Response; + + #[test] + fn test_try_impl() -> Result<(), Error> { + let response = Response::Success(()); + let output = response?; + assert_eq!(output, ()); + Ok(()) + } + + #[test] + fn test_try_impl_error() -> Result<(), Error> { + let response = Response::Error::<()>(ErrorResponse { + error: ErrorList { + errors: Vec::new(), + code: 250, + message: "Some error occurred".to_string(), + }, + }); + let output = response?; + assert_eq!(output, ()); + Ok(()) + } +} \ No newline at end of file diff --git a/src/models/retention_policy.rs b/src/models/retention_policy.rs new file mode 100644 index 0000000..b071aeb --- /dev/null +++ b/src/models/retention_policy.rs @@ -0,0 +1,16 @@ +/// Contains information about how files are kept after deletion. 
+#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RetentionPolicy { + /// The period of time, in seconds, that objects in the bucket must be retained and cannot be + /// deleted, overwritten, or made noncurrent. The value must be greater than 0 seconds and less + /// than 3,155,760,000 seconds. + #[serde(deserialize_with = "crate::from_str")] + pub retention_period: u64, + /// The time from which the retentionPolicy was effective, in RFC 3339 format. + #[serde(with = "time::serde::rfc3339")] + pub effective_time: time::OffsetDateTime, + /// Whether or not the retentionPolicy is locked. If true, the retentionPolicy cannot be removed + /// and the retention period cannot be reduced. + pub is_locked: Option, +} \ No newline at end of file diff --git a/src/models/rewrite_parameters.rs b/src/models/rewrite_parameters.rs new file mode 100644 index 0000000..142e7f7 --- /dev/null +++ b/src/models/rewrite_parameters.rs @@ -0,0 +1,69 @@ +/// The parameters that are optionally supplied when rewriting an object. +#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct RewriteParameters { + ///Resource name of the Cloud KMS key that will be used to encrypt the object. + /// The Cloud KMS key must be located in same location as the object. + // + // If the parameter is not specified, the request uses the destination bucket's default encryption key, if any, or the Google-managed encryption key. + pub destination_kms_key_name: Option, + + /// Apply a predefined set of access controls to the destination object. + /// + /// Acceptable values are: + /// `authenticatedRead`: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. + /// `bucketOwnerFullControl`: Object owner gets OWNER access, and project team owners get OWNER access. + /// `bucketOwnerRead`: Object owner gets OWNER access, and project team owners get READER access. 
+ /// `private`: Object owner gets OWNER access. + /// `projectPrivate`: Object owner gets OWNER access, and project team members get access according to their roles. + /// `publicRead`: Object owner gets OWNER access, and allUsers get READER access. + /// If `iamConfiguration.uniformBucketLevelAccess.enabled` is set to `true`, requests that include this parameter fail with a 400 Bad Request response. + pub destination_predefined_acl: Option, + + /// Makes the operation conditional on there being a live destination object with a generation number that matches the given value. + /// Setting `ifGenerationMatch` to 0 makes the operation succeed only if there is no live destination object. + pub if_generation_match: Option, + + /// Makes the operation conditional on there being a live destination object with a generation number that does not match the given value. + /// If no live destination object exists, the precondition fails. + /// Setting `ifGenerationNotMatch` to 0 makes the operation succeed if there is a live version of the object. + pub if_generation_not_match: Option, + + /// Makes the operation conditional on there being a live destination object with a metageneration number that matches the given value. + pub if_metageneration_match: Option, + + /// Makes the operation conditional on there being a live destination object with a metageneration number that does not match the given value. + pub if_metageneration_not_match: Option, + + /// Makes the operation conditional on whether the source object's generation matches the given value. + pub if_source_generation_match: Option, + + /// Makes the operation conditional on whether the source object's generation does not match the given value. + pub if_source_generation_not_match: Option, + + /// Makes the operation conditional on whether the source object's current metageneration matches the given value. 
+ pub if_source_metageneration_match: Option, + + /// Makes the operation conditional on whether the source object's current metageneration does not match the given value. + pub if_source_metageneration_not_match: Option, + + /// The maximum number of bytes that will be rewritten per rewrite request. + /// Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. + /// If specified the value must be an integral multiple of 1 MiB (1048576). + /// Also, this only applies to requests where the source and destination span locations and/or storage classes. + /// Finally, this value must not change across rewrite calls else you'll get an error that the `rewriteToken` is invalid. + pub max_bytes_rewritten_per_call: Option, + + /// Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. + /// Acceptable values are: + /// `full`: Include all properties. + /// `noAcl`: Omit the owner, acl property. + pub projection: Option, + + /// Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. + /// Calls that provide a `rewriteToken` can omit all other request fields, but if included those fields must match the values provided in the first rewrite request. + pub rewrite_token: Option, + + /// If present, selects a specific revision of the source object (as opposed to the latest version, the default). 
+ pub source_generation: Option, +} \ No newline at end of file diff --git a/src/models/rewrite_response.rs b/src/models/rewrite_response.rs new file mode 100644 index 0000000..7f5b473 --- /dev/null +++ b/src/models/rewrite_response.rs @@ -0,0 +1,12 @@ +use super::Object; + +#[derive(Debug, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(dead_code)] +pub(crate) struct RewriteResponse { + kind: String, + total_bytes_rewritten: String, + object_size: String, + done: bool, + pub(crate) resource: Object, +} \ No newline at end of file diff --git a/src/models/role.rs b/src/models/role.rs new file mode 100644 index 0000000..90d076e --- /dev/null +++ b/src/models/role.rs @@ -0,0 +1,11 @@ +/// Any type of role we can encounter. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "UPPERCASE")] +pub enum Role { + /// Full access. + Owner, + /// Write, but not administer. + Writer, + /// Only read access. + Reader, +} \ No newline at end of file diff --git a/src/models/rule.rs b/src/models/rule.rs new file mode 100644 index 0000000..cd810e0 --- /dev/null +++ b/src/models/rule.rs @@ -0,0 +1,11 @@ +use super::{Action, Condition}; + +/// An element of the lifecyle list. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Rule { + /// The action to take. + pub action: Action, + /// The condition(s) under which the action will be taken. + pub condition: Condition, +} \ No newline at end of file diff --git a/src/models/source_object.rs b/src/models/source_object.rs new file mode 100644 index 0000000..8eded43 --- /dev/null +++ b/src/models/source_object.rs @@ -0,0 +1,14 @@ +use super::ObjectPrecondition; + +/// A SourceObject represents one of the objects that is to be composed. +#[derive(Debug, PartialEq, serde::Serialize)] +#[serde(rename_all = "camelCase")] +pub struct SourceObject { + /// The source object's name. 
All source objects must have the same storage class and reside in + /// the same bucket. + pub name: String, + /// The generation of this object to use as the source. + pub generation: Option, + /// Conditions that must be met for this operation to execute. + pub object_preconditions: Option, +} \ No newline at end of file diff --git a/src/models/standard_iam_role.rs b/src/models/standard_iam_role.rs new file mode 100644 index 0000000..e58ae19 --- /dev/null +++ b/src/models/standard_iam_role.rs @@ -0,0 +1,28 @@ +/// The following enum contains Cloud Identity and Access Management (Cloud IAM) roles that are +/// associated with Cloud Storage and lists the permissions that are contained in each role. Unless +/// otherwise noted, these roles can be applied either to entire projects or specific buckets. +#[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize)] +pub enum StandardIamRole { + /// Allows users to create objects. Does not give permission to view, delete, or overwrite + /// objects. + #[serde(rename = "roles/storage.objectCreator")] + ObjectCreator, + /// Grants access to view objects and their metadata, excluding ACLs. + /// + /// Can also list the objects in a bucket. + #[serde(rename = "roles/storage.objectViewer")] + ObjectViewer, + /// Grants full control over objects, including listing, creating, viewing, and deleting + /// objects. + #[serde(rename = "roles/storage.objectAdmin")] + ObjectAdmin, + /// Full control over HMAC keys in a project. + #[serde(rename = "roles/storage.hmacKeyAdmin")] + HmacKeyAdmin, + /// Grants full control of buckets and objects. + /// + /// When applied to an individual bucket, control applies only to the specified bucket and + /// objects within the bucket. 
+ #[serde(rename = "roles/storage.admin")] + Admin, +} \ No newline at end of file diff --git a/src/models/storage_class.rs b/src/models/storage_class.rs new file mode 100644 index 0000000..267475d --- /dev/null +++ b/src/models/storage_class.rs @@ -0,0 +1,28 @@ +/// The type of storage that is used. Pertains to availability, performance and cost. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +pub enum StorageClass { + /// Standard Storage is best for data that is frequently accessed ("hot" data) and/or stored for + /// only brief periods of time. + Standard, + /// Nearline Storage is a low-cost, highly durable storage service for storing infrequently + /// accessed data. + Nearline, + /// Coldline Storage is a very-low-cost, highly durable storage service for data archiving, + /// online backup, and disaster recovery. + Coldline, + /// Equivalent to Standard Storage, except Multi-Regional Storage can only be used for objects + /// stored in multi-regions or dual-regions. + MultiRegional, + /// Equivalent to Standard Storage, except Regional Storage can only be used for objects stored + /// in regions. + Regional, + /// Similar to Standard Storage except: + /// + /// DRA has higher pricing for operations. + /// DRA has lower performance, particularly in terms of availability (DRA has a 99% availability + /// SLA). + /// + /// You can move your data from DRA to other storage classes by performing a storage transfer. + DurableReducedAvailability, +} \ No newline at end of file diff --git a/src/models/team.rs b/src/models/team.rs new file mode 100644 index 0000000..b89883c --- /dev/null +++ b/src/models/team.rs @@ -0,0 +1,36 @@ +use std::str::FromStr; + +/// Any type of team we can encounter. +#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum Team { + /// The team consists of `Editors`. + Editors, + /// The team consists of `Owners`. 
+ Owners, + /// The team consists of `Viewers`. + Viewers, +} + +impl std::fmt::Display for Team { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Team::Editors => write!(f, "editors"), + Team::Owners => write!(f, "owners"), + Team::Viewers => write!(f, "viewers"), + } + } +} + +impl FromStr for Team { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "editors" => Ok(Self::Editors), + "owners" => Ok(Self::Owners), + "viewers" => Ok(Self::Viewers), + _ => Err(format!("Invalid `Team`: {}", s)), + } + } +} \ No newline at end of file diff --git a/src/models/test_iam_permission.rs b/src/models/test_iam_permission.rs new file mode 100644 index 0000000..26ed9ba --- /dev/null +++ b/src/models/test_iam_permission.rs @@ -0,0 +1,13 @@ +/// The request needed to perform the Object::test_iam_permission function. +#[derive(Debug, PartialEq, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TestIamPermission { + /// The kind of item this is. + kind: String, + /// The permissions held by the caller. Permissions are always of the format + /// `storage.resource.capability`, where resource is one of buckets or objects. See + /// [Cloud Storage IAM Permissions] + /// (https://cloud.google.com/storage/docs/access-control/iam-permissions) for a list of + /// supported permissions. 
+ permissions: Vec, +} \ No newline at end of file diff --git a/src/resources/topic.rs b/src/models/topic.rs similarity index 90% rename from src/resources/topic.rs rename to src/models/topic.rs index 7a5c427..723b6cc 100644 --- a/src/resources/topic.rs +++ b/src/models/topic.rs @@ -9,14 +9,11 @@ pub struct Topic { impl std::fmt::Display for Topic { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!( - f, - "//pubsub.googleapis.com/projects/{}/topics/{}", - self.project_id, self.topic - ) + write!(f, "//pubsub.googleapis.com/projects/{}/topics/{}",self.project_id, self.topic) } } +// This uses Display to serialize a topic as a url, rather than generating an object impl serde::Serialize for Topic { fn serialize(&self, serializer: S) -> Result where diff --git a/src/models/uniform_bucket_level_access.rs b/src/models/uniform_bucket_level_access.rs new file mode 100644 index 0000000..ba69398 --- /dev/null +++ b/src/models/uniform_bucket_level_access.rs @@ -0,0 +1,15 @@ +/// Access that is configured for all objects in one go. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct UniformBucketLevelAccess { + /// Whether or not the bucket uses uniform bucket-level access. If set, access checks only use + /// bucket-level IAM policies or above. + pub enabled: bool, + /// The deadline time for changing iamConfiguration.uniformBucketLevelAccess.enabled from true + /// to false, in RFC 3339 format. + /// + /// iamConfiguration.uniformBucketLevelAccess.enabled may be changed from true to false until + /// the locked time, after which the field is immutable. 
+ #[serde(with = "time::serde::rfc3339::option")] + pub locked_time: Option, +} \ No newline at end of file diff --git a/src/models/update_hmac_metadata.rs b/src/models/update_hmac_metadata.rs new file mode 100644 index 0000000..98ae38a --- /dev/null +++ b/src/models/update_hmac_metadata.rs @@ -0,0 +1,6 @@ +use super::HmacState; + +#[derive(serde::Serialize)] +pub(crate) struct UpdateHmacMetadata { + pub(crate) state: HmacState, +} \ No newline at end of file diff --git a/src/models/update_hmac_request.rs b/src/models/update_hmac_request.rs new file mode 100644 index 0000000..23a561e --- /dev/null +++ b/src/models/update_hmac_request.rs @@ -0,0 +1,7 @@ +use super::update_hmac_metadata::UpdateHmacMetadata; + +#[derive(serde::Serialize)] +pub(crate) struct UpdateHmacRequest { + secret: String, + metadata: UpdateHmacMetadata, +} \ No newline at end of file diff --git a/src/models/update_parameters.rs b/src/models/update_parameters.rs new file mode 100644 index 0000000..3b4af02 --- /dev/null +++ b/src/models/update_parameters.rs @@ -0,0 +1,40 @@ +/// The parameters that are optionally supplied when updating an object. +#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct UpdateParameters { + /// If present, selects a specific revision of this object (as opposed to the latest version, the default). + pub generation: Option, + + /// Makes the operation conditional on whether the object's current generation matches the given value. + /// Setting to 0 makes the operation succeed only if there are no live versions of the object. + pub if_generation_match: Option, + + /// Makes the operation conditional on whether the object's current generation does not match the given value. + /// If no live object exists, the precondition fails. + /// Setting to 0 makes the operation succeed only if there is a live version of the object. 
+ pub if_generation_not_match: Option, + + /// Makes the operation conditional on whether the object's current metageneration matches the given value. + pub if_metageneration_match: Option, + + /// Makes the operation conditional on whether the object's current metageneration does not match the given value. + pub if_metageneration_not_match: Option, + + /// Apply a predefined set of access controls to this object. + /// + /// Acceptable values are: + /// `authenticatedRead`: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. + /// `bucketOwnerFullControl`: Object owner gets OWNER access, and project team owners get OWNER access. + /// `bucketOwnerRead`: Object owner gets OWNER access, and project team owners get READER access. + /// `private`: Object owner gets OWNER access. + /// `projectPrivate`: Object owner gets OWNER access, and project team members get access according to their roles. + /// `publicRead`: Object owner gets OWNER access, and allUsers get READER access. + /// If `iamConfiguration.uniformBucketLevelAccess.enabled` is set to `true`, requests that include this parameter fail with a 400 Bad Request response. + pub predefined_acl: Option, + + /// Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. + /// Acceptable values are: + /// `full`: Include all properties. + /// `noAcl`: Omit the owner, acl property. + pub projection: Option, +} \ No newline at end of file diff --git a/src/models/versioning.rs b/src/models/versioning.rs new file mode 100644 index 0000000..aeea54c --- /dev/null +++ b/src/models/versioning.rs @@ -0,0 +1,7 @@ +/// Contains information about whether a Bucket keeps track of its version. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Versioning { + /// While set to true, versioning is fully enabled for this bucket. 
+ pub enabled: bool, +} \ No newline at end of file diff --git a/src/models/website.rs b/src/models/website.rs new file mode 100644 index 0000000..c2ed454 --- /dev/null +++ b/src/models/website.rs @@ -0,0 +1,13 @@ +/// Contains configuration about how to visit the website linked to this Bucket. +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Website { + /// If the requested object path is missing, the service will ensure the path has a trailing + /// '/', append this suffix, and attempt to retrieve the resulting object. This allows the + /// creation of index.html objects to represent directory pages. + pub main_page_suffix: String, + /// If the requested object path is missing, and any mainPageSuffix object is missing, if + /// applicable, the service will return the named object from this bucket as the content for a + /// 404 Not Found result. + pub not_found_page: String, +} \ No newline at end of file diff --git a/src/resources/bucket.rs b/src/resources/bucket.rs deleted file mode 100644 index 69a73f7..0000000 --- a/src/resources/bucket.rs +++ /dev/null @@ -1,1032 +0,0 @@ -use crate::resources::{ - bucket_access_control::{BucketAccessControl, NewBucketAccessControl}, - default_object_access_control::{DefaultObjectAccessControl, NewDefaultObjectAccessControl}, -}; -pub use crate::resources::{common::Entity, location::*}; - -/// The Buckets resource represents a -/// [bucket](https://cloud.google.com/storage/docs/key-terms#buckets) in Google Cloud Storage. There -/// is a single global namespace shared by all buckets. For more information, see -/// [Bucket Name Requirements](https://cloud.google.com/storage/docs/naming#requirements). -/// -/// Buckets contain objects which can be accessed by their own methods. 
In addition to the -/// [ACL property](https://cloud.google.com/storage/docs/access-control/lists), buckets contain -/// `BucketAccessControls`, for use in fine-grained manipulation of an existing bucket's access -/// controls. -/// -/// A bucket is always owned by the project team owners group. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Bucket { - /// The kind of item this is. For buckets, this is always `storage#bucket`. - pub kind: String, - /// The ID of the bucket. For buckets, the `id` and `name` properties are the same. - pub id: String, // should be u64, mumble mumble - /// The URI of this bucket. - pub self_link: String, - /// The project number of the project the bucket belongs to. - #[serde(deserialize_with = "crate::from_str")] - pub project_number: u64, - /// The name of the bucket. - pub name: String, - /// The creation time of the bucket in RFC 3339 format. - #[serde(with = "time::serde::rfc3339")] - pub time_created: time::OffsetDateTime, - /// The modification time of the bucket in RFC 3339 format. - #[serde(with = "time::serde::rfc3339")] - pub updated: time::OffsetDateTime, - /// Whether or not to automatically apply an eventBasedHold to new objects added to the bucket. - pub default_event_based_hold: Option, - /// The bucket's retention policy, which defines the minimum age an object in the bucket must - /// reach before it can be deleted or overwritten. - pub retention_policy: Option, - /// The metadata generation of this bucket. - #[serde(deserialize_with = "crate::from_str")] - pub metageneration: i64, - /// Access controls on the bucket, containing one or more bucketAccessControls Resources. If - /// iamConfiguration.uniformBucketLevelAccess.enabled is set to true, this field is omitted in - /// responses, and requests that specify this field fail with a 400 Bad Request response. 
- pub acl: Option>, - /// Default access controls to apply to new objects when no ACL is provided. This list contains - /// one or more defaultObjectAccessControls Resources. If - /// iamConfiguration.uniformBucketLevelAccess.enabled is set to true, this field is omitted in - /// responses, and requests that specify this field fail. - pub default_object_acl: Option>, - /// The bucket's IAM configuration. - pub iam_configuration: Option, - /// Encryption configuration for a bucket. - pub encryption: Option, - /// The owner of the bucket. This is always the project team's owner group. - pub owner: Option, - /// The location of the bucket. Object data for objects in the bucket resides in physical - /// storage within this region. Defaults to US. See Cloud Storage bucket locations for the - /// authoritative list. - pub location: Location, - /// The type of location that the bucket resides in, as determined by the location property. - pub location_type: String, - /// The bucket's website configuration, controlling how the service behaves when accessing - /// bucket contents as a web site. See the Static Website Examples for more information. - pub website: Option, - /// The bucket's logging configuration, which defines the destination bucket and optional name - /// prefix for the current bucket's logs. - pub logging: Option, - /// The bucket's versioning configuration. - pub versioning: Option, - /// The bucket's Cross-Origin Resource Sharing (CORS) configuration. - pub cors: Option>, - /// The bucket's lifecycle configuration. See - /// [lifecycle management](https://cloud.google.com/storage/docs/lifecycle) for more - /// information. - pub lifecycle: Option, - /// User-provided bucket labels, in key/value pairs. - pub labels: Option>, - /// The bucket's default storage class, used whenever no storageClass is specified for a - /// newly-created object. If storageClass is not specified when the bucket - /// is created, it defaults to STANDARD. 
For more information, see storage classes. - pub storage_class: StorageClass, - /// The bucket's billing configuration. - pub billing: Option, - /// HTTP 1.1 [Entity tag](https://tools.ietf.org/html/rfc7232#section-2.3) for the bucket. - pub etag: String, -} - -/// A model that can be used to insert new buckets into Google Cloud Storage. -#[derive(Debug, PartialEq, Default, serde::Serialize)] -#[serde(rename_all = "camelCase")] -pub struct NewBucket { - /// The name of the bucket. See the bucket naming guidelines for more information. - pub name: String, - /// Whether or not to automatically apply an eventBasedHold to new objects added to the bucket. - pub default_event_based_hold: Option, - /// Access controls on the bucket, containing one or more `BucketAccessControls` resources. If - /// `iamConfiguration.uniformBucketLevelAccess.enabled` is set to true, this field is omitted in - /// responses, and requests that specify this field fail with a `400 Bad Request` response. - pub acl: Option>, - /// Default access controls to apply to new objects when no ACL is provided. This list defines - /// an entity and role for one or more `DefaultObjectAccessControls` resources. If - /// `iamConfiguration.uniformBucketLevelAccess.enabled` is set to true, this field is omitted in - /// responses, and requests that specify this field fail with a `400 Bad Request` response. - pub default_object_acl: Option>, - /// The bucket's IAM configuration. - pub iam_configuration: Option, - /// Encryption configuration for a bucket. - pub encryption: Option, - /// The location of the bucket. Object data for objects in the bucket resides in physical - /// storage within this region. Defaults to US. See Cloud Storage bucket locations for the - /// authoritative list. - pub location: Location, - /// The bucket's website configuration, controlling how the service behaves when accessing - /// bucket contents as a web site. See the Static Website Examples for more information. 
- pub website: Option, - /// The bucket's logging configuration, which defines the destination bucket and optional name - /// prefix for the current bucket's logs. - pub logging: Option, - /// The bucket's versioning configuration. - pub versioning: Option, - /// The bucket's Cross-Origin Resource Sharing (CORS) configuration. - pub cors: Option>, - /// The bucket's lifecycle configuration. See - /// [lifecycle management](https://cloud.google.com/storage/docs/lifecycle) for more - /// information. - pub lifecycle: Option, - /// User-provided bucket labels, in key/value pairs. - pub labels: Option>, - /// The bucket's default storage class, used whenever no storageClass is specified for a - /// newly-created object. If storageClass is not specified when the bucket - /// is created, it defaults to STANDARD. For more information, see storage classes. - pub storage_class: Option, - /// The bucket's billing configuration. - pub billing: Option, -} - -/// Contains information about how files are kept after deletion. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct RetentionPolicy { - /// The period of time, in seconds, that objects in the bucket must be retained and cannot be - /// deleted, overwritten, or made noncurrent. The value must be greater than 0 seconds and less - /// than 3,155,760,000 seconds. - #[serde(deserialize_with = "crate::from_str")] - pub retention_period: u64, - /// The time from which the retentionPolicy was effective, in RFC 3339 format. - #[serde(with = "time::serde::rfc3339")] - pub effective_time: time::OffsetDateTime, - /// Whether or not the retentionPolicy is locked. If true, the retentionPolicy cannot be removed - /// and the retention period cannot be reduced. - pub is_locked: Option, -} - -/// Contains information about the Buckets IAM configuration. 
-#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct IamConfiguration { - /// The bucket's uniform bucket-level access configuration. - /// - /// Note: iamConfiguration also includes the bucketPolicyOnly field, which uses a legacy name - /// but has the same functionality as the uniformBucketLevelAccess field. We recommend only - /// using uniformBucketLevelAccess, as specifying both fields may result in unreliable behavior. - pub uniform_bucket_level_access: UniformBucketLevelAccess, -} - -/// Access that is configured for all objects in one go. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct UniformBucketLevelAccess { - /// Whether or not the bucket uses uniform bucket-level access. If set, access checks only use - /// bucket-level IAM policies or above. - pub enabled: bool, - /// The deadline time for changing iamConfiguration.uniformBucketLevelAccess.enabled from true - /// to false, in RFC 3339 format. - /// - /// iamConfiguration.uniformBucketLevelAccess.enabled may be changed from true to false until - /// the locked time, after which the field is immutable. - #[serde(with = "time::serde::rfc3339::option")] - pub locked_time: Option, -} - -/// Contains information about the encryption used for data in this Bucket. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Encryption { - /// A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no - /// encryption method is specified. - pub default_kms_key_name: String, -} - -/// Contains information about an entity that is able to own a `Bucket`. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Owner { - /// The entity, in the form project-owner-projectId. - pub entity: Entity, - /// The ID for the entity. 
- pub entity_id: Option, -} - -/// Contains configuration about how to visit the website linked to this Bucket. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Website { - /// If the requested object path is missing, the service will ensure the path has a trailing - /// '/', append this suffix, and attempt to retrieve the resulting object. This allows the - /// creation of index.html objects to represent directory pages. - pub main_page_suffix: String, - /// If the requested object path is missing, and any mainPageSuffix object is missing, if - /// applicable, the service will return the named object from this bucket as the content for a - /// 404 Not Found result. - pub not_found_page: String, -} - -/// Contains information of where and how access logs to this bucket are maintained. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Logging { - /// The destination bucket where the current bucket's logs should be placed. - pub log_bucket: String, - /// A prefix for log object names. The default prefix is the bucket name. - pub log_object_prefix: String, -} - -/// Contains information about whether a Bucket keeps track of its version. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Versioning { - /// While set to true, versioning is fully enabled for this bucket. - pub enabled: bool, -} - -/// Contains information about how OPTIONS requests for this Bucket are handled. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Cors { - /// The list of Origins eligible to receive CORS response headers. Note: "*" is permitted in the - /// list of origins, and means "any Origin". 
- #[serde(default)] - pub origin: Vec, - /// The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, - /// etc) Note: "*" is permitted in the list of methods, and means "any method". - #[serde(default)] - pub method: Vec, - /// The list of HTTP headers other than the simple response headers to give permission for the - /// user-agent to share across domains. - #[serde(default)] - pub response_header: Vec, - /// The value, in seconds, to return in the Access-Control-Max-Age header used in preflight - /// responses. - pub max_age_seconds: Option, -} - -/// Contains a set of `Rule` Objects which together describe the way this lifecycle behaves -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Lifecycle { - /// A lifecycle management rule, which is made of an action to take and the condition(s) under - /// which the action will be taken. - pub rule: Vec, -} - -/// An element of the lifecyle list. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Rule { - /// The action to take. - pub action: Action, - /// The condition(s) under which the action will be taken. - pub condition: Condition, -} - -/// Represents an action that might be undertaken due to a `Condition`. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Action { - /// Type of the action. - pub r#type: ActionType, - /// Target storage class. Required iff the type of the action is SetStorageClass. - pub storage_class: Option, -} - -/// Type of the action. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -pub enum ActionType { - /// Deletes a Bucket. - Delete, - /// Sets the `storage_class` of a Bucket. - SetStorageClass, -} - -/// A rule that might induce an `Action` if met. 
-#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Condition { - /// Age of an object (in days). This condition is satisfied when an object reaches the specified - /// age. - pub age: Option, - /// A date in `RFC 3339` format with only the date part (for instance, "2013-01-15"). This - /// condition is satisfied when an object is created before midnight of the specified date in - /// UTC. - #[serde(default, with = "crate::rfc3339_date::option")] - pub created_before: Option, - /// Relevant only for versioned objects. If the value is true, this condition matches the live - /// version of objects; if the value is `false`, it matches noncurrent versions of objects. - pub is_live: Option, - /// Objects having any of the storage classes specified by this condition will be matched. - /// Values include STANDARD, NEARLINE, COLDLINE, MULTI_REGIONAL, REGIONAL, and - /// DURABLE_REDUCED_AVAILABILITY. - pub matches_storage_class: Option>, - /// Relevant only for versioned objects. If the value is N, this condition is satisfied when - /// there are at least N versions (including the live version) newer than this version of the - /// object. - #[serde(default, deserialize_with = "crate::from_str_opt")] - pub num_newer_versions: Option, -} - -/// Contains information about the payment structure of this bucket -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Billing { - /// When set to true, Requester Pays is enabled for this bucket. - pub requester_pays: bool, -} - -/// The type of storage that is used. Pertains to availability, performance and cost. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "SCREAMING_SNAKE_CASE")] -pub enum StorageClass { - /// Standard Storage is best for data that is frequently accessed ("hot" data) and/or stored for - /// only brief periods of time. 
- Standard, - /// Nearline Storage is a low-cost, highly durable storage service for storing infrequently - /// accessed data. - Nearline, - /// Coldline Storage is a very-low-cost, highly durable storage service for data archiving, - /// online backup, and disaster recovery. - Coldline, - /// Equivalent to Standard Storage, except Multi-Regional Storage can only be used for objects - /// stored in multi-regions or dual-regions. - MultiRegional, - /// Equivalent to Standard Storage, except Regional Storage can only be used for objects stored - /// in regions. - Regional, - /// Similar to Standard Storage except: - /// - /// DRA has higher pricing for operations. - /// DRA has lower performance, particularly in terms of availability (DRA has a 99% availability - /// SLA). - /// - /// You can move your data from DRA to other storage classes by performing a storage transfer. - DurableReducedAvailability, -} - -/// A representation of the IAM Policiy for a certain bucket. -#[derive(Debug, PartialEq, Default, serde::Deserialize, serde::Serialize)] -#[serde(rename_all = "camelCase")] -pub struct IamPolicy { - /// The [Cloud IAM policy](https://cloud.google.com/iam/docs/policies#versions) version. - pub version: i32, - /// The kind of item this is. For policies, this field is ignored in a request and is - /// `storage#policy` in a response. - pub kind: Option, - /// The ID of the resource to which this policy belongs. The response for this field is of the - /// form `projects/_/buckets/bucket`. This field is ignored in a request. - pub resource_id: Option, - /// A list of the bindings for this policy. - pub bindings: Vec, - /// HTTP 1.1 [Entity tag](https://tools.ietf.org/html/rfc7232#section-2.3) for this policy. - pub etag: String, -} - -/// An association between a role, which comes with a set of permissions, and members who may assume -/// that role. 
-#[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize)] -#[serde(rename_all = "camelCase")] -pub struct Binding { - /// The role to which members belong. Two types of roles are supported: standard IAM roles, - /// which grant permissions that do not map directly to those provided by ACLs, and legacy IAM - /// roles, which do map directly to ACL permissions. All roles are of the format - /// `roles/storage.specificRole.` - /// - /// See - /// [Cloud Storage IAM Roles](https://cloud.google.com/storage/docs/access-control/iam-roles) - /// for a list of available roles. - pub role: IamRole, - /// A collection of identifiers for members who may assume the provided role. Recognized - /// identifiers are as follows: - /// - /// * `allUsers` — A special identifier that represents anyone on the internet; with or without - /// a Google account. - /// * `allAuthenticatedUsers` — A special identifier that represents anyone who is authenticated - /// with a Google account or a service account. - /// * `user:emailid` — An email address that represents a specific account. For example, - /// user:alice@gmail.com or user:joe@example.com. - /// * `serviceAccount:emailid` — An email address that represents a service account. For - /// example, serviceAccount:my-other-app@appspot.gserviceaccount.com . - /// * `group:emailid` — An email address that represents a Google group. For example, - /// group:admins@example.com. - /// * `domain:domain` — A G Suite domain name that represents all the users of that domain. For - /// example, domain:google.com or domain:example.com. - /// * `projectOwner:projectid` — Owners of the given project. For example, - /// projectOwner:my-example-project - /// * `projectEditor:projectid` — Editors of the given project. For example, - /// projectEditor:my-example-project - /// * `projectViewer:projectid` — Viewers of the given project. 
For example, - /// projectViewer:my-example-project - pub members: Vec, - /// A condition object associated with this binding. Each role binding can only contain one - /// condition. - pub condition: Option, -} - -/// A condition object associated with a binding. -#[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize)] -#[serde(rename_all = "camelCase")] -pub struct IamCondition { - /// Title of the condition. For example, "expires_end_of_2018". - pub title: String, - /// Optional description of the condition. For example, "Expires at midnight on 2018-12-31". - pub description: Option, - /// [Attribute-based](https://cloud.google.com/iam/docs/conditions-overview#attributes) logic - /// expression using a subset of the Common Expression Language (CEL). For example, - /// "request.time < timestamp('2019-01-01T00:00:00Z')". - pub expression: String, -} - -/// All possible roles that can exist in the IAM system. For a more comprehensive version, check -/// [Googles Documentation](https://cloud.google.com/storage/docs/access-control/iam-roles). -#[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize)] -#[serde(untagged)] -pub enum IamRole { - /// Standard roles can be applied to either buckets or projects. - Standard(StandardIamRole), - /// Primitive roles are roles that must be added on a per-project basis. - Primitive(PrimitiveIamRole), - /// Legacy roles are roles that can only be added to an individual bucket. - Legacy(LegacyIamRole), -} - -/// The following enum contains Cloud Identity and Access Management (Cloud IAM) roles that are -/// associated with Cloud Storage and lists the permissions that are contained in each role. Unless -/// otherwise noted, these roles can be applied either to entire projects or specific buckets. -#[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize)] -pub enum StandardIamRole { - /// Allows users to create objects. Does not give permission to view, delete, or overwrite - /// objects. 
- #[serde(rename = "roles/storage.objectCreator")] - ObjectCreator, - /// Grants access to view objects and their metadata, excluding ACLs. - /// - /// Can also list the objects in a bucket. - #[serde(rename = "roles/storage.objectViewer")] - ObjectViewer, - /// Grants full control over objects, including listing, creating, viewing, and deleting - /// objects. - #[serde(rename = "roles/storage.objectAdmin")] - ObjectAdmin, - /// Full control over HMAC keys in a project. - #[serde(rename = "roles/storage.hmacKeyAdmin")] - HmacKeyAdmin, - /// Grants full control of buckets and objects. - /// - /// When applied to an individual bucket, control applies only to the specified bucket and - /// objects within the bucket. - #[serde(rename = "roles/storage.admin")] - Admin, -} - -/// The following enum contains primitive roles and the Cloud Storage permissions that these roles -/// contain. Primitive roles cannot be added at the bucket-level. -#[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize)] -pub enum PrimitiveIamRole { - /// Grants permission to list buckets as well as view bucket metadata, excluding ACLs, when - /// listing. Also grants permission to list and get HMAC keys in the project. - #[serde(rename = "role/viewer")] - Viewer, - /// Grants permission to create, list, and delete buckets. Grants permission to view bucket - /// metadata, excluding ACLs, when listing. Grants full control over HMAC keys in a project. - #[serde(rename = "role/editor")] - Editor, - /// Grants permission to create, list, and delete buckets. Also grants permission to view bucket - /// metadata, excluding ACLs, when listing. Grants full control over HMAC keys in a project. - #[serde(rename = "role/owner")] - Owner, -} - -/// The following enum contains Cloud IAM roles that are equivalent to Access Control List (ACL) -/// permissions. These Cloud IAM roles can only be applied to a bucket, not a project. 
-#[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize)] -pub enum LegacyIamRole { - /// Grants permission to view objects and their metadata, excluding ACLs. - #[serde(rename = "roles/storage.legacyObjectReader")] - LegacyObjectReader, - /// Grants permission to view and edit objects and their metadata, including ACLs. - #[serde(rename = "roles/storage.legacyObjectOwner")] - LegacyObjectOwner, - /// Grants permission to list a bucket's contents and read bucket metadata, excluding Cloud IAM - /// policies. Also grants permission to read object metadata, excluding Cloud IAM policies, when - /// listing objects. - /// - /// Use of this role is also reflected in the bucket's ACLs. See - /// [Cloud IAM relation to ACLs](https://cloud.google.com/storage/docs/access-control/iam#acls) - /// for more information. - #[serde(rename = "roles/storage.legacyBucketReader")] - LegacyBucketReader, - /// Grants permission to create, overwrite, and delete objects; list objects in a bucket and - /// read object metadata, excluding Cloud IAM policies, when listing; and read bucket metadata, - /// excluding Cloud IAM policies. - /// - /// Use of this role is also reflected in the bucket's ACLs. See - /// [Cloud IAM relation to ACLs](https://cloud.google.com/storage/docs/access-control/iam#acls) - /// for more information. - #[serde(rename = "roles/storage.legacyBucketWriter")] - LegacyBucketWriter, - /// Grants permission to create, overwrite, and delete objects; list objects in a bucket and - /// read object metadata, excluding Cloud IAM policies, when listing; and read and edit bucket - /// metadata, including Cloud IAM policies. - /// - /// Use of this role is also reflected in the bucket's ACLs. See - /// [Cloud IAM relation to ACLs](https://cloud.google.com/storage/docs/access-control/iam#acls) - /// for more information. 
- #[serde(rename = "roles/storage.legacyBucketOwner")] - LegacyBucketOwner, -} - -/// The request needed to perform the Object::test_iam_permission function. -#[derive(Debug, PartialEq, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct TestIamPermission { - /// The kind of item this is. - kind: String, - /// The permissions held by the caller. Permissions are always of the format - /// `storage.resource.capability`, where resource is one of buckets or objects. See - /// [Cloud Storage IAM Permissions] - /// (https://cloud.google.com/storage/docs/access-control/iam-permissions) for a list of - /// supported permissions. - permissions: Vec, -} - -impl Bucket { - /// Creates a new `Bucket`. There are many options that you can provide for creating a new - /// bucket, so the `NewBucket` resource contains all of them. Note that `NewBucket` implements - /// `Default`, so you don't have to specify the fields you're not using. And error is returned - /// if that bucket name is already taken. - /// ### Example - /// ``` - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::bucket::{Bucket, NewBucket}; - /// use cloud_storage::bucket::{Location, MultiRegion}; - /// - /// let new_bucket = NewBucket { - /// name: "cloud-storage-rs-doc-1".to_string(), // this is the only mandatory field - /// location: Location::Multi(MultiRegion::Eu), - /// ..Default::default() - /// }; - /// let bucket = Bucket::create(&new_bucket).await?; - /// # bucket.delete().await?; - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn create(new_bucket: &NewBucket) -> crate::Result { - crate::CLOUD_CLIENT.bucket().create(new_bucket).await - } - - /// The synchronous equivalent of `Bucket::create`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
- #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn create_sync(new_bucket: &NewBucket) -> crate::Result { - crate::runtime()?.block_on(Self::create(new_bucket)) - } - - /// Returns all `Bucket`s within this project. - /// - /// ### Note - /// When using incorrect permissions, this function fails silently and returns an empty list. - /// - /// ### Example - /// ``` - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Bucket; - /// - /// let buckets = Bucket::list().await?; - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn list() -> crate::Result> { - crate::CLOUD_CLIENT.bucket().list().await - } - - /// The synchronous equivalent of `Bucket::list`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn list_sync() -> crate::Result> { - crate::runtime()?.block_on(Self::list()) - } - - /// Returns a single `Bucket` by its name. If the Bucket does not exist, an error is returned. - /// ### Example - /// ``` - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Bucket; - /// # use cloud_storage::bucket::NewBucket; - /// # let new_bucket = NewBucket { - /// # name: "cloud-storage-rs-doc-2".to_string(), - /// # ..Default::default() - /// # }; - /// # let _ = Bucket::create(&new_bucket).await?; - /// - /// let bucket = Bucket::read("cloud-storage-rs-doc-2").await?; - /// # bucket.delete().await?; - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn read(name: &str) -> crate::Result { - crate::CLOUD_CLIENT.bucket().read(name).await - } - - /// The synchronous equivalent of `Bucket::read`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
- #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn read_sync(name: &str) -> crate::Result { - crate::runtime()?.block_on(Self::read(name)) - } - - /// Update an existing `Bucket`. If you declare you bucket as mutable, you can edit its fields. - /// You can then flush your changes to Google Cloud Storage using this method. - /// ### Example - /// ``` - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::bucket::{Bucket, RetentionPolicy}; - /// # use cloud_storage::bucket::NewBucket; - /// # let new_bucket = NewBucket { - /// # name: "cloud-storage-rs-doc-3".to_string(), - /// # ..Default::default() - /// # }; - /// # let _ = Bucket::create(&new_bucket).await?; - /// - /// let mut bucket = Bucket::read("cloud-storage-rs-doc-3").await?; - /// bucket.retention_policy = Some(RetentionPolicy { - /// retention_period: 50, - /// effective_time: time::OffsetDateTime::now_utc() + std::time::Duration::from_secs(50), - /// is_locked: Some(false), - /// }); - /// bucket.update().await?; - /// # bucket.delete().await?; - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn update(&self) -> crate::Result { - crate::CLOUD_CLIENT.bucket().update(self).await - } - - /// The synchronous equivalent of `Bucket::update`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn update_sync(&self) -> crate::Result { - crate::runtime()?.block_on(self.update()) - } - - /// Delete an existing `Bucket`. This permanently removes a bucket from Google Cloud Storage. - /// An error is returned when you don't have sufficient permissions, or when the - /// `retention_policy` prevents you from deleting your Bucket. 
- /// ### Example - /// ```no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Bucket; - /// # use cloud_storage::bucket::NewBucket; - /// # let new_bucket = NewBucket { - /// # name: "unnecessary-bucket".to_string(), - /// # ..Default::default() - /// # }; - /// # let _ = Bucket::create(&new_bucket).await?; - /// - /// let bucket = Bucket::read("unnecessary-bucket").await?; - /// bucket.delete().await?; - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn delete(self) -> crate::Result<()> { - crate::CLOUD_CLIENT.bucket().delete(self).await - } - - /// The synchronous equivalent of `Bucket::delete`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn delete_sync(self) -> crate::Result<()> { - crate::runtime()?.block_on(self.delete()) - } - - /// Returns the [IAM Policy](https://cloud.google.com/iam/docs/) for this bucket. - /// ### Example - /// ``` - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Bucket; - /// # use cloud_storage::bucket::NewBucket; - /// # let new_bucket = NewBucket { - /// # name: "cloud-storage-rs-doc-4".to_string(), - /// # ..Default::default() - /// # }; - /// # let _ = Bucket::create(&new_bucket).await?; - /// - /// let bucket = Bucket::read("cloud-storage-rs-doc-4").await?; - /// let policy = bucket.get_iam_policy().await?; - /// # bucket.delete().await?; - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn get_iam_policy(&self) -> crate::Result { - crate::CLOUD_CLIENT.bucket().get_iam_policy(self).await - } - - /// The synchronous equivalent of `Bucket::get_iam_policy`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
- #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn get_iam_policy_sync(&self) -> crate::Result { - crate::runtime()?.block_on(self.get_iam_policy()) - } - - /// Updates the [IAM Policy](https://cloud.google.com/iam/docs/) for this bucket. - /// ### Example - /// ``` - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Bucket; - /// use cloud_storage::bucket::{IamPolicy, Binding, IamRole, StandardIamRole, Entity}; - /// # use cloud_storage::bucket::NewBucket; - /// # let new_bucket = NewBucket { - /// # name: "cloud-storage-rs-doc-5".to_string(), - /// # ..Default::default() - /// # }; - /// # let _ = Bucket::create(&new_bucket).await?; - /// - /// let bucket = Bucket::read("cloud-storage-rs-doc-5").await?; - /// let iam_policy = IamPolicy { - /// version: 1, - /// bindings: vec![ - /// Binding { - /// role: IamRole::Standard(StandardIamRole::ObjectViewer), - /// members: vec!["allUsers".to_string()], - /// condition: None, - /// } - /// ], - /// ..Default::default() - /// }; - /// let policy = bucket.set_iam_policy(&iam_policy).await?; - /// # bucket.delete().await?; - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn set_iam_policy(&self, iam: &IamPolicy) -> crate::Result { - crate::CLOUD_CLIENT.bucket().set_iam_policy(self, iam).await - } - - /// The synchronous equivalent of `Bucket::set_iam_policy`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn set_iam_policy_sync(&self, iam: &IamPolicy) -> crate::Result { - crate::runtime()?.block_on(self.set_iam_policy(iam)) - } - - /// Checks whether the user provided in the service account has this permission. 
- /// ### Example - /// ```no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Bucket; - /// - /// let bucket = Bucket::read("my-bucket").await?; - /// bucket.test_iam_permission("storage.buckets.get").await?; - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn test_iam_permission(&self, permission: &str) -> crate::Result { - crate::CLOUD_CLIENT - .bucket() - .test_iam_permission(self, permission) - .await - } - - /// The synchronous equivalent of `Bucket::test_iam_policy`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn test_iam_permission_sync(&self, permission: &str) -> crate::Result { - crate::runtime()?.block_on(self.test_iam_permission(permission)) - } - - fn _lock_retention_policy() { - todo!() - } -} - -#[cfg(all(test, feature = "global-client"))] -mod tests { - use super::*; - use crate::resources::common::Role; - - #[tokio::test] - async fn create() -> Result<(), Box> { - dotenv::dotenv().ok(); - let base_name = std::env::var("TEST_BUCKET")?; - // use a more complex bucket in this test. 
- let new_bucket = NewBucket { - name: format!("{}-test-create", base_name), - default_event_based_hold: Some(true), - acl: Some(vec![NewBucketAccessControl { - entity: Entity::AllUsers, - role: Role::Reader, - }]), - default_object_acl: Some(vec![NewDefaultObjectAccessControl { - entity: Entity::AllUsers, - role: Role::Reader, - }]), - iam_configuration: Some(IamConfiguration { - uniform_bucket_level_access: UniformBucketLevelAccess { - enabled: false, - locked_time: None, - }, - }), - ..Default::default() - }; - let bucket = Bucket::create(&new_bucket).await?; - bucket.delete().await?; - Ok(()) - } - - #[tokio::test] - async fn list() -> Result<(), Box> { - Bucket::list().await?; - Ok(()) - } - - #[tokio::test] - async fn update() -> Result<(), Box> { - let mut bucket = crate::create_test_bucket("test-update").await; - bucket.retention_policy = Some(RetentionPolicy { - retention_period: 50, - effective_time: time::OffsetDateTime::now_utc() + std::time::Duration::from_secs(50), - is_locked: Some(false), - }); - bucket.update().await?; - let updated = Bucket::read(&bucket.name).await?; - assert_eq!(updated.retention_policy.unwrap().retention_period, 50); - bucket.delete().await?; - Ok(()) - } - - // used a lot throughout the other tests, but included for completeness - #[tokio::test] - async fn delete() -> Result<(), Box> { - let bucket = crate::create_test_bucket("test-delete").await; - bucket.delete().await?; - Ok(()) - } - - #[tokio::test] - async fn get_iam_policy() -> Result<(), Box> { - let bucket = crate::create_test_bucket("test-get-iam-policy").await; - bucket.get_iam_policy().await?; - bucket.delete().await?; - Ok(()) - } - - #[tokio::test] - async fn set_iam_policy() -> Result<(), Box> { - let bucket = crate::create_test_bucket("test-set-iam-policy").await; - let iam_policy = IamPolicy { - bindings: vec![Binding { - role: IamRole::Standard(StandardIamRole::ObjectViewer), - members: vec!["allUsers".to_string()], - condition: None, - }], - 
..Default::default() - }; - bucket.set_iam_policy(&iam_policy).await?; - assert_eq!(bucket.get_iam_policy().await?.bindings, iam_policy.bindings); - bucket.delete().await?; - Ok(()) - } - - #[tokio::test] - async fn test_iam_permission() -> Result<(), Box> { - let bucket = crate::create_test_bucket("test-test-ia-permission").await; - bucket.test_iam_permission("storage.buckets.get").await?; - bucket.delete().await?; - Ok(()) - } - - #[cfg(all(feature = "global-client", feature = "sync"))] - mod sync { - use super::*; - use crate::resources::common::Role; - - #[test] - fn create() -> Result<(), Box> { - dotenv::dotenv().ok(); - let base_name = std::env::var("TEST_BUCKET")?; - // use a more complex bucket in this test. - let new_bucket = NewBucket { - name: format!("{}-test-create", base_name), - default_event_based_hold: Some(true), - acl: Some(vec![NewBucketAccessControl { - entity: Entity::AllUsers, - role: Role::Reader, - }]), - default_object_acl: Some(vec![NewDefaultObjectAccessControl { - entity: Entity::AllUsers, - role: Role::Reader, - }]), - iam_configuration: Some(IamConfiguration { - uniform_bucket_level_access: UniformBucketLevelAccess { - enabled: false, - locked_time: None, - }, - }), - ..Default::default() - }; - let bucket = Bucket::create_sync(&new_bucket)?; - bucket.delete_sync()?; - Ok(()) - } - - #[test] - fn list() -> Result<(), Box> { - Bucket::list_sync()?; - Ok(()) - } - - #[test] - fn read() -> Result<(), Box> { - let bucket = crate::create_test_bucket_sync("test-read"); - let also_bucket = Bucket::read_sync(&bucket.name)?; - assert_eq!(bucket, also_bucket); - bucket.delete_sync()?; - assert!(also_bucket.delete_sync().is_err()); - Ok(()) - } - - #[test] - fn update() -> Result<(), Box> { - let mut bucket = crate::create_test_bucket_sync("test-update"); - bucket.retention_policy = Some(RetentionPolicy { - retention_period: 50, - effective_time: time::OffsetDateTime::now_utc() + std::time::Duration::from_secs(50), - is_locked: Some(false), - 
}); - bucket.update_sync()?; - let updated = Bucket::read_sync(&bucket.name)?; - assert_eq!(updated.retention_policy.unwrap().retention_period, 50); - bucket.delete_sync()?; - Ok(()) - } - - // used a lot throughout the other tests, but included for completeness - #[test] - fn delete() -> Result<(), Box> { - let bucket = crate::create_test_bucket_sync("test-delete"); - bucket.delete_sync()?; - Ok(()) - } - - #[test] - fn get_iam_policy() -> Result<(), Box> { - let bucket = crate::create_test_bucket_sync("test-get-iam-policy"); - bucket.get_iam_policy_sync()?; - bucket.delete_sync()?; - Ok(()) - } - - #[test] - fn set_iam_policy() -> Result<(), Box> { - // use crate::resources::iam_policy::{Binding, IamRole, StandardIamRole}; - - let bucket = crate::create_test_bucket_sync("test-set-iam-policy"); - let iam_policy = IamPolicy { - bindings: vec![Binding { - role: IamRole::Standard(StandardIamRole::ObjectViewer), - members: vec!["allUsers".to_string()], - condition: None, - }], - ..Default::default() - }; - bucket.set_iam_policy_sync(&iam_policy)?; - assert_eq!(bucket.get_iam_policy_sync()?.bindings, iam_policy.bindings); - bucket.delete_sync()?; - Ok(()) - } - - #[test] - fn test_iam_permission() -> Result<(), Box> { - let bucket = crate::create_test_bucket_sync("test-test-ia-permission"); - bucket.test_iam_permission_sync("storage.buckets.get")?; - bucket.delete_sync()?; - Ok(()) - } - } -} diff --git a/src/resources/channel.rs b/src/resources/channel.rs deleted file mode 100644 index cc0898c..0000000 --- a/src/resources/channel.rs +++ /dev/null @@ -1,29 +0,0 @@ -pub struct Channel { - pub id: String, - pub resourceId: String, -} - -impl Channel { - /// Stop receiving object change notifications through this channel. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
- #[cfg(feature = "sync")] - pub async fn stop_sync(&self) -> Result<(), crate::Error> { - crate::runtime()?.block_on(self.stop_async().await) - } - - pub async fn stop(&self) -> Result<(), crate::Error> { - let url = format!("{}/channels/stop", crate::BASE_URL); - let response = create::CLIENT - .post(&url) - .headers(crate::get_headers().await?) - .send() - .await?; - if response.status().is_success() { - Ok(()) - } else { - Err(crate::Error::Google(response.json().await?)) - } - } -} diff --git a/src/resources/mod.rs b/src/resources/mod.rs deleted file mode 100644 index 8305dd1..0000000 --- a/src/resources/mod.rs +++ /dev/null @@ -1,28 +0,0 @@ -/// This complex object represents a Bucket that can be used to store and read files in Google Cloud -/// Storage. -pub mod bucket; -/// A Bucket Access Control object can be used to configure access on a bucket-wide level. -pub mod bucket_access_control; -/// Commonly used types. -pub mod common; -/// Default Object Access Control objects can be used the configure access that is used as a -/// fallback in the abscence of more specific data. -pub mod default_object_access_control; -/// An Hmac key is a secret key stored in Cloud Storage. -pub mod hmac_key; -/// A location where a bucket can exists physically. -mod location; -// /// A subscription to receive -// /// [Pub/Sub notifications](https://cloud.google.com/storage/docs/pubsub-notifications). -// pub mod notification; -/// A file -pub mod object; -/// Contains data about to access specific files. -pub mod object_access_control; -/// A deserialized version of the `service-account-********.json` file. Used to authenticate -/// requests. -pub mod service_account; -/// Used for parsing the `service-account-********.json` file. 
-pub(crate) mod signature; -/// The topic field of a `Notification` -mod topic; diff --git a/src/resources/object.rs b/src/resources/object.rs deleted file mode 100644 index a818482..0000000 --- a/src/resources/object.rs +++ /dev/null @@ -1,2039 +0,0 @@ -pub use crate::resources::bucket::Owner; -use crate::resources::object_access_control::ObjectAccessControl; -use futures_util::Stream; -#[cfg(feature = "global-client")] -use futures_util::TryStream; -use percent_encoding::{utf8_percent_encode, AsciiSet, NON_ALPHANUMERIC}; -use std::collections::HashMap; - -/// A resource representing a file in Google Cloud Storage. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Object { - /// The kind of item this is. For objects, this is always `storage#object`. - pub kind: String, - /// The ID of the object, including the bucket name, object name, and generation number. - pub id: String, - /// The link to this object. - pub self_link: String, - /// The name of the object. Required if not specified by URL parameter. - pub name: String, - /// The name of the bucket containing this object. - pub bucket: String, - /// The content generation of this object. Used for object versioning. - #[serde(deserialize_with = "crate::from_str")] - pub generation: i64, - /// The version of the metadata for this object at this generation. Used for preconditions and - /// for detecting changes in metadata. A metageneration number is only meaningful in the context - /// of a particular generation of a particular object. - #[serde(deserialize_with = "crate::from_str")] - pub metageneration: i64, - /// Content-Type of the object data. If an object is stored without a Content-Type, it is served - /// as application/octet-stream. - pub content_type: Option, - /// The creation time of the object in RFC 3339 format. 
- #[serde(with = "time::serde::rfc3339")] - pub time_created: time::OffsetDateTime, - /// The modification time of the object metadata in RFC 3339 format. - #[serde(with = "time::serde::rfc3339")] - pub updated: time::OffsetDateTime, - /// The deletion time of the object in RFC 3339 format. Returned if and only if this version of - /// the object is no longer a live version, but remains in the bucket as a noncurrent version. - #[serde(default, with = "time::serde::rfc3339::option")] - pub time_deleted: Option, - /// Whether or not the object is subject to a temporary hold. - pub temporary_hold: Option, - /// Whether or not the object is subject to an event-based hold. - pub event_based_hold: Option, - /// The earliest time that the object can be deleted, based on a bucket's retention policy, in - /// RFC 3339 format. - #[serde(default, with = "time::serde::rfc3339::option")] - pub retention_expiration_time: Option, - /// Storage class of the object. - pub storage_class: String, - /// The time at which the object's storage class was last changed. When the object is initially - /// created, it will be set to timeCreated. - #[serde(with = "time::serde::rfc3339")] - pub time_storage_class_updated: time::OffsetDateTime, - /// Content-Length of the data in bytes. - #[serde(deserialize_with = "crate::from_str")] - pub size: u64, - /// MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, - /// see Hashes and ETags: Best Practices. - pub md5_hash: Option, - /// Media download link. - pub media_link: String, - /// Content-Encoding of the object data. - pub content_encoding: Option, - /// Content-Disposition of the object data. - pub content_disposition: Option, - /// Content-Language of the object data. - pub content_language: Option, - /// Cache-Control directive for the object data. If omitted, and the object is accessible to all - /// anonymous users, the default will be public, max-age=3600. 
- pub cache_control: Option, - /// User-provided metadata, in key/value pairs. - pub metadata: Option>, - /// Access controls on the object, containing one or more objectAccessControls Resources. If - /// iamConfiguration.uniformBucketLevelAccess.enabled is set to true, this field is omitted in - /// responses, and requests that specify this field fail. - pub acl: Option>, - /// The owner of the object. This will always be the uploader of the object. If - /// `iamConfiguration.uniformBucketLevelAccess.enabled` is set to true, this field does not - /// apply, and is omitted in responses. - pub owner: Option, - /// CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian - /// byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best - /// Practices. - pub crc32c: String, - /// Number of underlying components that make up a composite object. Components are accumulated - /// by compose operations, counting 1 for each non-composite source object and componentCount - /// for each composite source object. Note: componentCount is included in the metadata for - /// composite objects only. - #[serde(default, deserialize_with = "crate::from_str_opt")] - pub component_count: Option, - /// HTTP 1.1 Entity tag for the object. - pub etag: String, - /// Metadata of customer-supplied encryption key, if the object is encrypted by such a key. - pub customer_encryption: Option, - /// Cloud KMS Key used to encrypt this object, if the object is encrypted by such a key. - pub kms_key_name: Option, -} - -/// Contains data about how a user might encrypt their files in Google Cloud Storage. -#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct CustomerEncrypton { - /// The encryption algorithm. - pub encryption_algorithm: String, - /// SHA256 hash value of the encryption key. 
- pub key_sha256: String, -} - -/// The request that is supplied to perform `Object::compose`. -#[derive(Debug, PartialEq, serde::Serialize)] -#[serde(rename_all = "camelCase")] -pub struct ComposeRequest { - /// The kind of item this is. Will always be `storage#composeRequest`. - pub kind: String, - /// The list of source objects that will be concatenated into a single object. - pub source_objects: Vec, - /// Properties of the resulting object. - pub destination: Option, -} - -/// A SourceObject represents one of the objects that is to be composed. -#[derive(Debug, PartialEq, serde::Serialize)] -#[serde(rename_all = "camelCase")] -pub struct SourceObject { - /// The source object's name. All source objects must have the same storage class and reside in - /// the same bucket. - pub name: String, - /// The generation of this object to use as the source. - pub generation: Option, - /// Conditions that must be met for this operation to execute. - pub object_preconditions: Option, -} - -/// Allows conditional copying of this file. -#[derive(Debug, PartialEq, serde::Serialize)] -#[serde(rename_all = "camelCase")] -pub struct ObjectPrecondition { - /// Only perform the composition if the generation of the source object that would be used - /// matches this value. If this value and a generation are both specified, they must be the same - /// value or the call will fail. - pub if_generation_match: i64, -} - -/// The request that is supplied to perform `Object::list`. -/// See [the Google Cloud Storage API -/// reference](https://cloud.google.com/storage/docs/json_api/v1/objects/list) -/// for more details. -#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] -#[serde(rename_all = "camelCase")] -pub struct ListRequest { - /// When specified, allows the `list` to operate like a directory listing by splitting the - /// object location on this delimiter. - pub delimiter: Option, - - /// Filter results to objects whose names are lexicographically before `end_offset`. 
- /// If `start_offset` is also set, the objects listed have names between `start_offset` - /// (inclusive) and `end_offset` (exclusive). - pub end_offset: Option, - - /// If true, objects that end in exactly one instance of `delimiter` have their metadata - /// included in `items` in addition to the relevant part of the object name appearing in - /// `prefixes`. - pub include_trailing_delimiter: Option, - - /// Maximum combined number of entries in `items` and `prefixes` to return in a single - /// page of responses. Because duplicate entries in `prefixes` are omitted, fewer total - /// results may be returned than requested. The service uses this parameter or 1,000 - /// items, whichever is smaller. - pub max_results: Option, - - /// A previously-returned page token representing part of the larger set of results to view. - /// The `page_token` is an encoded field that marks the name and generation of the last object - /// in the returned list. In a subsequent request using the `page_token`, items that come after - /// the `page_token` are shown (up to `max_results`). - /// - /// If the page token is provided, all objects starting at that page token are queried - pub page_token: Option, - - /// Filter results to include only objects whose names begin with this prefix. - pub prefix: Option, - - /// Set of properties to return. Defaults to `NoAcl`. - pub projection: Option, - - /// Filter results to objects whose names are lexicographically equal to or after - /// `start_offset`. If `end_offset` is also set, the objects listed have names between - /// `start_offset` (inclusive) and `end_offset` (exclusive). - pub start_offset: Option, - - /// If true, lists all versions of an object as distinct results in order of increasing - /// generation number. The default value for versions is false. For more information, see - /// Object Versioning. - pub versions: Option, -} - -/// The parameters that are optionally supplied when creating an object. 
-#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] -#[serde(rename_all = "camelCase")] -pub struct CreateParameters { - /// Setting this value is equivalent of setting the `contentEncoding` metadata property of the object. - /// This can be useful when uploading an object with `uploadType=media` to indicate the encoding of the content being uploaded. - pub content_encoding: Option, - - /// Makes the operation conditional on whether the object's current generation matches the given value. - /// Setting to 0 makes the operation succeed only if there are no live versions of the object. - pub if_generation_match: Option, - - /// Makes the operation conditional on whether the object's current generation does not match the given value. - /// If no live object exists, the precondition fails. - /// Setting to 0 makes the operation succeed only if there is a live version of the object. - pub if_generation_not_match: Option, - - /// Makes the operation conditional on whether the object's current metageneration matches the given value. - pub if_metageneration_match: Option, - - /// Makes the operation conditional on whether the object's current metageneration does not match the given value. - pub if_metageneration_not_match: Option, - - /// Resource name of the Cloud KMS key that will be used to encrypt the object. - /// If not specified, the request uses the bucket's default Cloud KMS key, if any, or a Google-managed encryption key. - pub kms_key_name: Option, - - /// Apply a predefined set of access controls to this object. - /// - /// Acceptable values are: - /// `authenticatedRead`: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. - /// `bucketOwnerFullControl`: Object owner gets OWNER access, and project team owners get OWNER access. - /// `bucketOwnerRead`: Object owner gets OWNER access, and project team owners get READER access. - /// `private`: Object owner gets OWNER access. 
- /// `projectPrivate`: Object owner gets OWNER access, and project team members get access according to their roles. - /// `publicRead`: Object owner gets OWNER access, and allUsers get READER access. - /// If `iamConfiguration.uniformBucketLevelAccess.enabled` is set to `true`, requests that include this parameter fail with a 400 Bad Request response. - pub predefined_acl: Option, - - /// Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. - /// Acceptable values are: - /// `full`: Include all properties. - /// `noAcl`: Omit the owner, acl property. - pub projection: Option, -} - -/// The parameters that are optionally supplied when reading an object. -#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] -#[serde(rename_all = "camelCase")] -pub struct ReadParameters { - /// If present, selects a specific revision of this object (as opposed to the latest version, the default). - pub generation: Option, - - /// Makes the operation conditional on whether the object's current generation matches the given value. - /// Setting to 0 makes the operation succeed only if there are no live versions of the object. - pub if_generation_match: Option, - - /// Makes the operation conditional on whether the object's current generation does not match the given value. - /// If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object. - pub if_generation_not_match: Option, - - /// Makes the operation conditional on whether the object's current metageneration matches the given value. - pub if_metageneration_match: Option, - - /// Makes the operation conditional on whether the object's current metageneration does not match the given value. - pub if_metageneration_not_match: Option, - - /// Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. 
- /// Acceptable values are: - /// `full`: Include all properties. - /// `noAcl`: Omit the owner, acl property. - pub projection: Option, -} - -/// The parameters that are optionally supplied when composing an object. -#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] -#[serde(rename_all = "camelCase")] -pub struct ComposeParameters { - /// Apply a predefined set of access controls to the destination object. - /// - /// Acceptable values are: - /// `authenticatedRead`: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. - /// `bucketOwnerFullControl`: Object owner gets OWNER access, and project team owners get OWNER access. - /// `bucketOwnerRead`: Object owner gets OWNER access, and project team owners get READER access. - /// `private`: Object owner gets OWNER access. - /// `projectPrivate`: Object owner gets OWNER access, and project team members get access according to their roles. - /// `publicRead`: Object owner gets OWNER access, and allUsers get READER access. - /// If `iamConfiguration.uniformBucketLevelAccess.enabled` is set to `true`, requests that include this parameter fail with a 400 Bad Request response. - pub destination_predefined_acl: Option, - - /// Makes the operation conditional on there being a live destination object with a generation number that matches the given value. - /// Setting `ifGenerationMatch` to 0 makes the operation succeed only if there is no live destination object. - pub if_generation_match: Option, - - /// Makes the operation conditional on there being a live destination object with a metageneration number that matches the given value. - pub if_metageneration_match: Option, - - /// Resource name of the Cloud KMS key that will be used to encrypt the composed object. - /// If not specified, the request uses the bucket's default Cloud KMS key, if any, or a Google-managed encryption key. - pub kms_key_name: Option, -} - -/// The parameters that are optionally supplied when copying an object. 
-#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] -#[serde(rename_all = "camelCase")] -pub struct CopyParameters { - /// Resource name of the Cloud KMS key that will be used to encrypt the object. - /// The Cloud KMS key must be located in same location as the object. - // - // If the parameter is not specified, the request uses the destination bucket's default encryption key, if any, or the Google-managed encryption key. - // - // If the object is large, re-encryption with the key may take too long and result in a Deadline exceeded error. - // For large objects, consider using the rewrite method instead. - pub destination_kms_key_name: Option, - - /// Apply a predefined set of access controls to the destination object. - /// - /// Acceptable values are: - /// `authenticatedRead`: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. - /// `bucketOwnerFullControl`: Object owner gets OWNER access, and project team owners get OWNER access. - /// `bucketOwnerRead`: Object owner gets OWNER access, and project team owners get READER access. - /// `private`: Object owner gets OWNER access. - /// `projectPrivate`: Object owner gets OWNER access, and project team members get access according to their roles. - /// `publicRead`: Object owner gets OWNER access, and allUsers get READER access. - /// If `iamConfiguration.uniformBucketLevelAccess.enabled` is set to `true`, requests that include this parameter fail with a 400 Bad Request response. - pub destination_predefined_acl: Option, - - /// Makes the operation conditional on there being a live destination object with a generation number that matches the given value. - /// Setting `ifGenerationMatch` to 0 makes the operation succeed only if there is no live destination object. - pub if_generation_match: Option, - - /// Makes the operation conditional on there being a live destination object with a generation number that does not match the given value. 
- /// If no live destination object exists, the precondition fails. - /// Setting `ifGenerationNotMatch` to 0 makes the operation succeed if there is a live version of the object. - pub if_generation_not_match: Option, - - /// Makes the operation conditional on there being a live destination object with a metageneration number that matches the given value. - pub if_metageneration_match: Option, - - /// Makes the operation conditional on there being a live destination object with a metageneration number that does not match the given value. - pub if_metageneration_not_match: Option, - - /// Makes the operation conditional on whether the source object's generation matches the given value. - pub if_source_generation_match: Option, - - /// Makes the operation conditional on whether the source object's generation does not match the given value. - pub if_source_generation_not_match: Option, - - /// Makes the operation conditional on whether the source object's current metageneration matches the given value. - pub if_source_metageneration_match: Option, - - /// Makes the operation conditional on whether the source object's current metageneration does not match the given value. - pub if_source_metageneration_not_match: Option, - - /// Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. - /// Acceptable values are: - /// full: Include all properties. - /// noAcl: Omit the owner, acl property. - pub projection: Option, - - /// If present, selects a specific revision of the source object (as opposed to the latest version, the default). - pub source_generation: Option, -} - -/// The parameters that are optionally supplied when rewriting an object. -#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] -#[serde(rename_all = "camelCase")] -pub struct RewriteParameters { - ///Resource name of the Cloud KMS key that will be used to encrypt the object. 
- /// The Cloud KMS key must be located in same location as the object. - // - // If the parameter is not specified, the request uses the destination bucket's default encryption key, if any, or the Google-managed encryption key. - pub destination_kms_key_name: Option, - - /// Apply a predefined set of access controls to the destination object. - /// - /// Acceptable values are: - /// `authenticatedRead`: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. - /// `bucketOwnerFullControl`: Object owner gets OWNER access, and project team owners get OWNER access. - /// `bucketOwnerRead`: Object owner gets OWNER access, and project team owners get READER access. - /// `private`: Object owner gets OWNER access. - /// `projectPrivate`: Object owner gets OWNER access, and project team members get access according to their roles. - /// `publicRead`: Object owner gets OWNER access, and allUsers get READER access. - /// If `iamConfiguration.uniformBucketLevelAccess.enabled` is set to `true`, requests that include this parameter fail with a 400 Bad Request response. - pub destination_predefined_acl: Option, - - /// Makes the operation conditional on there being a live destination object with a generation number that matches the given value. - /// Setting `ifGenerationMatch` to 0 makes the operation succeed only if there is no live destination object. - pub if_generation_match: Option, - - /// Makes the operation conditional on there being a live destination object with a generation number that does not match the given value. - /// If no live destination object exists, the precondition fails. - /// Setting `ifGenerationNotMatch` to 0 makes the operation succeed if there is a live version of the object. - pub if_generation_not_match: Option, - - /// Makes the operation conditional on there being a live destination object with a metageneration number that matches the given value. 
- pub if_metageneration_match: Option, - - /// Makes the operation conditional on there being a live destination object with a metageneration number that does not match the given value. - pub if_metageneration_not_match: Option, - - /// Makes the operation conditional on whether the source object's generation matches the given value. - pub if_source_generation_match: Option, - - /// Makes the operation conditional on whether the source object's generation does not match the given value. - pub if_source_generation_not_match: Option, - - /// Makes the operation conditional on whether the source object's current metageneration matches the given value. - pub if_source_metageneration_match: Option, - - /// Makes the operation conditional on whether the source object's current metageneration does not match the given value. - pub if_source_metageneration_not_match: Option, - - /// The maximum number of bytes that will be rewritten per rewrite request. - /// Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. - /// If specified the value must be an integral multiple of 1 MiB (1048576). - /// Also, this only applies to requests where the source and destination span locations and/or storage classes. - /// Finally, this value must not change across rewrite calls else you'll get an error that the `rewriteToken` is invalid. - pub max_bytes_rewritten_per_call: Option, - - /// Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. - /// Acceptable values are: - /// `full`: Include all properties. - /// `noAcl`: Omit the owner, acl property. - pub projection: Option, - - /// Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. 
- /// Calls that provide a `rewriteToken` can omit all other request fields, but if included those fields must match the values provided in the first rewrite request. - pub rewrite_token: Option, - - /// If present, selects a specific revision of the source object (as opposed to the latest version, the default). - pub source_generation: Option, -} - -/// The parameters that are optionally supplied when deleting an object. -#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] -#[serde(rename_all = "camelCase")] -pub struct DeleteParameters { - /// If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default). - pub generation: Option, - - /// Makes the operation conditional on whether the object's current generation matches the given value. - /// Setting to 0 makes the operation succeed only if there are no live versions of the object. - pub if_generation_match: Option, - - /// Makes the operation conditional on whether the object's current generation does not match the given value. - /// If no live object exists, the precondition fails. - /// Setting to 0 makes the operation succeed only if there is a live version of the object. - pub if_generation_not_match: Option, - - /// Makes the operation conditional on whether the object's current metageneration matches the given value. - pub if_metageneration_match: Option, - - /// Makes the operation conditional on whether the object's current metageneration does not match the given value. - pub if_metageneration_not_match: Option, -} - -/// The parameters that are optionally supplied when updating an object. -#[derive(Debug, PartialEq, serde::Serialize, Default, Clone)] -#[serde(rename_all = "camelCase")] -pub struct UpdateParameters { - /// If present, selects a specific revision of this object (as opposed to the latest version, the default). 
- pub generation: Option, - - /// Makes the operation conditional on whether the object's current generation matches the given value. - /// Setting to 0 makes the operation succeed only if there are no live versions of the object. - pub if_generation_match: Option, - - /// Makes the operation conditional on whether the object's current generation does not match the given value. - /// If no live object exists, the precondition fails. - /// Setting to 0 makes the operation succeed only if there is a live version of the object. - pub if_generation_not_match: Option, - - /// Makes the operation conditional on whether the object's current metageneration matches the given value. - pub if_metageneration_match: Option, - - /// Makes the operation conditional on whether the object's current metageneration does not match the given value. - pub if_metageneration_not_match: Option, - - /// Apply a predefined set of access controls to this object. - /// - /// Acceptable values are: - /// `authenticatedRead`: Object owner gets OWNER access, and allAuthenticatedUsers get READER access. - /// `bucketOwnerFullControl`: Object owner gets OWNER access, and project team owners get OWNER access. - /// `bucketOwnerRead`: Object owner gets OWNER access, and project team owners get READER access. - /// `private`: Object owner gets OWNER access. - /// `projectPrivate`: Object owner gets OWNER access, and project team members get access according to their roles. - /// `publicRead`: Object owner gets OWNER access, and allUsers get READER access. - /// If `iamConfiguration.uniformBucketLevelAccess.enabled` is set to `true`, requests that include this parameter fail with a 400 Bad Request response. - pub predefined_acl: Option, - - /// Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. - /// Acceptable values are: - /// `full`: Include all properties. - /// `noAcl`: Omit the owner, acl property. 
- pub projection: Option, -} - -/// Acceptable values of `projection` properties to return from `Object::list` requests. -#[derive(Debug, PartialEq, serde::Serialize, Clone)] -#[serde(rename_all = "camelCase")] -pub enum Projection { - /// Include all properties. - Full, - /// Omit the owner, acl property. - NoAcl, -} - -/// Response from `Object::list`. -#[derive(Debug, serde::Deserialize, Default)] -#[serde(rename_all = "camelCase")] -pub struct ObjectList { - /// The kind of item this is. For lists of objects, this is always `storage#objects`. - pub kind: String, - - /// The list of objects, ordered lexicographically by name. - #[serde(default = "Vec::new")] - pub items: Vec, - - /// Object name prefixes for objects that matched the listing request but were excluded - /// from `items` because of a delimiter. Values in this list are object names up to and - /// including the requested delimiter. Duplicate entries are omitted from this list. - #[serde(default = "Vec::new")] - pub prefixes: Vec, - - /// The continuation token, included only if there are more items to return. Provide - /// this value as the `page_token` of a subsequent request in order to return the next - /// page of results. - pub next_page_token: Option, -} - -#[derive(Debug, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -#[allow(dead_code)] -pub(crate) struct RewriteResponse { - kind: String, - total_bytes_rewritten: String, - object_size: String, - done: bool, - pub(crate) resource: Object, -} - -impl Object { - /// Create a new object. - /// Upload a file as that is loaded in memory to google cloud storage, where it will be - /// interpreted according to the mime type you specified. 
- /// ## Example - /// ```rust,no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// # fn read_cute_cat(_in: &str) -> Vec { vec![0, 1] } - /// use cloud_storage::Object; - /// - /// let file: Vec = read_cute_cat("cat.png"); - /// Object::create("cat-photos", file, "recently read cat.png", "image/png", None).await?; - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn create( - bucket: &str, - file: Vec, - filename: &str, - mime_type: &str, - parameters: Option, - ) -> crate::Result { - crate::CLOUD_CLIENT - .object() - .create(bucket, file, filename, mime_type, parameters) - .await - } - - /// The synchronous equivalent of `Object::create`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn create_sync( - bucket: &str, - file: Vec, - filename: &str, - mime_type: &str, - parameters: Option, - ) -> crate::Result { - crate::runtime()?.block_on(Self::create(bucket, file, filename, mime_type, parameters)) - } - - /// Create a new object with metadata. - /// Upload a file as that is loaded in memory to google cloud storage, where it will be - /// interpreted according to the mime type you specified. 
- /// ## Example - /// ```rust,no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// # fn read_cute_cat(_in: &str) -> Vec { vec![0, 1] } - /// use cloud_storage::Object; - /// - /// let file: Vec = read_cute_cat("cat.png"); - /// let metadata = serde_json::json!({ - /// "metadata": { - /// "custom_id": "1234" - /// } - /// }); - /// Object::create("cat-photos", file, "recently read cat.png", "image/png", &metadata).await?; - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn create_with( - bucket: &str, - file: Vec, - filename: &str, - mime_type: &str, - metadata: &serde_json::Value, - ) -> crate::Result { - crate::CLOUD_CLIENT - .object() - .create_with(bucket, file, filename, mime_type, metadata) - .await - } - - /// Synchronous equivalent of `Object::create_with` - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn create_with_sync( - bucket: &str, - file: Vec, - filename: &str, - mime_type: &str, - metadata: &serde_json::Value, - ) -> crate::Result { - crate::runtime()?.block_on(Self::create_with(bucket, file, filename, mime_type, metadata)) - } - - /// Create a new object. This works in the same way as `Object::create`, except it does not need - /// to load the entire file in ram. - /// ## Example - /// ```rust,no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Object; - /// - /// let file = reqwest::Client::new() - /// .get("https://my_domain.rs/nice_cat_photo.png") - /// .send() - /// .await? 
- /// .bytes_stream(); - /// Object::create_streamed("cat-photos", file, 10, "recently read cat.png", "image/png", None).await?; - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn create_streamed( - bucket: &str, - stream: S, - length: impl Into>, - filename: &str, - mime_type: &str, - parameters: Option, - ) -> crate::Result - where - S: TryStream + Send + Sync + 'static, - S::Error: Into>, - bytes::Bytes: From, - { - crate::CLOUD_CLIENT - .object() - .create_streamed(bucket, stream, length, filename, mime_type, parameters) - .await - } - - /// The synchronous equivalent of `Object::create_streamed`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn create_streamed_sync( - bucket: &str, - mut file: R, - length: impl Into>, - filename: &str, - mime_type: &str, - parameters: Option, - ) -> crate::Result { - let mut buffer = Vec::new(); - file.read_to_end(&mut buffer) - .map_err(|e| crate::Error::Other(e.to_string()))?; - - let stream = futures_util::stream::once(async { Ok::<_, crate::Error>(buffer) }); - - crate::runtime()?.block_on(Self::create_streamed( - bucket, stream, length, filename, mime_type, parameters, - )) - } - - /// Obtain a list of objects within this Bucket. This function will repeatedly query Google and - /// merge the responses into one. Google responds with 1000 Objects at a time, so if you want to - /// make sure only one http call is performed, make sure to set `list_request.max_results` to - /// 1000. 
- /// ### Example - /// ```no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::{Object, ListRequest}; - /// - /// let all_objects = Object::list("my_bucket", ListRequest::default()).await?; - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn list( - bucket: &str, - list_request: ListRequest, - ) -> crate::Result> + '_> { - crate::CLOUD_CLIENT - .object() - .list(bucket, list_request) - .await - } - - /// The synchronous equivalent of `Object::list`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn list_sync(bucket: &str, list_request: ListRequest) -> crate::Result> { - use futures_util::TryStreamExt; - - let rt = crate::runtime()?; - let listed = rt.block_on(Self::list(bucket, list_request))?; - rt.block_on(listed.try_collect()) - } - - /// Obtains a single object with the specified name in the specified bucket. - /// ### Example - /// ```no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Object; - /// - /// let object = Object::read("my_bucket", "path/to/my/file.png", None).await?; - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn read( - bucket: &str, - file_name: &str, - parameters: Option, - ) -> crate::Result { - crate::CLOUD_CLIENT - .object() - .read(bucket, file_name, parameters) - .await - } - - /// The synchronous equivalent of `Object::read`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. 
- #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn read_sync( - bucket: &str, - file_name: &str, - parameters: Option, - ) -> crate::Result { - crate::runtime()?.block_on(Self::read(bucket, file_name, parameters)) - } - - /// Download the content of the object with the specified name in the specified bucket. - /// ### Example - /// ```no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Object; - /// - /// let bytes = Object::download("my_bucket", "path/to/my/file.png", None).await?; - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn download( - bucket: &str, - file_name: &str, - parameters: Option, - ) -> crate::Result> { - crate::CLOUD_CLIENT - .object() - .download(bucket, file_name, parameters) - .await - } - - /// The synchronous equivalent of `Object::download`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn download_sync( - bucket: &str, - file_name: &str, - parameters: Option, - ) -> crate::Result> { - crate::runtime()?.block_on(Self::download(bucket, file_name, parameters)) - } - - /// Download the content of the object with the specified name in the specified bucket, without - /// allocating the whole file into a vector. 
- /// ### Example - /// ```no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Object; - /// use futures_util::stream::StreamExt; - /// use std::fs::File; - /// use std::io::{BufWriter, Write}; - /// - /// let mut stream = Object::download_streamed("my_bucket", "path/to/my/file.png", None).await?; - /// let mut file = BufWriter::new(File::create("file.png").unwrap()); - /// while let Some(byte) = stream.next().await { - /// file.write_all(&[byte.unwrap()]).unwrap(); - /// } - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn download_streamed( - bucket: &str, - file_name: &str, - parameters: Option, - ) -> crate::Result> + Unpin> { - crate::CLOUD_CLIENT - .object() - .download_streamed(bucket, file_name, parameters) - .await - } - - /// Obtains a single object with the specified name in the specified bucket. - /// ### Example - /// ```no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Object; - /// - /// let mut object = Object::read("my_bucket", "path/to/my/file.png", None).await?; - /// object.content_type = Some("application/xml".to_string()); - /// object.update(None).await?; - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn update(&self, parameters: Option) -> crate::Result { - crate::CLOUD_CLIENT.object().update(self, parameters).await - } - - /// The synchronous equivalent of `Object::download`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn update_sync(&self, parameters: Option) -> crate::Result { - crate::runtime()?.block_on(self.update(parameters)) - } - - /// Deletes a single object with the specified name in the specified bucket. 
- /// ### Example - /// ```no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Object; - /// - /// Object::delete("my_bucket", "path/to/my/file.png", None).await?; - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn delete( - bucket: &str, - file_name: &str, - parameters: Option, - ) -> crate::Result<()> { - crate::CLOUD_CLIENT - .object() - .delete(bucket, file_name, parameters) - .await - } - - /// The synchronous equivalent of `Object::delete`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn delete_sync( - bucket: &str, - file_name: &str, - parameters: Option, - ) -> crate::Result<()> { - crate::runtime()?.block_on(Self::delete(bucket, file_name, parameters)) - } - - /// Obtains a single object with the specified name in the specified bucket. - /// ### Example - /// ```no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::object::{Object, ComposeRequest, SourceObject}; - /// - /// let obj1 = Object::read("my_bucket", "file1", None).await?; - /// let obj2 = Object::read("my_bucket", "file2", None).await?; - /// let compose_request = ComposeRequest { - /// kind: "storage#composeRequest".to_string(), - /// source_objects: vec![ - /// SourceObject { - /// name: obj1.name.clone(), - /// generation: None, - /// object_preconditions: None, - /// }, - /// SourceObject { - /// name: obj2.name.clone(), - /// generation: None, - /// object_preconditions: None, - /// }, - /// ], - /// destination: None, - /// }; - /// let obj3 = Object::compose("my_bucket", &compose_request, "test-concatted-file", None).await?; - /// // obj3 is now a file with the content of obj1 and obj2 concatted together. 
- /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn compose( - bucket: &str, - req: &ComposeRequest, - destination_object: &str, - parameters: Option, - ) -> crate::Result { - crate::CLOUD_CLIENT - .object() - .compose(bucket, req, destination_object, parameters) - .await - } - - /// The synchronous equivalent of `Object::compose`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn compose_sync( - bucket: &str, - req: &ComposeRequest, - destination_object: &str, - parameters: Option, - ) -> crate::Result { - crate::runtime()?.block_on(Self::compose(bucket, req, destination_object, parameters)) - } - - /// Copy this object to the target bucket and path - /// ### Example - /// ```no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::object::{Object, ComposeRequest}; - /// - /// let obj1 = Object::read("my_bucket", "file1", None).await?; - /// let obj2 = obj1.copy("my_other_bucket", "file2", None).await?; - /// // obj2 is now a copy of obj1. - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn copy( - &self, - destination_bucket: &str, - path: &str, - parameters: Option, - ) -> crate::Result { - crate::CLOUD_CLIENT - .object() - .copy(self, destination_bucket, path, parameters) - .await - } - - /// The synchronous equivalent of `Object::copy`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn copy_sync( - &self, - destination_bucket: &str, - path: &str, - parameters: Option, - ) -> crate::Result { - crate::runtime()?.block_on(self.copy(destination_bucket, path, parameters)) - } - - /// Moves a file from the current location to the target bucket and path. 
- /// - /// ## Limitations - /// This function does not yet support rewriting objects to another - /// * Geographical Location, - /// * Encryption, - /// * Storage class. - /// These limitations mean that for now, the rewrite and the copy methods do the same thing. - /// ### Example - /// ```no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::object::Object; - /// - /// let obj1 = Object::read("my_bucket", "file1", None).await?; - /// let obj2 = obj1.rewrite("my_other_bucket", "file2", None).await?; - /// // obj2 is now a copy of obj1. - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "global-client")] - pub async fn rewrite( - &self, - destination_bucket: &str, - path: &str, - parameters: Option, - ) -> crate::Result { - crate::CLOUD_CLIENT - .object() - .rewrite(self, destination_bucket, path, parameters) - .await - } - - /// The synchronous equivalent of `Object::rewrite`. - /// - /// ### Features - /// This function requires that the feature flag `sync` is enabled in `Cargo.toml`. - #[cfg(all(feature = "global-client", feature = "sync"))] - pub fn rewrite_sync( - &self, - destination_bucket: &str, - path: &str, - parameters: Option, - ) -> crate::Result { - crate::runtime()?.block_on(self.rewrite(destination_bucket, path, parameters)) - } - - /// Creates a [Signed Url](https://cloud.google.com/storage/docs/access-control/signed-urls) - /// which is valid for `duration` seconds, and lets the posessor download the file contents - /// without any authentication. - /// ### Example - /// ```no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::{Client, object::{Object, ComposeRequest}}; - /// - /// let client = Client::default(); - /// let obj1 = client.object().read("my_bucket", "file1", None).await?; - /// let url = obj1.download_url(50)?; - /// // url is now a url to which an unauthenticated user can make a request to download a file - /// // for 50 seconds. 
- /// # Ok(()) - /// # } - /// ``` - pub fn download_url(&self, duration: u32) -> crate::Result { - self.sign(&self.name, duration, "GET", None, &HashMap::new()) - } - - /// Creates a [Signed Url](https://cloud.google.com/storage/docs/access-control/signed-urls) - /// which is valid for `duration` seconds, and lets the posessor download the file contents - /// without any authentication. - /// ### Example - /// ```no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::{Client, object::{Object, ComposeRequest}}; - /// - /// let client = Client::default(); - /// let obj1 = client.object().read("my_bucket", "file1", None).await?; - /// let url = obj1.download_url(50)?; - /// // url is now a url to which an unauthenticated user can make a request to download a file - /// // for 50 seconds. - /// # Ok(()) - /// # } - /// ``` - pub fn download_url_with( - &self, - duration: u32, - opts: crate::DownloadOptions, - ) -> crate::Result { - self.sign( - &self.name, - duration, - "GET", - opts.content_disposition, - &HashMap::new(), - ) - } - - /// Creates a [Signed Url](https://cloud.google.com/storage/docs/access-control/signed-urls) - /// which is valid for `duration` seconds, and lets the posessor upload data to a blob - /// without any authentication. - /// ### Example - /// ```no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::{Client, object::{Object, ComposeRequest}}; - /// - /// let client = Client::default(); - /// let obj1 = client.object().read("my_bucket", "file1", None).await?; - /// let url = obj1.upload_url(50)?; - /// // url is now a url to which an unauthenticated user can make a PUT request to upload a file - /// // for 50 seconds. 
- /// # Ok(()) - /// # } - /// ``` - pub fn upload_url(&self, duration: u32) -> crate::Result { - self.sign(&self.name, duration, "PUT", None, &HashMap::new()) - } - - /// Creates a [Signed Url](https://cloud.google.com/storage/docs/access-control/signed-urls) - /// which is valid for `duration` seconds, and lets the posessor upload data and custom metadata - /// to a blob without any authentication. - /// ### Example - /// ```no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::{Client, object::{Object, ComposeRequest}}; - /// use std::collections::HashMap; - /// - /// let client = Client::default(); - /// let obj1 = client.object().read("my_bucket", "file1", None).await?; - /// let mut custom_metadata = HashMap::new(); - /// custom_metadata.insert(String::from("field"), String::from("value")); - /// let (url, headers) = obj1.upload_url_with(50, custom_metadata)?; - /// // url is now a url to which an unauthenticated user can make a PUT request to upload a file - /// // for 50 seconds. Note that the user must also include the returned headers in the PUT request - /// # Ok(()) - /// # } - /// ``` - pub fn upload_url_with( - &self, - duration: u32, - custom_metadata: HashMap, - ) -> crate::Result<(String, HashMap)> { - let url = self.sign(&self.name, duration, "PUT", None, &custom_metadata)?; - let mut headers = HashMap::new(); - for (k, v) in custom_metadata.iter() { - headers.insert(format!("x-goog-meta-{}", k), v.to_string()); - } - Ok((url, headers)) - } - - // /// Creates a [Signed Url](https://cloud.google.com/storage/docs/access-control/signed-urls) - // /// which is valid for `duration` seconds, and lets the posessor upload new file contents. - // /// without any authentication. 
- // pub fn upload_url(&self, duration: u32) -> crate::Result { - // self.sign(&self.name, duration, "POST") - // } - - #[inline(always)] - fn sign( - &self, - file_path: &str, - duration: u32, - http_verb: &str, - content_disposition: Option, - custom_metadata: &HashMap, - ) -> crate::Result { - if duration > 604800 { - let msg = format!( - "duration may not be greater than 604800, but was {}", - duration - ); - return Err(crate::Error::Other(msg)); - } - - // 0 Sort and construct the canonical headers - let mut headers = vec![("host".to_string(), "storage.googleapis.com".to_string())]; - // Add custom metadata headers, guaranteed unique by HashMap input - for (k, v) in custom_metadata.iter() { - headers.push((format!("x-goog-meta-{}", k), v.to_string())); - } - headers.sort_unstable_by(|(k1, _), (k2, _)| k1.cmp(k2)); - let canonical_headers: String = headers - .iter() - .map(|(k, v)| format!("{}:{}", k.to_lowercase(), v.to_lowercase())) - .collect::>() - .join("\n"); - let signed_headers = headers - .iter() - .map(|(k, _)| k.to_lowercase()) - .collect::>() - .join(";"); - - // 1 construct the canonical request - let issue_date = time::OffsetDateTime::now_utc(); - let file_path = self.path_to_resource(file_path); - let query_string = Self::get_canonical_query_string( - &issue_date, - duration, - &signed_headers, - content_disposition, - ); - let canonical_request = self.get_canonical_request( - &file_path, - &query_string, - http_verb, - &canonical_headers, - &signed_headers, - ); - - // 2 get hex encoded SHA256 hash the canonical request - let hex_hash = hex::encode(crypto::sha256(canonical_request.as_bytes()).as_ref()); - - // 3 construct the string to sign - let string_to_sign = format!( - "{signing_algorithm}\n\ - {current_datetime}\n\ - {credential_scope}\n\ - {hashed_canonical_request}", - signing_algorithm = "GOOG4-RSA-SHA256", - current_datetime = issue_date.format(crate::ISO_8601_BASIC_FORMAT).unwrap(), - credential_scope = 
Self::get_credential_scope(&issue_date), - hashed_canonical_request = hex_hash, - ); - - // 4 sign the string to sign with RSA - SHA256 - let signature = hex::encode(crypto::rsa_pkcs1_sha256(&string_to_sign)?); - - // 5 construct the signed url - Ok(format!( - "https://storage.googleapis.com{path_to_resource}?\ - {query_string}&\ - X-Goog-Signature={request_signature}", - path_to_resource = file_path, - query_string = query_string, - request_signature = signature, - )) - } - - #[inline(always)] - fn get_canonical_request( - &self, - path: &str, - query_string: &str, - http_verb: &str, - headers: &str, - signed_headers: &str, - ) -> String { - format!( - "{http_verb}\n\ - {path_to_resource}\n\ - {canonical_query_string}\n\ - {canonical_headers}\n\ - \n\ - {signed_headers}\n\ - {payload}", - http_verb = http_verb, - path_to_resource = path, - canonical_query_string = query_string, - canonical_headers = headers, - signed_headers = signed_headers, - payload = "UNSIGNED-PAYLOAD", - ) - } - - #[inline(always)] - fn get_canonical_query_string( - date: &time::OffsetDateTime, - exp: u32, - headers: &str, - content_disposition: Option, - ) -> String { - let credential = format!( - "{authorizer}/{scope}", - authorizer = crate::SERVICE_ACCOUNT.client_email, - scope = Self::get_credential_scope(date), - ); - let mut s = format!( - "X-Goog-Algorithm={algo}&\ - X-Goog-Credential={cred}&\ - X-Goog-Date={date}&\ - X-Goog-Expires={exp}&\ - X-Goog-SignedHeaders={signed}", - algo = "GOOG4-RSA-SHA256", - cred = percent_encode(&credential), - date = date.format(crate::ISO_8601_BASIC_FORMAT).unwrap(), - exp = exp, - signed = percent_encode(headers), - ); - if let Some(cd) = content_disposition { - use std::fmt::Write; - write!(s, "&response-content-disposition={}", cd).unwrap(); - // ^writing into string is infallible - } - s - } - - #[inline(always)] - fn path_to_resource(&self, path: &str) -> String { - format!( - "/{bucket}/{file_path}", - bucket = self.bucket, - file_path = 
percent_encode_noslash(path), - ) - } - - #[inline(always)] - fn get_credential_scope(date: &time::OffsetDateTime) -> String { - format!("{}/henk/storage/goog4_request", date.format(time::macros::format_description!("[year][month][day]")).unwrap()) - } -} - -#[cfg(feature = "openssl")] -mod openssl { - #[inline(always)] - pub fn rsa_pkcs1_sha256(message: &str) -> crate::Result> { - use openssl::{hash::MessageDigest, pkey::PKey, sign::Signer}; - - let key = PKey::private_key_from_pem(crate::SERVICE_ACCOUNT.private_key.as_bytes())?; - let mut signer = Signer::new(MessageDigest::sha256(), &key)?; - signer.update(message.as_bytes())?; - Ok(signer.sign_to_vec()?) - } - - #[inline(always)] - pub fn sha256(bytes: &[u8]) -> impl AsRef<[u8]> { - openssl::sha::sha256(bytes) - } -} - -#[cfg(feature = "ring")] -mod ring { - #[cfg_attr(all(feature = "ring", feature = "openssl"), allow(dead_code))] - #[inline(always)] - pub fn rsa_pkcs1_sha256(message: &str) -> crate::Result> { - use ring::{ - rand::SystemRandom, - signature::{RsaKeyPair, RSA_PKCS1_SHA256}, - }; - - let key_pem = pem::parse(crate::SERVICE_ACCOUNT.private_key.as_bytes())?; - let key = RsaKeyPair::from_pkcs8(&key_pem.contents)?; - let rng = SystemRandom::new(); - let mut signature = vec![0; key.public_modulus_len()]; - key.sign(&RSA_PKCS1_SHA256, &rng, message.as_bytes(), &mut signature)?; - Ok(signature) - } - - #[cfg_attr(all(feature = "ring", feature = "openssl"), allow(dead_code))] - #[inline(always)] - pub fn sha256(bytes: &[u8]) -> impl AsRef<[u8]> { - use ring::digest::{digest, SHA256}; - digest(&SHA256, bytes) - } -} - -mod crypto { - #[cfg(feature = "openssl")] - pub use super::openssl::*; - #[cfg(all(feature = "ring", not(feature = "openssl")))] - pub use super::ring::*; -} - -const ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC - .remove(b'*') - .remove(b'-') - .remove(b'.') - .remove(b'_'); - -const NOSLASH_ENCODE_SET: &AsciiSet = &ENCODE_SET.remove(b'/').remove(b'~'); - -// We need to be able to percent 
encode stuff, but without touching the slashes in filenames. To -// this end we create an implementation that does this, without touching the slashes. -fn percent_encode_noslash(input: &str) -> String { - utf8_percent_encode(input, NOSLASH_ENCODE_SET).to_string() -} - -pub(crate) fn percent_encode(input: &str) -> String { - utf8_percent_encode(input, ENCODE_SET).to_string() -} - -#[cfg(all(test, feature = "global-client"))] -mod tests { - use super::*; - use crate::Error; - use futures_util::{stream, StreamExt, TryStreamExt}; - - #[tokio::test] - async fn create() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - Object::create(&bucket.name, vec![0, 1], "test-create", "text/plain", None).await?; - Ok(()) - } - - #[tokio::test] - async fn create_with() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - let metadata = serde_json::json!({ - "metadata": { - "object_id": "1234" - } - }); - let obj = Object::create_with(&bucket.name, vec![0, 1], "test-create-meta", "text/plain", &metadata).await?; - assert_eq!(obj.metadata.unwrap().get("object_id"), Some(&String::from("1234"))); - Ok(()) - } - - #[tokio::test] - async fn create_streamed() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - let stream = stream::iter([0u8, 1].iter()) - .map(Ok::<_, Box>) - .map_ok(|&b| bytes::BytesMut::from(&[b][..])); - Object::create_streamed( - &bucket.name, - stream, - 2, - "test-create-streamed", - "text/plain", - None, - ) - .await?; - Ok(()) - } - - #[tokio::test] - async fn list() -> Result<(), Box> { - let test_bucket = crate::read_test_bucket().await; - let _v: Vec = Object::list(&test_bucket.name, ListRequest::default()) - .await? - .try_collect() - .await?; - Ok(()) - } - - async fn flattened_list_prefix_stream( - bucket: &str, - prefix: &str, - ) -> Result, Box> { - let request = ListRequest { - prefix: Some(prefix.into()), - ..Default::default() - }; - - Ok(Object::list(bucket, request) - .await? 
- .map_ok(|object_list| object_list.items) - .try_concat() - .await?) - } - - #[tokio::test] - async fn list_prefix() -> Result<(), Box> { - let test_bucket = crate::read_test_bucket().await; - - let prefix_names = [ - "test-list-prefix/1", - "test-list-prefix/2", - "test-list-prefix/sub/1", - "test-list-prefix/sub/2", - ]; - - for name in &prefix_names { - Object::create(&test_bucket.name, vec![0, 1], name, "text/plain", None).await?; - } - - let list = flattened_list_prefix_stream(&test_bucket.name, "test-list-prefix/").await?; - assert_eq!(list.len(), 4); - let list = flattened_list_prefix_stream(&test_bucket.name, "test-list-prefix/sub").await?; - assert_eq!(list.len(), 2); - Ok(()) - } - - #[tokio::test] - async fn read() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - Object::create(&bucket.name, vec![0, 1], "test-read", "text/plain", None).await?; - Object::read(&bucket.name, "test-read", None).await?; - Ok(()) - } - - #[tokio::test] - async fn download() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - let content = b"hello world"; - Object::create( - &bucket.name, - content.to_vec(), - "test-download", - "application/octet-stream", - None, - ) - .await?; - - let data = Object::download(&bucket.name, "test-download", None).await?; - assert_eq!(data, content); - - Ok(()) - } - - #[tokio::test] - async fn download_streamed() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - let content = b"hello world"; - Object::create( - &bucket.name, - content.to_vec(), - "test-download", - "application/octet-stream", - None, - ) - .await?; - - let result = Object::download_streamed(&bucket.name, "test-download", None).await?; - let data = result.try_collect::>().await?; - assert_eq!(data, content); - - Ok(()) - } - - #[tokio::test] - async fn download_streamed_large() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - let content = vec![5u8; 1_000_000]; - Object::create( - &bucket.name, 
- content.to_vec(), - "test-download-large", - "application/octet-stream", - None, - ) - .await?; - - let mut result = - Object::download_streamed(&bucket.name, "test-download-large", None).await?; - let mut data: Vec = Vec::new(); - while let Some(part) = result.next().await { - data.push(part?); - } - assert_eq!(data, content); - - Ok(()) - } - - #[tokio::test] - async fn update() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - let mut obj = - Object::create(&bucket.name, vec![0, 1], "test-update", "text/plain", None).await?; - obj.content_type = Some("application/xml".to_string()); - obj.update(None).await?; - Ok(()) - } - - #[tokio::test] - async fn delete() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - Object::create(&bucket.name, vec![0, 1], "test-delete", "text/plain", None).await?; - - Object::delete(&bucket.name, "test-delete", None).await?; - - let list: Vec<_> = flattened_list_prefix_stream(&bucket.name, "test-delete").await?; - assert!(list.is_empty()); - - Ok(()) - } - - #[tokio::test] - async fn delete_nonexistent() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - - let nonexistent_object = "test-delete-nonexistent"; - - let delete_result = Object::delete(&bucket.name, nonexistent_object, None).await; - - if let Err(Error::Google(google_error_response)) = delete_result { - assert!(google_error_response.to_string().contains(&format!( - "No such object: {}/{}", - bucket.name, nonexistent_object - ))); - } else { - panic!("Expected a Google error, instead got {:?}", delete_result); - } - - Ok(()) - } - - #[tokio::test] - async fn compose() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - let obj1 = Object::create( - &bucket.name, - vec![0, 1], - "test-compose-1", - "text/plain", - None, - ) - .await?; - let obj2 = Object::create( - &bucket.name, - vec![2, 3], - "test-compose-2", - "text/plain", - None, - ) - .await?; - let compose_request = ComposeRequest { - 
kind: "storage#composeRequest".to_string(), - source_objects: vec![ - SourceObject { - name: obj1.name.clone(), - generation: None, - object_preconditions: None, - }, - SourceObject { - name: obj2.name.clone(), - generation: None, - object_preconditions: None, - }, - ], - destination: None, - }; - let obj3 = - Object::compose(&bucket.name, &compose_request, "test-concatted-file", None).await?; - let url = obj3.download_url(100)?; - let content = reqwest::get(&url).await?.text().await?; - assert_eq!(content.as_bytes(), &[0, 1, 2, 3]); - Ok(()) - } - - #[tokio::test] - async fn copy() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - let original = - Object::create(&bucket.name, vec![2, 3], "test-copy", "text/plain", None).await?; - original - .copy(&bucket.name, "test-copy - copy", None) - .await?; - Ok(()) - } - - #[tokio::test] - async fn rewrite() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - let obj = - Object::create(&bucket.name, vec![0, 1], "test-rewrite", "text/plain", None).await?; - let obj = obj.rewrite(&bucket.name, "test-rewritten", None).await?; - let url = obj.download_url(100)?; - let client = reqwest::Client::default(); - let download = client.head(&url).send().await?; - assert_eq!(download.status().as_u16(), 200); - Ok(()) - } - - #[tokio::test] - async fn test_url_encoding() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - let complicated_names = [ - "asdf", - "asdf+1", - "asdf&&+1?=3,,-_()*&^%$#@!`~{}[]\\|:;\"'<>,.?/äöüëß", - "https://www.google.com", - "परिक्षण फाईल", - "测试很重要", - ]; - for name in &complicated_names { - let _obj = Object::create(&bucket.name, vec![0, 1], name, "text/plain", None).await?; - let obj = Object::read(&bucket.name, &name, None).await.unwrap(); - let url = obj.download_url(100)?; - let client = reqwest::Client::default(); - let download = client.head(&url).send().await?; - assert_eq!(download.status().as_u16(), 200); - } - Ok(()) - } - - #[tokio::test] 
- async fn test_download_url_with() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - let client = reqwest::Client::new(); - let obj = - Object::create(&bucket.name, vec![0, 1], "test-rewrite", "text/plain", None).await?; - - let opts1 = crate::DownloadOptions::new().content_disposition("attachment"); - let download_url1 = obj.download_url_with(100, opts1)?; - let download1 = client.head(&download_url1).send().await?; - assert_eq!(download1.headers()["content-disposition"], "attachment"); - Ok(()) - } - - #[tokio::test] - async fn test_upload_url() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - let client = reqwest::Client::new(); - let blob_name = "test-upload-url"; - let obj = Object::create(&bucket.name, vec![0, 1], blob_name, "text/plain", None).await?; - - let url = obj.upload_url(100).unwrap(); - let updated_content = vec![2, 3]; - let response = client - .put(&url) - .body(updated_content.clone()) - .send() - .await?; - assert!(response.status().is_success()); - let data = Object::download(&bucket.name, blob_name, None).await?; - assert_eq!(data, updated_content); - Ok(()) - } - - #[tokio::test] - async fn test_upload_url_with() -> Result<(), Box> { - let bucket = crate::read_test_bucket().await; - let client = reqwest::Client::new(); - let blob_name = "test-upload-url"; - let obj = Object::create(&bucket.name, vec![0, 1], blob_name, "text/plain", None).await?; - let mut custom_metadata = HashMap::new(); - custom_metadata.insert(String::from("field"), String::from("value")); - - let (url, headers) = obj.upload_url_with(100, custom_metadata).unwrap(); - let updated_content = vec![2, 3]; - let mut request = client.put(&url).body(updated_content); - for (metadata_field, metadata_value) in headers.iter() { - request = request.header(metadata_field, metadata_value); - } - let response = request.send().await?; - assert!(response.status().is_success()); - let updated_obj = Object::read(&bucket.name, blob_name, 
None).await?; - let obj_metadata = updated_obj.metadata.unwrap(); - assert_eq!(obj_metadata.get("field").unwrap(), "value"); - Ok(()) - } - - #[cfg(all(feature = "openssl", feature = "ring"))] - #[test] - fn check_matching_crypto() { - assert_eq!( - openssl::sha256(b"hello").as_ref(), - ring::sha256(b"hello").as_ref() - ); - - assert_eq!( - openssl::rsa_pkcs1_sha256("world").unwrap(), - ring::rsa_pkcs1_sha256("world").unwrap(), - ); - } - - #[cfg(feature = "sync")] - mod sync { - use super::*; - - #[test] - fn create() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); - Object::create_sync(&bucket.name, vec![0, 1], "test-create", "text/plain", None)?; - Ok(()) - } - - #[test] - fn create_with() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); - let metadata = serde_json::json!({ - "metadata": { - "object_id": "1234" - } - }); - let obj = Object::create_with_sync(&bucket.name, vec![0, 1], "test-create-meta", "text/plain", &metadata)?; - assert_eq!(obj.metadata.unwrap().get("object_id"), Some(&String::from("1234"))); - Ok(()) - } - - #[test] - fn create_streamed() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); - let cursor = std::io::Cursor::new([0, 1]); - Object::create_streamed_sync( - &bucket.name, - cursor, - 2, - "test-create-streamed", - "text/plain", - None, - )?; - Ok(()) - } - - #[test] - fn list() -> Result<(), Box> { - let test_bucket = crate::read_test_bucket_sync(); - Object::list_sync(&test_bucket.name, ListRequest::default())?; - Ok(()) - } - - #[test] - fn list_prefix() -> Result<(), Box> { - let test_bucket = crate::read_test_bucket_sync(); - - let prefix_names = [ - "test-list-prefix/1", - "test-list-prefix/2", - "test-list-prefix/sub/1", - "test-list-prefix/sub/2", - ]; - - for name in &prefix_names { - Object::create_sync(&test_bucket.name, vec![0, 1], name, "text/plain", None)?; - } - - let request = ListRequest { - prefix: Some("test-list-prefix/".into()), - ..Default::default() - }; - 
let list = Object::list_sync(&test_bucket.name, request)?; - assert_eq!(list[0].items.len(), 4); - - let request = ListRequest { - prefix: Some("test-list-prefix/sub".into()), - ..Default::default() - }; - let list = Object::list_sync(&test_bucket.name, request)?; - assert_eq!(list[0].items.len(), 2); - Ok(()) - } - - #[test] - fn list_prefix_delimiter() -> Result<(), Box> { - let test_bucket = crate::read_test_bucket_sync(); - - let prefix_names = [ - "test-list-prefix/1", - "test-list-prefix/2", - "test-list-prefix/sub/1", - "test-list-prefix/sub/2", - ]; - - for name in &prefix_names { - Object::create_sync(&test_bucket.name, vec![0, 1], name, "text/plain", None)?; - } - - let request = ListRequest { - prefix: Some("test-list-prefix/".into()), - delimiter: Some("/".into()), - ..Default::default() - }; - let list = Object::list_sync(&test_bucket.name, request)?; - assert_eq!(list[0].items.len(), 2); - assert_eq!(list[0].prefixes.len(), 1); - Ok(()) - } - - #[test] - fn read() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); - Object::create_sync(&bucket.name, vec![0, 1], "test-read", "text/plain", None)?; - Object::read_sync(&bucket.name, "test-read", None)?; - Ok(()) - } - - #[test] - fn download() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); - let content = b"hello world"; - Object::create_sync( - &bucket.name, - content.to_vec(), - "test-download", - "application/octet-stream", - None, - )?; - - let data = Object::download_sync(&bucket.name, "test-download", None)?; - assert_eq!(data, content); - - Ok(()) - } - - #[test] - fn update() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); - let mut obj = - Object::create_sync(&bucket.name, vec![0, 1], "test-update", "text/plain", None)?; - obj.content_type = Some("application/xml".to_string()); - obj.update_sync(None)?; - Ok(()) - } - - #[test] - fn delete() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); - 
Object::create_sync(&bucket.name, vec![0, 1], "test-delete", "text/plain", None)?; - - Object::delete_sync(&bucket.name, "test-delete", None)?; - - let request = ListRequest { - prefix: Some("test-delete".into()), - ..Default::default() - }; - - let list = Object::list_sync(&bucket.name, request)?; - assert!(list[0].items.is_empty()); - - Ok(()) - } - - #[test] - fn delete_nonexistent() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); - - let nonexistent_object = "test-delete-nonexistent"; - - let delete_result = Object::delete_sync(&bucket.name, nonexistent_object, None); - - if let Err(Error::Google(google_error_response)) = delete_result { - assert!(google_error_response.to_string().contains(&format!( - "No such object: {}/{}", - bucket.name, nonexistent_object - ))); - } else { - panic!("Expected a Google error, instead got {:?}", delete_result); - } - - Ok(()) - } - - #[test] - fn compose() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); - let obj1 = Object::create_sync( - &bucket.name, - vec![0, 1], - "test-compose-1", - "text/plain", - None, - )?; - let obj2 = Object::create_sync( - &bucket.name, - vec![2, 3], - "test-compose-2", - "text/plain", - None, - )?; - let compose_request = ComposeRequest { - kind: "storage#composeRequest".to_string(), - source_objects: vec![ - SourceObject { - name: obj1.name.clone(), - generation: None, - object_preconditions: None, - }, - SourceObject { - name: obj2.name.clone(), - generation: None, - object_preconditions: None, - }, - ], - destination: None, - }; - let obj3 = - Object::compose_sync(&bucket.name, &compose_request, "test-concatted-file", None)?; - let url = obj3.download_url(100)?; - let content = reqwest::blocking::get(&url)?.text()?; - assert_eq!(content.as_bytes(), &[0, 1, 2, 3]); - Ok(()) - } - - #[test] - fn copy() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); - let original = - Object::create_sync(&bucket.name, vec![2, 3], "test-copy", 
"text/plain", None)?; - original.copy_sync(&bucket.name, "test-copy - copy", None)?; - Ok(()) - } - - #[test] - fn rewrite() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); - let obj = - Object::create_sync(&bucket.name, vec![0, 1], "test-rewrite", "text/plain", None)?; - let obj = obj.rewrite_sync(&bucket.name, "test-rewritten", None)?; - let url = obj.download_url(100)?; - let client = reqwest::blocking::Client::new(); - let download = client.head(&url).send()?; - assert_eq!(download.status().as_u16(), 200); - Ok(()) - } - - #[test] - fn test_url_encoding() -> Result<(), Box> { - let bucket = crate::read_test_bucket_sync(); - let complicated_names = [ - "asdf", - "asdf+1", - "asdf&&+1?=3,,-_()*&^%$#@!`~{}[]\\|:;\"'<>,.?/äöüëß", - "https://www.google.com", - "परिक्षण फाईल", - "测试很重要", - ]; - for name in &complicated_names { - let _obj = Object::create_sync(&bucket.name, vec![0, 1], name, "text/plain", None)?; - let obj = Object::read_sync(&bucket.name, &name, None).unwrap(); - let url = obj.download_url(100)?; - let client = reqwest::blocking::Client::new(); - let download = client.head(&url).send()?; - assert_eq!(download.status().as_u16(), 200); - } - Ok(()) - } - } -} - -/// A wrapper around a downloaded object's byte stream that provides a useful `size_hint`. 
-pub struct SizedByteStream> + Unpin> { - size: Option, - bytes: S, -} - -impl> + Unpin> SizedByteStream { - pub(crate) fn new(bytes: S, size: Option) -> Self { - Self { size, bytes } - } -} - -impl> + Unpin> Stream for SizedByteStream { - type Item = crate::Result; - - fn poll_next( - mut self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context, - ) -> std::task::Poll> { - futures_util::StreamExt::poll_next_unpin(&mut self.bytes, cx) - } - - fn size_hint(&self) -> (usize, Option) { - let size = self - .size - .and_then(|s| std::convert::TryInto::try_into(s).ok()); - (size.unwrap_or(0), size) - } -} diff --git a/src/sized_byte_stream.rs b/src/sized_byte_stream.rs new file mode 100644 index 0000000..34b5b21 --- /dev/null +++ b/src/sized_byte_stream.rs @@ -0,0 +1,33 @@ +use futures_util::Stream; + +use crate::Error; + +/// A wrapper around a downloaded object's byte stream that provides a useful `size_hint`. +pub struct SizedByteStream> + Unpin> { + size: Option, + bytes: S, +} + +impl<'a, S: Stream> + Unpin> SizedByteStream { + pub(crate) fn new(bytes: S, size: Option) -> Self { + Self { size, bytes } + } +} + +impl<'a, S: Stream> + Unpin> Stream for SizedByteStream { + type Item = Result; + + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context, + ) -> std::task::Poll> { + futures_util::StreamExt::poll_next_unpin(&mut self.bytes, cx) + } + + fn size_hint(&self) -> (usize, Option) { + let size = self + .size + .and_then(|s| std::convert::TryInto::try_into(s).ok()); + (size.unwrap_or(0), size) + } +} diff --git a/src/sync.rs b/src/sync.rs deleted file mode 100644 index c9c54d7..0000000 --- a/src/sync.rs +++ /dev/null @@ -1,79 +0,0 @@ -//! Synchronous clients for Google Cloud Storage endpoints. 
- -mod bucket; -mod bucket_access_control; -mod default_object_access_control; -mod hmac_key; -mod object; -mod object_access_control; - -mod helpers; // for internal use only - -pub use bucket::BucketClient; -pub use bucket_access_control::BucketAccessControlClient; -pub use default_object_access_control::DefaultObjectAccessControlClient; -pub use hmac_key::HmacKeyClient; -pub use object::ObjectClient; -pub use object_access_control::ObjectAccessControlClient; - -/// The primary synchronous entrypoint to perform operations with Google Cloud Storage. -#[derive(Debug)] -pub struct Client { - runtime: tokio::runtime::Runtime, - client: crate::client::Client, -} - -impl Client { - /// Constructs a client with the default token provider, where it attemps to obtain the - /// credentials from the following locations: - /// - /// 1. Checks for the environment variable `SERVICE_ACCOUNT`, and if it exists, reads the file - /// at the path specified there as a credentials json file. - /// 2. It attemps to do the same with the `GOOGLE_APPLICATION_CREDENTIALS` var. - /// 3. It reads the `SERVICE_ACCOUNT_JSON` environment variable directly as json and uses that - /// 4.It attemps to do the same with the `GOOGLE_APPLICATION_CREDENTIALS_JSON` var. - pub fn new() -> crate::Result { - Ok(Self { - runtime: crate::runtime()?, - client: crate::Client::default(), - }) - } - - /// Initializer with a provided refreshable token - pub fn with_cache(token_cache: impl crate::TokenCache + Send + 'static) -> crate::Result { - Ok(Self { - runtime: crate::runtime()?, - client: crate::Client::with_cache(token_cache), - }) - } - - /// Synchronous operations on [`Bucket`](crate::bucket::Bucket)s. - pub fn bucket(&self) -> BucketClient { - BucketClient(self) - } - - /// Synchronous operations on [`BucketAccessControl`](crate::bucket_access_control::BucketAccessControl)s. 
- pub fn bucket_access_control(&self) -> BucketAccessControlClient { - BucketAccessControlClient(self) - } - - /// Synchronous operations on [`DefaultObjectAccessControl`](crate::default_object_access_control::DefaultObjectAccessControl)s. - pub fn default_object_access_control(&self) -> DefaultObjectAccessControlClient { - DefaultObjectAccessControlClient(self) - } - - /// Synchronous operations on [`HmacKey`](crate::hmac_key::HmacKey)s. - pub fn hmac_key(&self) -> HmacKeyClient { - HmacKeyClient(self) - } - - /// Synchronous operations on [`Object`](crate::object::Object)s. - pub fn object(&self) -> ObjectClient { - ObjectClient(self) - } - - /// Synchronous operations on [`ObjectAccessControl`](crate::object_access_control::ObjectAccessControl)s. - pub fn object_access_control(&self) -> ObjectAccessControlClient { - ObjectAccessControlClient(self) - } -} diff --git a/src/sync/bucket.rs b/src/sync/bucket.rs index 874a66e..d64d6b2 100644 --- a/src/sync/bucket.rs +++ b/src/sync/bucket.rs @@ -1,11 +1,11 @@ -use crate::{ - bucket::{IamPolicy, TestIamPermission}, - Bucket, NewBucket, -}; +use crate::{models::{create, IamPolicy, TestIamPermission}, Bucket, Error}; /// Operations on [`Bucket`]()s. #[derive(Debug)] -pub struct BucketClient<'a>(pub(super) &'a super::Client); +pub struct BucketClient<'a> { + pub(crate) client: &'a crate::client::BucketClient<'a>, + pub(crate) runtime: tokio::runtime::Handle, +} impl<'a> BucketClient<'a> { /// Creates a new `Bucket`. 
There are many options that you can provide for creating a new @@ -16,11 +16,11 @@ impl<'a> BucketClient<'a> { /// ``` /// # fn main() -> Result<(), Box> { /// use cloud_storage::sync::Client; - /// use cloud_storage::bucket::{Bucket, NewBucket}; + /// use cloud_storage::bucket::{Bucket, create::Bucket}; /// use cloud_storage::bucket::{Location, MultiRegion}; /// /// let client = Client::new()?; - /// let new_bucket = NewBucket { + /// let new_bucket = create::Bucket { /// name: "cloud-storage-rs-doc-1".to_string(), // this is the only mandatory field /// location: Location::Multi(MultiRegion::Eu), /// ..Default::default() @@ -30,10 +30,9 @@ impl<'a> BucketClient<'a> { /// # Ok(()) /// # } /// ``` - pub fn create(&self, new_bucket: &NewBucket) -> crate::Result { - self.0 - .runtime - .block_on(self.0.client.bucket().create(new_bucket)) + pub fn create(&self, new_bucket: &create::Bucket) -> Result { + self.runtime + .block_on(self.client.create(new_bucket)) } /// Returns all `Bucket`s within this project. @@ -52,8 +51,8 @@ impl<'a> BucketClient<'a> { /// # Ok(()) /// # } /// ``` - pub fn list(&self) -> crate::Result> { - self.0.runtime.block_on(self.0.client.bucket().list()) + pub fn list(&self) -> Result, Error> { + self.runtime.block_on(self.client.list()) } /// Returns a single `Bucket` by its name. If the Bucket does not exist, an error is returned. @@ -65,7 +64,7 @@ impl<'a> BucketClient<'a> { /// /// let client = Client::new()?; /// # use cloud_storage::bucket::NewBucket; - /// # let new_bucket = NewBucket { + /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-2".to_string(), /// # ..Default::default() /// # }; @@ -76,8 +75,8 @@ impl<'a> BucketClient<'a> { /// # Ok(()) /// # } /// ``` - pub fn read(&self, name: &str) -> crate::Result { - self.0.runtime.block_on(self.0.client.bucket().read(name)) + pub fn read(&self, name: &str) -> Result { + self.runtime.block_on(self.client.read(name)) } /// Update an existing `Bucket`. 
If you declare you bucket as mutable, you can edit its fields. @@ -90,7 +89,7 @@ impl<'a> BucketClient<'a> { /// /// let client = Client::new()?; /// # use cloud_storage::bucket::NewBucket; - /// # let new_bucket = NewBucket { + /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-3".to_string(), /// # ..Default::default() /// # }; @@ -107,10 +106,9 @@ impl<'a> BucketClient<'a> { /// # Ok(()) /// # } /// ``` - pub fn update(&self, bucket: &Bucket) -> crate::Result { - self.0 - .runtime - .block_on(self.0.client.bucket().update(bucket)) + pub fn update(&self, bucket: &Bucket) -> Result { + self.runtime + .block_on(self.client.update(bucket)) } /// Delete an existing `Bucket`. This permanently removes a bucket from Google Cloud Storage. @@ -124,7 +122,7 @@ impl<'a> BucketClient<'a> { /// /// let client = Client::new()?; /// # use cloud_storage::bucket::NewBucket; - /// # let new_bucket = NewBucket { + /// # let new_bucket = create::Bucket { /// # name: "unnecessary-bucket".to_string(), /// # ..Default::default() /// # }; @@ -135,10 +133,9 @@ impl<'a> BucketClient<'a> { /// # Ok(()) /// # } /// ``` - pub fn delete(&self, bucket: Bucket) -> crate::Result<()> { - self.0 - .runtime - .block_on(self.0.client.bucket().delete(bucket)) + pub fn delete(&self, bucket: Bucket) -> Result<(), Error> { + self.runtime + .block_on(self.client.delete(bucket)) } /// Returns the [IAM Policy](https://cloud.google.com/iam/docs/) for this bucket. 
@@ -150,7 +147,7 @@ impl<'a> BucketClient<'a> { /// /// let client = Client::new()?; /// # use cloud_storage::bucket::NewBucket; - /// # let new_bucket = NewBucket { + /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-4".to_string(), /// # ..Default::default() /// # }; @@ -162,10 +159,9 @@ impl<'a> BucketClient<'a> { /// # Ok(()) /// # } /// ``` - pub fn get_iam_policy(&self, bucket: &Bucket) -> crate::Result { - self.0 - .runtime - .block_on(self.0.client.bucket().get_iam_policy(bucket)) + pub fn get_iam_policy(&self, bucket: &Bucket) -> Result { + self.runtime + .block_on(self.client.get_iam_policy(bucket)) } /// Updates the [IAM Policy](https://cloud.google.com/iam/docs/) for this bucket. @@ -178,7 +174,7 @@ impl<'a> BucketClient<'a> { /// /// let client = Client::new()?; /// # use cloud_storage::bucket::NewBucket; - /// # let new_bucket = NewBucket { + /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-5".to_string(), /// # ..Default::default() /// # }; @@ -201,10 +197,9 @@ impl<'a> BucketClient<'a> { /// # Ok(()) /// # } /// ``` - pub fn set_iam_policy(&self, bucket: &Bucket, iam: &IamPolicy) -> crate::Result { - self.0 - .runtime - .block_on(self.0.client.bucket().set_iam_policy(bucket, iam)) + pub fn set_iam_policy(&self, bucket: &Bucket, iam: &IamPolicy) -> Result { + self.runtime + .block_on(self.client.set_iam_policy(bucket, iam)) } /// Checks whether the user provided in the service account has this permission. 
@@ -215,7 +210,7 @@ impl<'a> BucketClient<'a> { /// use cloud_storage::Bucket; /// /// let client = Client::new()?; - /// let bucket = client.bucket().read("my-bucket")?; + /// let bucket = client.bucket("my_bucket").read()?; /// client.bucket().test_iam_permission(&bucket, "storage.buckets.get")?; /// # Ok(()) /// # } @@ -224,12 +219,7 @@ impl<'a> BucketClient<'a> { &self, bucket: &Bucket, permission: &str, - ) -> crate::Result { - self.0.runtime.block_on( - self.0 - .client - .bucket() - .test_iam_permission(bucket, permission), - ) + ) -> Result { + self.runtime.block_on(self.client.test_iam_permission(bucket, permission)) } } diff --git a/src/sync/bucket_access_control.rs b/src/sync/bucket_access_control.rs index 85610fa..b39c0c7 100644 --- a/src/sync/bucket_access_control.rs +++ b/src/sync/bucket_access_control.rs @@ -1,11 +1,15 @@ -use crate::bucket_access_control::{BucketAccessControl, Entity, NewBucketAccessControl}; +use crate::{models::{create, BucketAccessControl, Entity}, Error}; + /// Operations on [`BucketAccessControl`](BucketAccessControl)s. #[derive(Debug)] -pub struct BucketAccessControlClient<'a>(pub(super) &'a super::Client); +pub struct BucketAccessControlClient<'a> { + pub(crate) client: &'a crate::client::BucketAccessControlClient<'a>, + pub(crate) runtime: &'a tokio::runtime::Handle, +} impl<'a> BucketAccessControlClient<'a> { - /// Create a new `BucketAccessControl` using the provided `NewBucketAccessControl`, related to + /// Create a new `BucketAccessControl` using the provided `create::BucketAccessControl`, related to /// the `Bucket` provided by the `bucket_name` argument. 
/// /// ### Important @@ -16,29 +20,24 @@ impl<'a> BucketAccessControlClient<'a> { /// ```rust,no_run /// # fn main() -> Result<(), Box> { /// use cloud_storage::sync::Client; - /// use cloud_storage::bucket_access_control::{BucketAccessControl, NewBucketAccessControl}; + /// use cloud_storage::bucket_access_control::{BucketAccessControl, create::BucketAccessControl}; /// use cloud_storage::bucket_access_control::{Role, Entity}; /// /// let client = Client::new()?; - /// let new_bucket_access_control = NewBucketAccessControl { + /// let new_bucket_access_control = create::BucketAccessControl { /// entity: Entity::AllUsers, /// role: Role::Reader, /// }; - /// client.bucket_access_control().create("mybucket", &new_bucket_access_control)?; + /// client.bucket_access_control("my_bucket").create_using(&new_bucket_access_control)?; /// # Ok(()) /// # } /// ``` pub fn create( &self, bucket: &str, - new_bucket_access_control: &NewBucketAccessControl, - ) -> crate::Result { - self.0.runtime.block_on( - self.0 - .client - .bucket_access_control() - .create(bucket, new_bucket_access_control), - ) + new_bucket_access_control: &create::BucketAccessControl, + ) -> Result { + self.runtime.block_on(self.client.create_using(new_bucket_access_control)) } /// Returns all `BucketAccessControl`s related to this bucket. @@ -54,14 +53,12 @@ impl<'a> BucketAccessControlClient<'a> { /// use cloud_storage::bucket_access_control::BucketAccessControl; /// /// let client = Client::new()?; - /// let acls = client.bucket_access_control().list("mybucket")?; + /// let acls = client.bucket_access_control("my_bucket").list()?; /// # Ok(()) /// # } /// ``` - pub fn list(&self, bucket: &str) -> crate::Result> { - self.0 - .runtime - .block_on(self.0.client.bucket_access_control().list(bucket)) + pub fn list(&self) -> Result, Error> { + self.runtime.block_on(self.client.list()) } /// Returns the ACL entry for the specified entity on the specified bucket. 
@@ -77,14 +74,12 @@ impl<'a> BucketAccessControlClient<'a> { /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; /// /// let client = Client::new()?; - /// let controls = client.bucket_access_control().read("mybucket", &Entity::AllUsers)?; + /// let controls = client.bucket_access_control("my_bucket").read(&Entity::AllUsers)?; /// # Ok(()) /// # } /// ``` - pub fn read(&self, bucket: &str, entity: &Entity) -> crate::Result { - self.0 - .runtime - .block_on(self.0.client.bucket_access_control().read(bucket, entity)) + pub fn read(&self, bucket: &str, entity: &Entity) -> Result { + self.runtime.block_on(self.client.read(entity)) } /// Update this `BucketAccessControl`. @@ -100,22 +95,18 @@ impl<'a> BucketAccessControlClient<'a> { /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; /// /// let client = Client::new()?; - /// let mut acl = client.bucket_access_control().read("mybucket", &Entity::AllUsers)?; + /// let my_bucket = client.bucket_access_control("my_bucket"); + /// let mut acl = my_bucket.read(&Entity::AllUsers)?; /// acl.entity = Entity::AllAuthenticatedUsers; - /// client.bucket_access_control().update(&acl)?; + /// my_bucket.update(&acl)?; /// # Ok(()) /// # } /// ``` pub fn update( &self, bucket_access_control: &BucketAccessControl, - ) -> crate::Result { - self.0.runtime.block_on( - self.0 - .client - .bucket_access_control() - .update(bucket_access_control), - ) + ) -> Result { + self.runtime.block_on(self.client.update(bucket_access_control)) } /// Permanently deletes the ACL entry for the specified entity on the specified bucket. 
@@ -131,16 +122,16 @@ impl<'a> BucketAccessControlClient<'a> { /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; /// /// let client = Client::new()?; - /// let controls = client.bucket_access_control().read("mybucket", &Entity::AllUsers)?; - /// client.bucket_access_control().delete(controls)?; + /// let my_bucket = client.bucket_access_control("my_bucket"); + /// let controls = my_bucket.read(&Entity::AllUsers)?; + /// my_bucket.delete(controls)?; /// # Ok(()) /// # } /// ``` - pub fn delete(&self, bucket_access_control: BucketAccessControl) -> crate::Result<()> { - self.0.runtime.block_on( - self.0 + pub fn delete(&self, bucket_access_control: BucketAccessControl) -> Result<(), Error> { + self.runtime.block_on( + self .client - .bucket_access_control() .delete(bucket_access_control), ) } diff --git a/src/sync/client.rs b/src/sync/client.rs new file mode 100644 index 0000000..f18accd --- /dev/null +++ b/src/sync/client.rs @@ -0,0 +1,78 @@ +use crate::Error; + +use super::{BucketClient, BucketAccessControlClient, DefaultObjectAccessControlClient, HmacKeyClient, ObjectClient, ObjectAccessControlClient}; + +/// The primary synchronous entrypoint to perform operations with Google Cloud Storage. +#[derive(Debug)] +pub struct Client { + runtime: tokio::runtime::Runtime, + client: crate::client::Client, +} + +impl Client { + /// Constructs a client with the default token provider, where it attemps to obtain the credentials from the following locations: + pub fn new() -> Result { + Ok(Self { + runtime: crate::runtime()?, + client: crate::Client::default(), + }) + } + + /// Initializer with a provided refreshable token + pub fn with_cache(token_cache: impl crate::TokenCache + 'static) -> Result { + Ok(Self { + runtime: crate::runtime()?, + client: crate::Client::with_cache(token_cache), + }) + } + + /// Synchronous operations on [`Bucket`](crate::bucket::Bucket)s. 
+ pub fn bucket(&self) -> BucketClient { + let handle = self.runtime.handle().to_owned(); + let client = self.client.bucket(); + BucketClient { + runtime: handle, + client: &client, + } + } + + /// Synchronous operations on [`BucketAccessControl`](crate::bucket_access_control::BucketAccessControl)s. + pub fn bucket_access_control(&self, bucket: &str) -> BucketAccessControlClient { + BucketAccessControlClient { + client: &self.client.bucket_access_control(bucket), + runtime: self.runtime.handle() + } + } + + /// Synchronous operations on [`DefaultObjectAccessControl`](crate::default_object_access_control::DefaultObjectAccessControl)s. + pub fn default_object_access_control(&self, bucket: &str) -> DefaultObjectAccessControlClient { + DefaultObjectAccessControlClient { + client: &self.client.default_object_access_control(bucket), + runtime: self.runtime.handle() + } + } + + /// Synchronous operations on [`HmacKey`](crate::hmac_key::HmacKey)s. + pub fn hmac_key(&self) -> HmacKeyClient { + HmacKeyClient { + client: &self.client.hmac_key(), + runtime: self.runtime.handle() + } + } + + /// Synchronous operations on [`Object`](crate::object::Object)s. + pub fn object(&self) -> ObjectClient { + ObjectClient { + client: &self.client.object(), + runtime: self.runtime.handle() + } + } + + /// Synchronous operations on [`ObjectAccessControl`](crate::object_access_control::ObjectAccessControl)s. 
+ pub fn object_access_control(&self, bucket: &str, object: &str) -> ObjectAccessControlClient { + ObjectAccessControlClient { + client: &self.client.object_access_control(bucket, object), + runtime: self.runtime.handle() + } + } +} diff --git a/src/sync/default_object_access_control.rs b/src/sync/default_object_access_control.rs index de19aaf..9e798b2 100644 --- a/src/sync/default_object_access_control.rs +++ b/src/sync/default_object_access_control.rs @@ -1,11 +1,11 @@ -use crate::{ - bucket_access_control::Entity, - default_object_access_control::{DefaultObjectAccessControl, NewDefaultObjectAccessControl}, -}; +use crate::{models::{create, DefaultObjectAccessControl, Entity}, Error}; /// Operations on [`DefaultObjectAccessControl`](DefaultObjectAccessControl)s. #[derive(Debug)] -pub struct DefaultObjectAccessControlClient<'a>(pub(super) &'a super::Client); +pub struct DefaultObjectAccessControlClient<'a> { + pub(crate) client: &'a crate::client::DefaultObjectAccessControlClient<'a>, + pub(crate) runtime: &'a tokio::runtime::Handle, +} impl<'a> DefaultObjectAccessControlClient<'a> { /// Create a new `DefaultObjectAccessControl` entry on the specified bucket. 
@@ -18,29 +18,26 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// # fn main() -> Result<(), Box> { /// use cloud_storage::sync::Client; /// use cloud_storage::default_object_access_control::{ - /// DefaultObjectAccessControl, NewDefaultObjectAccessControl, Role, Entity, + /// DefaultObjectAccessControl, create::DefaultObjectAccessControl, Role, Entity, /// }; /// /// let client = Client::new()?; - /// let new_acl = NewDefaultObjectAccessControl { + /// let new_acl = create::DefaultObjectAccessControl { /// entity: Entity::AllAuthenticatedUsers, /// role: Role::Reader, /// }; - /// let default_acl = client.default_object_access_control().create("mybucket", &new_acl)?; + /// let default_acl = client.default_object_access_control("my_bucket").create(&new_acl)?; /// # client.default_object_access_control().delete(default_acl)?; /// # Ok(()) /// # } /// ``` pub fn create( &self, - bucket: &str, - new_acl: &NewDefaultObjectAccessControl, - ) -> crate::Result { - self.0.runtime.block_on( - self.0 - .client - .default_object_access_control() - .create(bucket, new_acl), + new_acl: &create::DefaultObjectAccessControl, + ) -> Result { + self.runtime.block_on( + self.client + .create(new_acl), ) } @@ -56,14 +53,13 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// use cloud_storage::default_object_access_control::DefaultObjectAccessControl; /// /// let client = Client::new()?; - /// let default_acls = client.default_object_access_control().list("mybucket")?; + /// let default_acls = client.default_object_access_control().list("my_bucket")?; /// # Ok(()) /// # } /// ``` - pub fn list(&self, bucket: &str) -> crate::Result> { - self.0 - .runtime - .block_on(self.0.client.default_object_access_control().list(bucket)) + pub fn list(&self, bucket: &str) -> Result, Error> { + self.runtime + .block_on(self.client.list()) } /// Read a single `DefaultObjectAccessControl`. 
@@ -82,16 +78,13 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// use cloud_storage::default_object_access_control::{DefaultObjectAccessControl, Entity}; /// /// let client = Client::new()?; - /// let default_acl = client.default_object_access_control().read("mybucket", &Entity::AllUsers)?; + /// let default_acl = client.default_object_access_control("my_bucket").read(&Entity::AllUsers)?; /// # Ok(()) /// # } /// ``` - pub fn read(&self, bucket: &str, entity: &Entity) -> crate::Result { - self.0.runtime.block_on( - self.0 - .client - .default_object_access_control() - .read(bucket, entity), + pub fn read(&self, bucket: &str, entity: &Entity) -> Result { + self.runtime.block_on( + self.client.read(entity), ) } @@ -107,7 +100,7 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// use cloud_storage::default_object_access_control::{DefaultObjectAccessControl, Entity}; /// /// let client = Client::new()?; - /// let mut default_acl = client.default_object_access_control().read("my_bucket", &Entity::AllUsers)?; + /// let mut default_acl = client.default_object_access_control("my_bucket").read(&Entity::AllUsers)?; /// default_acl.entity = Entity::AllAuthenticatedUsers; /// client.default_object_access_control().update(&default_acl)?; /// # Ok(()) @@ -116,11 +109,9 @@ impl<'a> DefaultObjectAccessControlClient<'a> { pub fn update( &self, default_object_access_control: &DefaultObjectAccessControl, - ) -> crate::Result { - self.0.runtime.block_on( - self.0 - .client - .default_object_access_control() + ) -> Result { + self.runtime.block_on( + self.client .update(default_object_access_control), ) } @@ -137,7 +128,7 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// use cloud_storage::default_object_access_control::{DefaultObjectAccessControl, Entity}; /// /// let client = Client::new()?; - /// let mut default_acl = client.default_object_access_control().read("my_bucket", &Entity::AllUsers)?; + /// let mut default_acl = 
client.default_object_access_control("my_bucket").read(&Entity::AllUsers)?; /// client.default_object_access_control().delete(default_acl)?; /// # Ok(()) /// # } @@ -146,10 +137,8 @@ impl<'a> DefaultObjectAccessControlClient<'a> { &self, default_object_access_control: DefaultObjectAccessControl, ) -> Result<(), crate::Error> { - self.0.runtime.block_on( - self.0 - .client - .default_object_access_control() + self.runtime.block_on( + self.client .delete(default_object_access_control), ) } diff --git a/src/sync/hmac_key.rs b/src/sync/hmac_key.rs index d962977..c41afac 100644 --- a/src/sync/hmac_key.rs +++ b/src/sync/hmac_key.rs @@ -1,8 +1,11 @@ -use crate::hmac_key::{HmacKey, HmacMeta, HmacState}; +use crate::{Error, models::{HmacKey, HmacMeta, HmacState}}; /// Operations on [`HmacKey`](HmacKey)s. #[derive(Debug)] -pub struct HmacKeyClient<'a>(pub(super) &'a super::Client); +pub struct HmacKeyClient<'a> { + pub(crate) client: &'a crate::client::HmacKeyClient<'a>, + pub(crate) runtime: &'a tokio::runtime::Handle, +} impl<'a> HmacKeyClient<'a> { /// Creates a new HMAC key for the specified service account. @@ -26,8 +29,8 @@ impl<'a> HmacKeyClient<'a> { /// # Ok(()) /// # } /// ``` - pub fn create(&self) -> crate::Result { - self.0.runtime.block_on(self.0.client.hmac_key().create()) + pub fn create(&self) -> Result { + self.runtime.block_on(self.client.create()) } /// Retrieves a list of HMAC keys matching the criteria. Since the HmacKey is secret, this does @@ -50,8 +53,8 @@ impl<'a> HmacKeyClient<'a> { /// # Ok(()) /// # } /// ``` - pub fn list(&self) -> crate::Result> { - self.0.runtime.block_on(self.0.client.hmac_key().list()) + pub fn list(&self) -> Result, Error> { + self.runtime.block_on(self.client.list()) } /// Retrieves an HMAC key's metadata. 
Since the HmacKey is secret, this does not return a @@ -73,10 +76,9 @@ impl<'a> HmacKeyClient<'a> { /// let key = client.hmac_key().read("some identifier")?; /// # Ok(()) /// # } - pub fn read(&self, access_id: &str) -> crate::Result { - self.0 - .runtime - .block_on(self.0.client.hmac_key().read(access_id)) + pub fn read(&self, access_id: &str) -> Result { + self.runtime + .block_on(self.client.read(access_id)) } /// Updates the state of an HMAC key. See the HMAC Key resource descriptor for valid states. @@ -98,10 +100,9 @@ impl<'a> HmacKeyClient<'a> { /// let key = client.hmac_key().update("your key", HmacState::Active)?; /// # Ok(()) /// # } - pub fn update(&self, access_id: &str, state: HmacState) -> crate::Result { - self.0 - .runtime - .block_on(self.0.client.hmac_key().update(access_id, state)) + pub fn update(&self, access_id: &str, state: HmacState) -> Result { + self.runtime + .block_on(self.client.update(access_id, state)) } /// Deletes an HMAC key. Note that a key must be set to `Inactive` first. @@ -122,9 +123,8 @@ impl<'a> HmacKeyClient<'a> { /// client.hmac_key().delete(&key.access_id)?; /// # Ok(()) /// # } - pub fn delete(&self, access_id: &str) -> crate::Result<()> { - self.0 - .runtime - .block_on(self.0.client.hmac_key().delete(access_id)) + pub fn delete(&self, access_id: &str) -> Result<(), Error> { + self.runtime + .block_on(self.client.delete(access_id)) } } diff --git a/src/sync/mod.rs b/src/sync/mod.rs new file mode 100644 index 0000000..fc26791 --- /dev/null +++ b/src/sync/mod.rs @@ -0,0 +1,19 @@ +//! Synchronous clients for Google Cloud Storage endpoints. 
+ +mod bucket; +mod bucket_access_control; +mod client; +mod default_object_access_control; +mod hmac_key; +mod object; +mod object_access_control; + +mod helpers; // for internal use only + +pub use client::Client; +pub use bucket::BucketClient; +pub use bucket_access_control::BucketAccessControlClient; +pub use default_object_access_control::DefaultObjectAccessControlClient; +pub use hmac_key::HmacKeyClient; +pub use object::ObjectClient; +pub use object_access_control::ObjectAccessControlClient; \ No newline at end of file diff --git a/src/sync/object.rs b/src/sync/object.rs index 6d1c020..57a1381 100644 --- a/src/sync/object.rs +++ b/src/sync/object.rs @@ -1,20 +1,16 @@ -use crate::{ - object::{ - ComposeParameters, ComposeRequest, CopyParameters, CreateParameters, DeleteParameters, - ObjectList, ReadParameters, RewriteParameters, UpdateParameters, - }, - ListRequest, Object, -}; - -use futures_util::io::AllowStdIo; -use futures_util::StreamExt; -use futures_util::TryStreamExt; +use bytes::Buf; +use futures_util::{io::AllowStdIo, StreamExt, TryStreamExt}; use tokio::io::AsyncWriteExt; use tokio_util::compat::FuturesAsyncWriteCompatExt; +use crate::{models::{CreateParameters, ObjectList, ReadParameters, UpdateParameters, DeleteParameters, ComposeRequest, ComposeParameters, CopyParameters, RewriteParameters}, Object, Error, ListRequest}; + /// Operations on [`Object`](Object)s. #[derive(Debug)] -pub struct ObjectClient<'a>(pub(super) &'a super::Client); +pub struct ObjectClient<'a> { + pub(crate) client: &'a crate::client::ObjectClient<'a>, + pub(crate) runtime: &'a tokio::runtime::Handle, +} impl<'a> ObjectClient<'a> { /// Create a new object. 
@@ -29,7 +25,7 @@ impl<'a> ObjectClient<'a> { /// /// let file: Vec = read_cute_cat("cat.png"); /// let client = Client::new()?; - /// client.object().create("cat-photos", file, "recently read cat.png", "image/png", None)?; + /// client.object("cat-photos").create(file, "recently read cat.png", "image/png", None)?; /// # Ok(()) /// # } /// ``` @@ -40,11 +36,9 @@ impl<'a> ObjectClient<'a> { filename: &str, mime_type: &str, parameters: Option, - ) -> crate::Result { - self.0.runtime.block_on( - self.0 - .client - .object() + ) -> Result { + self.runtime.block_on( + self.client .create(bucket, file, filename, mime_type, parameters), ) } @@ -66,7 +60,7 @@ impl<'a> ObjectClient<'a> { /// "custom_id": "1234" /// } /// }); - /// client.object().create_with("cat-photos", file, "recently read cat.png", "image/png", &metadata)?; + /// client.object("cat-photos").create_with(file, "recently read cat.png", "image/png", &metadata)?; /// # Ok(()) /// # } /// ``` @@ -77,11 +71,9 @@ impl<'a> ObjectClient<'a> { filename: &str, mime_type: &str, metadata: &serde_json::Value, - ) -> crate::Result { - self.0.runtime.block_on( - self.0 - .client - .object() + ) -> Result { + self.runtime.block_on( + self.client .create_with(bucket, file, filename, mime_type, metadata), ) } @@ -96,16 +88,14 @@ impl<'a> ObjectClient<'a> { filename: &str, mime_type: &str, parameters: Option, - ) -> crate::Result + ) -> Result where R: std::io::Read + Send + Sync + Unpin + 'static, { let stream = super::helpers::ReaderStream::new(file); - self.0.runtime.block_on( - self.0 - .client - .object() + self.runtime.block_on( + self.client .create_streamed(bucket, stream, length, filename, mime_type, parameters), ) } @@ -119,16 +109,14 @@ impl<'a> ObjectClient<'a> { filename: &str, mime_type: &str, metadata: &serde_json::Value, - ) -> crate::Result + ) -> Result where R: std::io::Read + Send + Sync + Unpin + 'static, { let stream = super::helpers::ReaderStream::new(file); - self.0.runtime.block_on( - self.0 - 
.client - .object() + self.runtime.block_on( + self.client .create_streamed_with(bucket, stream, filename, mime_type, metadata), ) } @@ -141,7 +129,7 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::{Object, ListRequest}; /// /// let client = Client::new()?; - /// let all_objects = client.object().list("my_bucket", ListRequest::default())?; + /// let all_objects = client.object("my_bucket").list(ListRequest::default())?; /// # Ok(()) /// # } /// ``` @@ -149,9 +137,9 @@ impl<'a> ObjectClient<'a> { &self, bucket: &'a str, list_request: ListRequest, - ) -> crate::Result> { - let rt = &self.0.runtime; - let listed = rt.block_on(self.0.client.object().list(bucket, list_request))?; + ) -> Result, Error> { + let rt = &self.runtime; + let listed = rt.block_on(self.client.list(bucket, list_request))?; rt.block_on(listed.try_collect()) } @@ -163,7 +151,7 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::Object; /// /// let client = Client::new()?; - /// let object = client.object().read("my_bucket", "path/to/my/file.png", None)?; + /// let object = client.object("my_bucket").read("path/to/my/file.png", None)?; /// # Ok(()) /// # } /// ``` @@ -172,10 +160,9 @@ impl<'a> ObjectClient<'a> { bucket: &str, file_name: &str, parameters: Option, - ) -> crate::Result { - self.0 - .runtime - .block_on(self.0.client.object().read(bucket, file_name, parameters)) + ) -> Result { + self.runtime + .block_on(self.client.read(bucket, file_name, parameters)) } /// Download the content of the object with the specified name in the specified bucket. 
@@ -186,7 +173,7 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::Object; /// /// let client = Client::new()?; - /// let bytes = client.object().download("my_bucket", "path/to/my/file.png", None)?; + /// let bytes = client.object("my_bucket").download("path/to/my/file.png", None)?; /// # Ok(()) /// # } /// ``` @@ -195,11 +182,9 @@ impl<'a> ObjectClient<'a> { bucket: &str, file_name: &str, parameters: Option, - ) -> crate::Result> { - self.0.runtime.block_on( - self.0 - .client - .object() + ) -> Result, Error> { + self.runtime.block_on( + self.client .download(bucket, file_name, parameters), ) } @@ -216,25 +201,22 @@ impl<'a> ObjectClient<'a> { /// /// let client = Client::new()?; /// let file = File::create("somefile")?; - /// let bytes = client.object().download("my_bucket", "path/to/my/file.png", file)?; + /// let bytes = client.object("my_bucket").download("path/to/my/file.png", file)?; /// # Ok(()) /// # } /// ``` - pub fn download_streamed(&self, bucket: &str, file_name: &str, file: W) -> crate::Result<()> + pub fn download_streamed(&self, bucket: &str, file_name: &str, file: W) -> Result<(), Error> where W: std::io::Write, // + Send + Sync + Unpin + 'static, { - self.0.runtime.block_on(async { - let mut stream = self - .0 - .client - .object() - .download_streamed(bucket, file_name) + self.runtime.block_on(async { + let mut stream = self.client + .download_streamed(bucket, file_name, None) .await?; let mut writer = tokio::io::BufWriter::new(AllowStdIo::new(file).compat_write()); while let Some(byte) = stream.next().await { - writer.write_all(&[byte?]).await?; + writer.write_all(byte?.chunk()).await?; } writer.flush().await?; Ok(()) @@ -249,7 +231,7 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::Object; /// /// let client = Client::new()?; - /// let mut object = client.object().read("my_bucket", "path/to/my/file.png", None)?; + /// let mut object = client.object("my_bucket").read("path/to/my/file.png", None)?; /// object.content_type = 
Some("application/xml".to_string()); /// client.object().update(&object, None)?; /// # Ok(()) @@ -259,10 +241,9 @@ impl<'a> ObjectClient<'a> { &self, object: &Object, parameters: Option, - ) -> crate::Result { - self.0 - .runtime - .block_on(self.0.client.object().update(object, parameters)) + ) -> Result { + self.runtime + .block_on(self.client.update(object, parameters)) } /// Deletes a single object with the specified name in the specified bucket. @@ -273,7 +254,7 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::Object; /// /// let client = Client::new()?; - /// client.object().delete("my_bucket", "path/to/my/file.png", None)?; + /// client.object("my_bucket").delete("path/to/my/file.png", None)?; /// # Ok(()) /// # } /// ``` @@ -282,10 +263,9 @@ impl<'a> ObjectClient<'a> { bucket: &str, file_name: &str, parameters: Option, - ) -> crate::Result<()> { - self.0 - .runtime - .block_on(self.0.client.object().delete(bucket, file_name, parameters)) + ) -> Result<(), Error> { + self.runtime + .block_on(self.client.delete(bucket, file_name, parameters)) } /// Obtains a single object with the specified name in the specified bucket. 
@@ -296,8 +276,8 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::object::{Object, ComposeRequest, SourceObject}; /// /// let client = Client::new()?; - /// let obj1 = client.object().read("my_bucket", "file1", None)?; - /// let obj2 = client.object().read("my_bucket", "file2", None)?; + /// let obj1 = client.object("my_bucket").read("file1", None)?; + /// let obj2 = client.object("my_bucket").read("file2", None)?; /// let compose_request = ComposeRequest { /// kind: "storage#composeRequest".to_string(), /// source_objects: vec![ @@ -314,7 +294,7 @@ impl<'a> ObjectClient<'a> { /// ], /// destination: None, /// }; - /// let obj3 = client.object().compose("my_bucket", &compose_request, "test-concatted-file", None)?; + /// let obj3 = client.object("my_bucket").compose(&compose_request, "test-concatted-file", None)?; /// // obj3 is now a file with the content of obj1 and obj2 concatted together. /// # Ok(()) /// # } @@ -325,8 +305,8 @@ impl<'a> ObjectClient<'a> { req: &ComposeRequest, destination_object: &str, parameters: Option, - ) -> crate::Result { - self.0.runtime.block_on(self.0.client.object().compose( + ) -> Result { + self.runtime.block_on(self.client.compose( bucket, req, destination_object, @@ -342,7 +322,7 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::object::{Object, ComposeRequest}; /// /// let client = Client::new()?; - /// let obj1 = client.object().read("my_bucket", "file1", None)?; + /// let obj1 = client.object("my_bucket").read("file1", None)?; /// let obj2 = client.object().copy(&obj1, "my_other_bucket", "file2", None)?; /// // obj2 is now a copy of obj1. 
/// # Ok(()) @@ -354,8 +334,8 @@ impl<'a> ObjectClient<'a> { destination_bucket: &str, path: &str, parameters: Option, - ) -> crate::Result { - self.0.runtime.block_on(self.0.client.object().copy( + ) -> Result { + self.runtime.block_on(self.client.copy( object, destination_bucket, path, @@ -378,7 +358,7 @@ impl<'a> ObjectClient<'a> { /// use cloud_storage::object::Object; /// /// let client = Client::new()?; - /// let obj1 = client.object().read("my_bucket", "file1", None)?; + /// let obj1 = client.object("my_bucket").read("file1", None)?; /// let obj2 = client.object().rewrite(&obj1, "my_other_bucket", "file2", None)?; /// // obj2 is now a copy of obj1. /// # Ok(()) @@ -390,8 +370,8 @@ impl<'a> ObjectClient<'a> { destination_bucket: &str, path: &str, parameters: Option, - ) -> crate::Result { - self.0.runtime.block_on(self.0.client.object().rewrite( + ) -> Result { + self.runtime.block_on(self.client.rewrite( object, destination_bucket, path, diff --git a/src/sync/object_access_control.rs b/src/sync/object_access_control.rs index 21203e4..476c3c1 100644 --- a/src/sync/object_access_control.rs +++ b/src/sync/object_access_control.rs @@ -1,11 +1,12 @@ -use crate::{ - bucket_access_control::Entity, - object_access_control::{NewObjectAccessControl, ObjectAccessControl}, -}; +use crate::{models::{create, ObjectAccessControl, Entity}, Error}; + /// Operations on [`ObjectAccessControl`](ObjectAccessControl)s. #[derive(Debug)] -pub struct ObjectAccessControlClient<'a>(pub(super) &'a super::Client); +pub struct ObjectAccessControlClient<'a> { + pub(crate) client: &'a crate::client::ObjectAccessControlClient<'a>, + pub(crate) runtime: &'a tokio::runtime::Handle, +} impl<'a> ObjectAccessControlClient<'a> { /// Creates a new ACL entry on the specified `object`. @@ -16,17 +17,10 @@ impl<'a> ObjectAccessControlClient<'a> { /// control access instead. 
pub fn create( &self, - bucket: &str, - object: &str, - new_object_access_control: &NewObjectAccessControl, - ) -> crate::Result { - self.0 - .runtime - .block_on(self.0.client.object_access_control().create( - bucket, - object, - new_object_access_control, - )) + new_object_access_control: &create::ObjectAccessControl, + ) -> Result { + self.runtime + .block_on(self.client.create(new_object_access_control)) } /// Retrieves `ACL` entries on the specified object. @@ -35,10 +29,9 @@ impl<'a> ObjectAccessControlClient<'a> { /// Important: This method fails with a 400 Bad Request response for buckets with uniform /// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to /// control access instead. - pub fn list(&self, bucket: &str, object: &str) -> crate::Result> { - self.0 - .runtime - .block_on(self.0.client.object_access_control().list(bucket, object)) + pub fn list(&self, bucket: &str, object: &str) -> Result, Error> { + self.runtime + .block_on(self.client.list()) } /// Returns the `ACL` entry for the specified entity on the specified bucket. @@ -52,12 +45,9 @@ impl<'a> ObjectAccessControlClient<'a> { bucket: &str, object: &str, entity: &Entity, - ) -> crate::Result { - self.0.runtime.block_on( - self.0 - .client - .object_access_control() - .read(bucket, object, entity), + ) -> Result { + self.runtime.block_on( + self.client.read(entity), ) } @@ -70,12 +60,9 @@ impl<'a> ObjectAccessControlClient<'a> { pub fn update( &self, object_access_control: &ObjectAccessControl, - ) -> crate::Result { - self.0.runtime.block_on( - self.0 - .client - .object_access_control() - .update(object_access_control), + ) -> Result { + self.runtime.block_on( + self.client.update(object_access_control), ) } @@ -85,11 +72,9 @@ impl<'a> ObjectAccessControlClient<'a> { /// Important: This method fails with a 400 Bad Request response for buckets with uniform /// bucket-level access enabled. 
Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to /// control access instead. - pub fn delete(&self, object_access_control: ObjectAccessControl) -> crate::Result<()> { - self.0.runtime.block_on( - self.0 - .client - .object_access_control() + pub fn delete(&self, object_access_control: ObjectAccessControl) -> Result<(), Error> { + self.runtime.block_on( + self.client .delete(object_access_control), ) } diff --git a/src/token.rs b/src/token.rs index d9239a5..68f121b 100644 --- a/src/token.rs +++ b/src/token.rs @@ -1,37 +1,35 @@ +use crate::Error; use std::fmt::{Display, Formatter}; /// Trait that refreshes a token when it is expired #[async_trait::async_trait] -pub trait TokenCache: Sync { +pub trait TokenCache: Sync + Send { /// Returns the token that is currently held within the instance of `TokenCache`, together with /// the expiry of that token as a u64 in seconds sine the Unix Epoch (1 Jan 1970). - async fn token_and_exp(&self) -> Option<(String, u64)>; + async fn token_and_exp(&self) -> Option; /// Updates the token to the value `token`. - async fn set_token(&self, token: String, exp: u64) -> crate::Result<()>; + async fn set_token(&self, token_data: TokenData) -> Result<(), Error>; /// Returns the intended scope for the current token. async fn scope(&self) -> String; /// Returns a valid, unexpired token. If the contained token is expired, it updates and returns /// the token. 
- async fn get(&self, client: &reqwest::Client) -> crate::Result { + async fn get(&self, client: &reqwest::Client, client_email: String, private_key: &[u8]) -> Result { match self.token_and_exp().await { - Some((token, exp)) if now() + 300 < exp => Ok(token), + Some(token_data) if now() + 300 < token_data.expires_at => Ok(token_data.jwt), _ => { - let (token, exp) = self.fetch_token(client).await?; - self.set_token(token, exp).await?; + let token_data = self.fetch_token(client, client_email, private_key).await?; + self.set_token(token_data).await?; - self.token_and_exp() - .await - .map(|(t, _)| t) - .ok_or_else(|| crate::Error::Other("Token is not set".to_string())) + self.token_and_exp().await.map(|token_data| token_data.jwt).ok_or_else(|| crate::Error::Other("Token is not set".to_string())) } } } /// Fetches and returns the token using the service account - async fn fetch_token(&self, client: &reqwest::Client) -> crate::Result<(String, u64)>; + async fn fetch_token(&self, client: &reqwest::Client, client_email: String, private_key: &[u8]) -> Result; } #[derive(serde::Serialize)] @@ -55,17 +53,29 @@ struct TokenResponse { pub struct Token { // this field contains the JWT and the expiry thereof. They are in the same Option because if // one of them is `Some`, we require that the other be `Some` as well. 
- token: tokio::sync::RwLock>, + token: tokio::sync::RwLock>, // store the access scope for later use if we need to refresh the token access_scope: String, } #[derive(Debug, Clone)] -pub struct DefaultTokenData(String, u64); +pub struct TokenData { + jwt: String, + expires_at: u64 +} + +impl TokenData { + pub(crate) fn new(jwt: String, expires_at: u64) -> Self { + TokenData { + jwt, + expires_at + } + } +} -impl Display for DefaultTokenData { +impl Display for TokenData { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) + write!(f, "{}", self.jwt) } } @@ -90,21 +100,21 @@ impl TokenCache for Token { self.access_scope.clone() } - async fn token_and_exp(&self) -> Option<(String, u64)> { - self.token.read().await.as_ref().map(|d| (d.0.clone(), d.1)) + async fn token_and_exp(&self) -> Option { + self.token.read().await.clone() } - async fn set_token(&self, token: String, exp: u64) -> crate::Result<()> { - *self.token.write().await = Some(DefaultTokenData(token, exp)); + async fn set_token(&self, token_data: TokenData) -> Result<(), Error> { + *self.token.write().await = Some(token_data); Ok(()) } - async fn fetch_token(&self, client: &reqwest::Client) -> crate::Result<(String, u64)> { + async fn fetch_token(&self, client: &reqwest::Client, client_email: String, private_key: &[u8]) -> Result { let now = now(); let exp = now + 3600; let claims = Claims { - iss: crate::SERVICE_ACCOUNT.client_email.clone(), + iss: client_email, scope: self.scope().await, aud: "https://www.googleapis.com/oauth2/v4/token".to_string(), exp, @@ -114,7 +124,7 @@ impl TokenCache for Token { alg: jsonwebtoken::Algorithm::RS256, ..Default::default() }; - let private_key_bytes = crate::SERVICE_ACCOUNT.private_key.as_bytes(); + let private_key_bytes = private_key; let private_key = jsonwebtoken::EncodingKey::from_rsa_pem(private_key_bytes)?; let jwt = jsonwebtoken::encode(&header, &claims, &private_key)?; let body = [ @@ -128,7 +138,7 @@ impl TokenCache for Token 
{ .await? .json() .await?; - Ok((response.access_token, now + response.expires_in)) + Ok(TokenData::new(response.access_token, now + response.expires_in)) } } From 4dd9a717c55c5a16b027b6bd121c2f19c2dd64a3 Mon Sep 17 00:00:00 2001 From: SonnyX Date: Tue, 2 May 2023 15:51:53 +0200 Subject: [PATCH 12/26] fix ObjectClient --- src/client/client.rs | 1 - src/client/object.rs | 10 ++++------ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/src/client/client.rs b/src/client/client.rs index ba4937d..213cd7c 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -96,7 +96,6 @@ impl Client { /// Operations on [`Object`](crate::object::Object)s. pub fn object(&self) -> ObjectClient { ObjectClient { - object_creation_url: todo!(), base_url: "https://storage.googleapis.com/storage/v1/", client: self, } diff --git a/src/client/object.rs b/src/client/object.rs index 3243d56..9048a26 100644 --- a/src/client/object.rs +++ b/src/client/object.rs @@ -1,4 +1,3 @@ -use bytes::Buf; use futures_util::{Stream, stream, TryStream}; use reqwest::StatusCode; @@ -8,7 +7,6 @@ use crate::{models::{CreateParameters, ObjectList, ReadParameters, UpdateParamet #[derive(Debug)] pub struct ObjectClient<'a> { pub(crate) client: &'a super::client::Client, - pub(crate) object_creation_url: &'a str, // {}/{}/o?name={}&uploadType=media pub(crate) base_url: &'a str, } @@ -40,7 +38,7 @@ impl<'a> ObjectClient<'a> { ) -> Result { use reqwest::header::{CONTENT_LENGTH, CONTENT_TYPE}; - let url = &format!("{}&uploadType=media", self.object_creation_url); + let url = &format!("{}/{}/o?name={}&uploadType=media", self.base_url, bucket, filename); let mut headers = self.client.get_headers().await?; headers.insert(CONTENT_TYPE, mime_type.parse()?); headers.insert(CONTENT_LENGTH, file.len().to_string().parse()?); @@ -88,7 +86,7 @@ impl<'a> ObjectClient<'a> { mime_type: &str, metadata: &serde_json::Value, ) -> Result { - let url = &format!("{}&uploadType=multipart", self.object_creation_url); + let 
url = &format!("{}/{}/o?name={}&uploadType=multipart", self.base_url, bucket, filename); // single-request upload that includes metadata require a mutlipart request where // part 1 is metadata, and part2 is the file to upload @@ -145,7 +143,7 @@ impl<'a> ObjectClient<'a> { S::Error: Into>, bytes::Bytes: From, { - let url = &format!("{}&uploadType=multipart", self.object_creation_url); + let url = &format!("{}/{}/o?name={}&uploadType=multipart", self.base_url, bucket, filename); let headers = self.client.get_headers().await?; // single-request upload that includes metadata require a mutlipart request where @@ -206,7 +204,7 @@ impl<'a> ObjectClient<'a> { { use reqwest::header::{CONTENT_LENGTH, CONTENT_TYPE}; - let url = &format!("{}&uploadType=media", self.object_creation_url); + let url = &format!("{}/{}/o?name={}&uploadType=media", self.base_url, bucket, filename); let mut headers = self.client.get_headers().await?; headers.insert(CONTENT_TYPE, mime_type.parse()?); if let Some(length) = length.into() { From d9d4b984c913489b8d03042248a8b704311559bb Mon Sep 17 00:00:00 2001 From: SonnyX Date: Tue, 2 May 2023 15:54:19 +0200 Subject: [PATCH 13/26] Fix warnings --- src/client/default_object_access_control.rs | 3 +-- src/models/create/mod.rs | 4 ++-- src/models/mod.rs | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/client/default_object_access_control.rs b/src/client/default_object_access_control.rs index ec4278b..0f36c1a 100644 --- a/src/client/default_object_access_control.rs +++ b/src/client/default_object_access_control.rs @@ -53,8 +53,7 @@ impl<'a> DefaultObjectAccessControlClient<'a> { .await?; match result { crate::models::Response::Success(mut s) => { - // todo: - // s.bucket = bucket.to_string(); + s.bucket = self.bucket.clone(); Ok(s) } crate::models::Response::Error(e) => Err(e.into()), diff --git a/src/models/create/mod.rs b/src/models/create/mod.rs index ad036b3..9dce89f 100644 --- a/src/models/create/mod.rs +++ 
b/src/models/create/mod.rs @@ -2,7 +2,7 @@ mod bucket; mod bucket_access_control; mod default_object_access_control; //mod notification; -mod payload_format; +//mod payload_format; mod object_access_control; pub(crate) use self::{ @@ -10,7 +10,7 @@ pub(crate) use self::{ bucket_access_control::BucketAccessControl, default_object_access_control::DefaultObjectAccessControl, //notification::Notification, - payload_format::PayloadFormat, + //payload_format::PayloadFormat, object_access_control::ObjectAccessControl, }; \ No newline at end of file diff --git a/src/models/mod.rs b/src/models/mod.rs index d035ace..15ca97f 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -128,5 +128,5 @@ pub(crate) use self::{ response::Response, list_response::ListResponse, update_hmac_metadata::UpdateHmacMetadata, - update_hmac_request::UpdateHmacRequest, + //update_hmac_request::UpdateHmacRequest, }; \ No newline at end of file From 0fc5805fcae7d8b88f68393fa9a13c92756b420a Mon Sep 17 00:00:00 2001 From: SonnyX Date: Tue, 2 May 2023 16:01:06 +0200 Subject: [PATCH 14/26] Make models public --- src/lib.rs | 2 +- src/models/mod.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 23f1fed..e8a7f4f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -95,7 +95,7 @@ pub mod sync; mod configuration; -mod models; +pub mod models; mod download_options; mod error; mod token; diff --git a/src/models/mod.rs b/src/models/mod.rs index 15ca97f..68aad98 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -1,3 +1,4 @@ +//! 
Models used for communication with Google Cloud Platform pub(crate)mod create; mod legacy_iam_role; From 227f0b10f562a7c6c7d657407e1b997d582e90ab Mon Sep 17 00:00:00 2001 From: SonnyX Date: Tue, 2 May 2023 16:19:44 +0200 Subject: [PATCH 15/26] export time, so implementing libraries can use it --- src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index e8a7f4f..018ffc2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -92,7 +92,8 @@ pub mod client; #[cfg(feature = "sync")] pub mod sync; - +// export time, so implementing libraries can use it +pub extern crate time; mod configuration; pub mod models; From b3a469be60704bc7ca091fa6cf97d66fd294f65a Mon Sep 17 00:00:00 2001 From: SonnyX Date: Wed, 3 May 2023 10:26:29 +0200 Subject: [PATCH 16/26] unwrap_or_default --- src/client/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/client.rs b/src/client/client.rs index 213cd7c..50180e7 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -140,7 +140,7 @@ impl ClientBuilder { Client { reqwest: self.client.unwrap_or_default(), token_cache: self.token_cache.unwrap_or(sync::Arc::new(crate::Token::default())), - service_account: self.service_account.unwrap_or(crate::ServiceAccount::default()) + service_account: self.service_account.unwrap_or_default() } } From e0be0e48af6b665245cfd56a1dca2fffeb28d592 Mon Sep 17 00:00:00 2001 From: SonnyX Date: Wed, 3 May 2023 21:57:42 +0200 Subject: [PATCH 17/26] Fix the library --- Cargo.toml | 5 +- README.md | 2 +- src/client/bucket.rs | 10 +- src/client/bucket_access_control.rs | 11 +- src/client/client.rs | 11 +- src/client/default_object_access_control.rs | 74 +++----- src/client/hmac_key.rs | 4 +- src/client/object.rs | 170 ++++++++---------- src/client/object_access_control.rs | 13 +- src/configuration/service_account.rs | 17 +- src/global_client/bucket.rs | 4 +- .../default_object_access_control.rs | 4 - src/global_client/hmac_key.rs | 2 +- 
src/global_client/mod.rs | 18 +- src/global_client/object.rs | 73 ++++---- src/models/default_object_access_control.rs | 3 +- src/models/notification.rs | 14 +- src/models/object.rs | 4 +- src/models/object_read_parameters.rs | 2 +- src/models/response.rs | 30 ++-- src/models/uniform_bucket_level_access.rs | 6 +- src/sized_byte_stream.rs | 4 +- test.sh | 10 +- 23 files changed, 229 insertions(+), 262 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6303c54..fccf749 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,9 +13,8 @@ categories = ["api-bindings", "web-programming"] # maintenance = { status = "actively-developed" } [features] -default = ["native-tls", "ring", "pem", "global-client"] +default = ["native-tls", "ring", "pem", "global-client", "dotenv"] -dotenv = ["dep:dotenv"] global-client = [] sync = ["reqwest/blocking"] native-tls = ["reqwest/default-tls", "openssl"] @@ -30,7 +29,7 @@ serde = { version = "1.0.160", default-features = false, features = [ serde_json = { version = "1.0.96", default-features = false } base64 = { version = "0.21.0", default-features = false } once_cell = { version = "1.17.1", default-features = false } -time = { version = "0.3.20", default-features = false, features = ["serde-well-known", "serde-human-readable", "macros"]} +time = { version = "0.3.20", default-features = false, features = ["serde", "formatting", "parsing"]} hex = { version = "0.4.3", default-features = false, features = ["alloc"] } tokio = { version = "1.28.0", default-features = false, features = ["macros", "rt"] } tokio-util = { version = "0.7.8", default-features = false, features = ["compat"] } diff --git a/README.md b/README.md index 257008e..8aac8a9 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ let mut object = Object::create("my_bucket", content, "folder/filename.txt", "ap // let's copy the file object.copy("my_bucket2: electric boogaloo", "otherfolder/filename.txt", None).await?; // print a link to the file -println!("{}", 
object.download_url(1000)); // download link for 1000 seconds +println!("{}", object.download_url(1000)); // download link that expires after 1000 seconds // remove the file from the bucket object.delete(None).await?; ``` diff --git a/src/client/bucket.rs b/src/client/bucket.rs index 58bf7fc..f679f0f 100644 --- a/src/client/bucket.rs +++ b/src/client/bucket.rs @@ -5,8 +5,8 @@ use crate::{models::{create, ListResponse, IamPolicy, TestIamPermission}, Bucket #[derive(Debug)] pub struct BucketClient<'a> { pub(crate) client: &'a super::client::Client, - pub(crate) bucket_url: &'a str, - pub(crate) project_id: &'a str, + pub(crate) bucket_url: String, + pub(crate) project_id: String, } impl<'a> BucketClient<'a> { @@ -36,7 +36,7 @@ impl<'a> BucketClient<'a> { pub async fn create(&self, new_bucket: &create::Bucket) -> Result { let headers = self.client.get_headers().await?; let url = format!("{}/", self.bucket_url); - let project = self.project_id; + let project = &self.project_id; let query = [("project", project)]; let result: crate::models::Response = self.client.reqwest.post(&url).headers(headers).query(&query).json(new_bucket).send().await?.json().await?; Ok(result?) 
@@ -62,7 +62,7 @@ impl<'a> BucketClient<'a> { pub async fn list(&self) -> Result, Error> { let headers = self.client.get_headers().await?; let url = format!("{}/", self.bucket_url); - let project = self.project_id; + let project = &self.project_id; let query = [("project", project)]; let result: crate::models::Response> = self.client.reqwest.get(&url).headers(headers).query(&query).send().await?.json().await?; Ok(result?.items) @@ -91,7 +91,7 @@ impl<'a> BucketClient<'a> { /// ``` pub async fn read(&self, name: &str) -> Result { let headers = self.client.get_headers().await?; - let url = format!("{}/{}", self.bucket_url, crate::percent_encode(name),); + let url = format!("{}/{}", self.bucket_url, crate::percent_encode(name)); let result: crate::models::Response = self.client.reqwest.get(&url).headers(headers).send().await?.json().await?; Ok(result?) } diff --git a/src/client/bucket_access_control.rs b/src/client/bucket_access_control.rs index f91ae1a..0a3852e 100644 --- a/src/client/bucket_access_control.rs +++ b/src/client/bucket_access_control.rs @@ -1,4 +1,4 @@ -use crate::{models::{create, BucketAccessControl, ListResponse, Entity}, Error}; +use crate::{models::{create, BucketAccessControl, ListResponse, Entity, Response}, Error}; /// Operations on [`BucketAccessControl`](BucketAccessControl)s. #[derive(Debug)] @@ -60,11 +60,10 @@ impl<'a> BucketAccessControlClient<'a> { /// ``` pub async fn list(&self) -> Result, Error> { let headers = self.client.get_headers().await?; - let result: crate::models::Response> = self.client.reqwest.get(&self.bucket_acl_url).headers(headers).send().await?.json().await?; - match result { - crate::models::Response::Success(s) => Ok(s.items), - crate::models::Response::Error(e) => Err(e.into()), - } + let response = self.client.reqwest.get(&self.bucket_acl_url).headers(headers).send().await?; + + let object = response.json::>>().await??.items; + Ok(object) } /// Returns the ACL entry for the specified entity. 
diff --git a/src/client/client.rs b/src/client/client.rs index 50180e7..10a2e9f 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -59,8 +59,8 @@ impl Client { /// Operations on [`Bucket`](crate::bucket::Bucket)s. pub fn bucket(&self) -> BucketClient { BucketClient { - bucket_url: "https://storage.googleapis.com/storage/v1/b/", - project_id: &self.service_account.project_id, + bucket_url: "https://storage.googleapis.com/storage/v1/b".to_string(), + project_id: self.service_account.project_id.clone(), client: self, } } @@ -70,7 +70,7 @@ impl Client { let url = format!("https://storage.googleapis.com/storage/v1/b/{}/acl", crate::percent_encode(bucket)); BucketAccessControlClient { bucket_acl_url: url, - client: &self + client: self } } @@ -94,9 +94,10 @@ impl Client { } /// Operations on [`Object`](crate::object::Object)s. - pub fn object(&self) -> ObjectClient { + pub fn object(&self, bucket: &str) -> ObjectClient { ObjectClient { - base_url: "https://storage.googleapis.com/storage/v1/", + base_url: format!("https://storage.googleapis.com/storage/v1/b/{}/o", crate::percent_encode(bucket)), + insert_url: format!("https://storage.googleapis.com/upload/storage/v1/b/{}/o", crate::percent_encode(bucket)), client: self, } } diff --git a/src/client/default_object_access_control.rs b/src/client/default_object_access_control.rs index 0f36c1a..c8893ae 100644 --- a/src/client/default_object_access_control.rs +++ b/src/client/default_object_access_control.rs @@ -1,4 +1,4 @@ -use crate::{models::{create, DefaultObjectAccessControl, ListResponse, Entity}, Error}; +use crate::{models::{create, DefaultObjectAccessControl, ListResponse, Entity, Response}, Error}; /// Operations on [`DefaultObjectAccessControl`](DefaultObjectAccessControl)s. 
@@ -39,25 +39,17 @@ impl<'a> DefaultObjectAccessControlClient<'a> { new_acl: &create::DefaultObjectAccessControl, ) -> Result { let headers = self.client.get_headers().await?; - let url = format!( - "{}", - self.base_url - ); - let result: crate::models::Response = self.client.reqwest + let url = self.base_url.to_string(); + let response = self.client.reqwest .post(&url) .headers(headers) .json(new_acl) .send() - .await? - .json() .await?; - match result { - crate::models::Response::Success(mut s) => { - s.bucket = self.bucket.clone(); - Ok(s) - } - crate::models::Response::Error(e) => Err(e.into()), - } + + let mut object = response.json::>().await??; + object.bucket = self.bucket.clone(); + Ok(object) } /// Retrieves default object ACL entries on the specified bucket. @@ -79,15 +71,14 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// ``` pub async fn list(&self) -> Result, Error> { let headers = self.client.get_headers().await?; - let result: crate::models::Response> = self.client.reqwest.get(&self.base_url).headers(headers).send().await?.json().await?; - match result { - crate::models::Response::Success(s) => Ok(s.items.into_iter() - .map(|item| DefaultObjectAccessControl { - bucket: self.bucket.to_string(), - ..item - }).collect()), - crate::models::Response::Error(e) => Err(e.into()), - } + let response = self.client.reqwest.get(&self.base_url).headers(headers).send().await?; + + let mut object = response.json::>>().await??.items; + object = object.into_iter().map(|item| DefaultObjectAccessControl { + bucket: self.bucket.to_string(), + ..item + }).collect(); + Ok(object) } /// Read a single `DefaultObjectAccessControl`. @@ -121,20 +112,15 @@ impl<'a> DefaultObjectAccessControlClient<'a> { self.base_url, crate::percent_encode(&entity.to_string()), ); - let result: crate::models::Response = self.client.reqwest + let response = self.client.reqwest .get(&url) .headers(headers) .send() - .await? 
- .json() .await?; - match result { - crate::models::Response::Success(mut s) => { - s.bucket = self.bucket.to_string(); - Ok(s) - } - crate::models::Response::Error(e) => Err(e.into()), - } + + let mut object = response.json::>().await??; + object.bucket = self.bucket.clone(); + Ok(object) } /// Update the current `DefaultObjectAccessControl`. @@ -166,14 +152,11 @@ impl<'a> DefaultObjectAccessControlClient<'a> { self.base_url, crate::percent_encode(&default_object_access_control.entity.to_string()), ); - let result: crate::models::Response = self.client.reqwest.put(&url).headers(headers).json(default_object_access_control).send().await?.json().await?; - match result { - crate::models::Response::Success(mut s) => { - s.bucket = default_object_access_control.bucket.to_string(); - Ok(s) - } - crate::models::Response::Error(e) => Err(e.into()), - } + let response = self.client.reqwest.put(&url).headers(headers).json(default_object_access_control).send().await?; + + let mut object = response.json::>().await??; + object.bucket = self.bucket.clone(); + Ok(object) } /// Delete this 'DefaultObjectAccessControl`. 
@@ -199,16 +182,13 @@ impl<'a> DefaultObjectAccessControlClient<'a> { default_object_access_control: DefaultObjectAccessControl, ) -> Result<(), crate::Error> { let headers = self.client.get_headers().await?; - let url = format!( - "{}/{}", - self.base_url, - crate::percent_encode(&default_object_access_control.entity.to_string()), - ); + let url = format!("{}/{}", self.base_url, crate::percent_encode(&default_object_access_control.entity.to_string())); let response = self.client.reqwest .delete(&url) .headers(headers) .send() .await?; + if response.status().is_success() { Ok(()) } else { diff --git a/src/client/hmac_key.rs b/src/client/hmac_key.rs index 6ddbf37..16004ab 100644 --- a/src/client/hmac_key.rs +++ b/src/client/hmac_key.rs @@ -78,6 +78,8 @@ impl<'a> HmacKeyClient<'a> { .text() .await?; let result: Result>, serde_json::Error> = serde_json::from_str(&response); + let single_result: Result, serde_json::Error> = serde_json::from_str(&response); + // todo: test this with one hmac key // This function rquires more complicated error handling because when there is only one // entry, Google will return the response `{ "kind": "storage#hmacKeysMetadata" }` instead @@ -87,7 +89,7 @@ impl<'a> HmacKeyClient<'a> { crate::models::Response::Success(s) => Ok(s.items), crate::models::Response::Error(e) => Err(e.into()), }, - Err(_) => Ok(vec![]), + Err(_) => Ok(vec![single_result??]), } } diff --git a/src/client/object.rs b/src/client/object.rs index 9048a26..8835423 100644 --- a/src/client/object.rs +++ b/src/client/object.rs @@ -1,13 +1,12 @@ use futures_util::{Stream, stream, TryStream}; -use reqwest::StatusCode; - -use crate::{models::{CreateParameters, ObjectList, ReadParameters, UpdateParameters, DeleteParameters, ComposeRequest, ComposeParameters, CopyParameters, RewriteParameters, rewrite_response::RewriteResponse}, Object, Error, ListRequest, sized_byte_stream::SizedByteStream}; +use crate::{models::{CreateParameters, ObjectList, ReadParameters, 
UpdateParameters, DeleteParameters, ComposeRequest, ComposeParameters, CopyParameters, RewriteParameters, Response, rewrite_response::RewriteResponse}, Object, Error, ListRequest, sized_byte_stream::SizedByteStream}; /// Operations on [`Object`](Object)s. #[derive(Debug)] pub struct ObjectClient<'a> { pub(crate) client: &'a super::client::Client, - pub(crate) base_url: &'a str, + pub(crate) base_url: String, + pub(crate) insert_url: String, } impl<'a> ObjectClient<'a> { @@ -30,7 +29,6 @@ impl<'a> ObjectClient<'a> { /// ``` pub async fn create( &self, - bucket: &str, file: Vec, filename: &str, mime_type: &str, @@ -38,7 +36,7 @@ impl<'a> ObjectClient<'a> { ) -> Result { use reqwest::header::{CONTENT_LENGTH, CONTENT_TYPE}; - let url = &format!("{}/{}/o?name={}&uploadType=media", self.base_url, bucket, filename); + let url = &format!("{}?name={}&uploadType=media", self.insert_url, crate::percent_encode(filename)); let mut headers = self.client.get_headers().await?; headers.insert(CONTENT_TYPE, mime_type.parse()?); headers.insert(CONTENT_LENGTH, file.len().to_string().parse()?); @@ -49,11 +47,11 @@ impl<'a> ObjectClient<'a> { .body(file) .send() .await?; - if response.status() == 200 { - Ok(serde_json::from_str(&response.text().await?)?) - } else { - Err(crate::Error::new(&response.text().await?)) - } + + let mut object = response.json::>().await??; + object.private_key = Some(self.client.service_account.private_key.clone()); + object.client_email = Some(self.client.service_account.client_email.clone()); + Ok(object) } /// Create a new object. This works in the same way as `ObjectClient::create` but allows setting of metadata for this object. 
@@ -80,13 +78,12 @@ impl<'a> ObjectClient<'a> { /// ``` pub async fn create_with( &self, - bucket: &str, file: Vec, filename: &str, mime_type: &str, metadata: &serde_json::Value, ) -> Result { - let url = &format!("{}/{}/o?name={}&uploadType=multipart", self.base_url, bucket, filename); + let url = &format!("{}?name={}&uploadType=multipart", self.insert_url, crate::percent_encode(filename)); // single-request upload that includes metadata require a mutlipart request where // part 1 is metadata, and part2 is the file to upload @@ -103,12 +100,10 @@ impl<'a> ObjectClient<'a> { .multipart(form) .send() .await?; - - if response.status() == 200 { - Ok(serde_json::from_str(&response.text().await?)?) - } else { - Err(crate::Error::new(&response.text().await?)) - } + let mut object = response.json::>().await??; + object.private_key = Some(self.client.service_account.private_key.clone()); + object.client_email = Some(self.client.service_account.client_email.clone()); + Ok(object) } /// Create a new object. This works in the same way as `ObjectClient::create`, except it does not need @@ -132,7 +127,6 @@ impl<'a> ObjectClient<'a> { /// ``` pub async fn create_streamed_with( &self, - bucket: &str, stream: S, filename: &str, mime_type: &str, @@ -143,7 +137,7 @@ impl<'a> ObjectClient<'a> { S::Error: Into>, bytes::Bytes: From, { - let url = &format!("{}/{}/o?name={}&uploadType=multipart", self.base_url, bucket, filename); + let url = &format!("{}?name={}&uploadType=multipart", self.insert_url, crate::percent_encode(filename)); let headers = self.client.get_headers().await?; // single-request upload that includes metadata require a mutlipart request where @@ -162,11 +156,10 @@ impl<'a> ObjectClient<'a> { .multipart(form) .send() .await?; - if response.status() == 200 { - Ok(serde_json::from_str(&response.text().await?)?) 
- } else { - Err(crate::Error::new(&response.text().await?)) - } + let mut object = response.json::>().await??; + object.private_key = Some(self.client.service_account.private_key.clone()); + object.client_email = Some(self.client.service_account.client_email.clone()); + Ok(object) } /// Create a new object. This works in the same way as `ObjectClient::create`, except it does not need @@ -190,7 +183,6 @@ impl<'a> ObjectClient<'a> { /// ``` pub async fn create_streamed( &self, - bucket: &str, stream: S, length: impl Into>, filename: &str, @@ -204,7 +196,7 @@ impl<'a> ObjectClient<'a> { { use reqwest::header::{CONTENT_LENGTH, CONTENT_TYPE}; - let url = &format!("{}/{}/o?name={}&uploadType=media", self.base_url, bucket, filename); + let url = &format!("{}?name={}&uploadType=media", self.insert_url, crate::percent_encode(filename)); let mut headers = self.client.get_headers().await?; headers.insert(CONTENT_TYPE, mime_type.parse()?); if let Some(length) = length.into() { @@ -219,11 +211,10 @@ impl<'a> ObjectClient<'a> { .body(body) .send() .await?; - if response.status() == 200 { - Ok(serde_json::from_str(&response.text().await?)?) - } else { - Err(crate::Error::new(&response.text().await?)) - } + let mut object = response.json::>().await??; + object.private_key = Some(self.client.service_account.private_key.clone()); + object.client_email = Some(self.client.service_account.client_email.clone()); + Ok(object) } /// Obtain a list of objects within this Bucket. 
@@ -241,12 +232,9 @@ impl<'a> ObjectClient<'a> { /// ``` pub async fn list( &self, - bucket: &str, list_request: ListRequest, ) -> Result>, Error> { - let bucket = bucket.clone(); - - enum ListState { + enum ListState { Start(ListRequest), HasMore(ListRequest), Done, @@ -269,8 +257,8 @@ impl<'a> ObjectClient<'a> { } let reqwest = self.client.reqwest.clone(); - let headers = self.client.get_headers().await?.clone(); - let url = format!("{}/b/{}/o", self.base_url, crate::percent_encode(bucket)); + let headers = self.client.get_headers().await?; + let url = self.base_url.to_string(); Ok(stream::unfold(ListState::Start(list_request), move |mut state| { let reqwest = reqwest.clone(); @@ -278,8 +266,6 @@ impl<'a> ObjectClient<'a> { let headers = headers.clone(); async move { - - let req = state.req_mut()?; if req.max_results == Some(0) { return None; @@ -345,26 +331,26 @@ impl<'a> ObjectClient<'a> { /// ``` pub async fn read( &self, - bucket: &str, file_name: &str, parameters: Option, ) -> Result { //let paramters = qs:: let url = format!( - "{}/b/{}/o/{}", + "{}/{}", self.base_url, - crate::percent_encode(bucket), crate::percent_encode(file_name), ); - let result: crate::models::Response = self.client.reqwest + let response = self.client.reqwest .get(&url) .query(¶meters) .headers(self.client.get_headers().await?) .send() - .await? - .json() .await?; - Ok(result?) + + let mut object = response.json::>().await??; + object.private_key = Some(self.client.service_account.private_key.clone()); + object.client_email = Some(self.client.service_account.client_email.clone()); + Ok(object) } /// Download the content of the object with the specified name in the specified bucket. 
@@ -382,27 +368,26 @@ impl<'a> ObjectClient<'a> { /// ``` pub async fn download( &self, - bucket: &str, file_name: &str, parameters: Option, ) -> Result, Error> { let url = format!( - "{}/b/{}/o/{}?alt=media", + "{}/{}?alt=media", self.base_url, - crate::percent_encode(bucket), crate::percent_encode(file_name), ); - let resp = self.client.reqwest + let response = self.client.reqwest .get(&url) .query(¶meters) .headers(self.client.get_headers().await?) .send() .await?; - if resp.status() == StatusCode::NOT_FOUND { - Err(crate::Error::Other(resp.text().await?)) - } else { - Ok(resp.error_for_status()?.bytes().await?.to_vec()) - } + + if response.status() == reqwest::StatusCode::NOT_FOUND { + Err(crate::Error::Other(response.text().await?)) + } else { + Ok(response.error_for_status()?.bytes().await?.to_vec()) + } } /// Download the content of the object with the specified name in the specified bucket, without @@ -429,15 +414,13 @@ impl<'a> ObjectClient<'a> { /// ``` pub async fn download_streamed( &self, - bucket: &str, file_name: &str, parameters: Option, ) -> Result> + Unpin, Error> { use futures_util::TryStreamExt; let url = format!( - "{}/b/{}/o/{}?alt=media", + "{}/{}?alt=media", self.base_url, - crate::percent_encode(bucket), crate::percent_encode(file_name), ); let response = self.client.reqwest @@ -448,8 +431,7 @@ impl<'a> ObjectClient<'a> { .await? .error_for_status()?; let size = response.content_length(); - let bytes = response - .bytes_stream().map_err(Error::from); + let bytes = response.bytes_stream().map_err(Error::from); Ok(SizedByteStream::new(bytes, size)) } @@ -478,21 +460,22 @@ impl<'a> ObjectClient<'a> { parameters: Option, ) -> Result { let url = format!( - "{}/b/{}/o/{}", + "{}/{}", self.base_url, - crate::percent_encode(&object.bucket), crate::percent_encode(&object.name), ); - let result: crate::models::Response = self.client.reqwest + let response = self.client.reqwest .put(&url) .query(¶meters) .headers(self.client.get_headers().await?) 
.json(&object) .send() - .await? - .json() .await?; - Ok(result?) + + let mut object = response.json::>().await??; + object.private_key = Some(self.client.service_account.private_key.clone()); + object.client_email = Some(self.client.service_account.client_email.clone()); + Ok(object) } /// Deletes a single object with the specified name in the specified bucket. @@ -510,14 +493,12 @@ impl<'a> ObjectClient<'a> { /// ``` pub async fn delete( &self, - bucket: &str, file_name: &str, parameters: Option, ) -> Result<(), Error> { let url = format!( - "{}/b/{}/o/{}", + "{}/{}", self.base_url, - crate::percent_encode(bucket), crate::percent_encode(file_name), ); let response = self.client.reqwest @@ -526,7 +507,8 @@ impl<'a> ObjectClient<'a> { .headers(self.client.get_headers().await?) .send() .await?; - if response.status().is_success() { + + if response.status().is_success() { Ok(()) } else { Err(crate::Error::Google(response.json().await?)) @@ -567,27 +549,27 @@ impl<'a> ObjectClient<'a> { /// ``` pub async fn compose( &self, - bucket: &str, req: &ComposeRequest, destination_object: &str, parameters: Option, ) -> Result { let url = format!( - "{}/b/{}/o/{}/compose", + "{}/{}/compose", self.base_url, - crate::percent_encode(bucket), crate::percent_encode(destination_object) ); - let result: crate::models::Response = self.client.reqwest + let response = self.client.reqwest .post(&url) .query(¶meters) .headers(self.client.get_headers().await?) .json(req) .send() - .await? - .json() .await?; - Ok(result?) + + let mut object = response.json::>().await??; + object.private_key = Some(self.client.service_account.private_key.clone()); + object.client_email = Some(self.client.service_account.client_email.clone()); + Ok(object) } /// Copy this object to the target bucket and path. 
@@ -600,7 +582,7 @@ impl<'a> ObjectClient<'a> { /// /// let client = Client::default(); /// let obj1 = client.object("my_bucket").read("file1", None).await?; - /// let obj2 = client.object().copy(&obj1, "my_other_bucket", "file2", None).await?; + /// let obj2 = client.object("my_bucket").copy(&obj1, "my_other_bucket", "file2", None).await?; /// // obj2 is now a copy of obj1. /// # Ok(()) /// # } @@ -615,24 +597,25 @@ impl<'a> ObjectClient<'a> { use reqwest::header::CONTENT_LENGTH; let url = format!( - "{base}/b/{sBucket}/o/{sObject}/copyTo/b/{dBucket}/o/{dObject}", + "{base}/{sObject}/copyTo/b/{dBucket}/o/{dObject}", base = self.base_url, - sBucket = crate::percent_encode(&object.bucket), sObject = crate::percent_encode(&object.name), dBucket = crate::percent_encode(destination_bucket), dObject = crate::percent_encode(path), ); let mut headers = self.client.get_headers().await?; headers.insert(CONTENT_LENGTH, "0".parse()?); - let result: crate::models::Response = self.client.reqwest + let response = self.client.reqwest .post(&url) .query(¶meters) .headers(headers) .send() - .await? - .json() .await?; - Ok(result?) + + let mut object = response.json::>().await??; + object.private_key = Some(self.client.service_account.private_key.clone()); + object.client_email = Some(self.client.service_account.client_email.clone()); + Ok(object) } /// Moves a file from the current location to the target bucket and path. @@ -652,7 +635,7 @@ impl<'a> ObjectClient<'a> { /// /// let client = Client::default(); /// let obj1 = client.object("my_bucket").read("file1", None).await?; - /// let obj2 = client.object().rewrite(&obj1, "my_other_bucket", "file2", None).await?; + /// let obj2 = client.object("my_bucket").rewrite(&obj1, "my_other_bucket", "file2", None).await?; /// // obj2 is now a copy of obj1. 
/// # Ok(()) /// # } @@ -667,25 +650,24 @@ impl<'a> ObjectClient<'a> { use reqwest::header::CONTENT_LENGTH; let url = format!( - "{base}/b/{sBucket}/o/{sObject}/rewriteTo/b/{dBucket}/o/{dObject}", + "{base}/{sObject}/rewriteTo/b/{dBucket}/o/{dObject}", base = self.base_url, - sBucket = crate::percent_encode(&object.bucket), sObject = crate::percent_encode(&object.name), dBucket = crate::percent_encode(destination_bucket), dObject = crate::percent_encode(path), ); let mut headers = self.client.get_headers().await?; headers.insert(CONTENT_LENGTH, "0".parse()?); - let s = self.client.reqwest + let response = self.client.reqwest .post(&url) .query(¶meters) .headers(headers) .send() - .await? - .text() .await?; - let result: RewriteResponse = serde_json::from_str(&s).unwrap(); - Ok(result.resource) + let mut object = response.json::().await?.resource; + object.private_key = Some(self.client.service_account.private_key.clone()); + object.client_email = Some(self.client.service_account.client_email.clone()); + Ok(object) } } diff --git a/src/client/object_access_control.rs b/src/client/object_access_control.rs index f65fae6..ae1357c 100644 --- a/src/client/object_access_control.rs +++ b/src/client/object_access_control.rs @@ -1,4 +1,4 @@ -use crate::{models::{create, ObjectAccessControl, ListResponse, Entity}, Error}; +use crate::{models::{create, ObjectAccessControl, ListResponse, Entity, Response}, Error}; /// Operations on [`ObjectAccessControl`](ObjectAccessControl)s. @@ -39,17 +39,14 @@ impl<'a> ObjectAccessControlClient<'a> { pub async fn list( &self ) -> Result, Error> { - let result: crate::models::Response> = self.client.reqwest + let result = self.client.reqwest .get(&self.acl_url) .headers(self.client.get_headers().await?) .send() .await? 
- .json() - .await?; - match result { - crate::models::Response::Success(s) => Ok(s.items), - crate::models::Response::Error(e) => Err(e.into()), - } + .json::>>() + .await??; + Ok(result.items) } /// Returns the `ACL` entry for the specified entity on the specified bucket. diff --git a/src/configuration/service_account.rs b/src/configuration/service_account.rs index 46f8b0c..aa29cf3 100644 --- a/src/configuration/service_account.rs +++ b/src/configuration/service_account.rs @@ -1,3 +1,7 @@ +use std::str::FromStr; + +use crate::Error; + /// A deserialized `service-account-********.json`-file. #[derive(serde::Deserialize, Debug)] pub struct ServiceAccount { @@ -27,7 +31,7 @@ pub struct ServiceAccount { impl Default for ServiceAccount { fn default() -> Self { #[cfg(feature = "dotenv")] - dotenv::dotenv().ok(); + dotenv::dotenv().unwrap(); let credentials_json = std::env::var("SERVICE_ACCOUNT") .or_else(|_| std::env::var("GOOGLE_APPLICATION_CREDENTIALS")) .map(|path| std::fs::read_to_string(path).expect("SERVICE_ACCOUNT file not found")) @@ -45,14 +49,15 @@ impl Default for ServiceAccount { } } -impl ServiceAccount { - /// Method for creating a `ServiceAccount` from a json string. 
- pub fn from_str(credentials_json: &str) -> Self { - let account: Self = serde_json::from_str(&credentials_json).expect("Format for Service Account invalid"); +impl FromStr for ServiceAccount { + type Err = Error; + + fn from_str(credentials_json: &str) -> Result { + let account: Self = serde_json::from_str(credentials_json).expect("Format for Service Account invalid"); assert_eq!( account.r#type, "service_account", "`type` should be 'service_account'" ); - account + Ok(account) } } diff --git a/src/global_client/bucket.rs b/src/global_client/bucket.rs index 4b76781..7b89db3 100644 --- a/src/global_client/bucket.rs +++ b/src/global_client/bucket.rs @@ -280,7 +280,7 @@ mod tests { #[tokio::test] async fn create() -> Result<(), Box> { #[cfg(feature = "dotenv")] - dotenv::dotenv().ok(); + dotenv::dotenv().unwrap(); let base_name = std::env::var("TEST_BUCKET")?; // use a more complex bucket in this test. let new_bucket = create::Bucket { @@ -376,7 +376,7 @@ mod tests { #[test] fn create() -> Result<(), Box> { #[cfg(feature = "dotenv")] - dotenv::dotenv().ok(); + dotenv::dotenv().unwrap(); let base_name = std::env::var("TEST_BUCKET")?; // use a more complex bucket in this test. 
let new_bucket = create::Bucket { diff --git a/src/global_client/default_object_access_control.rs b/src/global_client/default_object_access_control.rs index 0235a52..1d138d0 100644 --- a/src/global_client/default_object_access_control.rs +++ b/src/global_client/default_object_access_control.rs @@ -183,10 +183,6 @@ mod tests { #[tokio::test] async fn read() -> Result<(), Box> { let bucket = crate::global_client::read_test_bucket().await; - create::DefaultObjectAccessControl { - entity: Entity::AllUsers, - role: Role::Reader, - }; DefaultObjectAccessControl::read(&bucket.name, &Entity::AllUsers).await?; Ok(()) } diff --git a/src/global_client/hmac_key.rs b/src/global_client/hmac_key.rs index 0965f30..f0fb8a5 100644 --- a/src/global_client/hmac_key.rs +++ b/src/global_client/hmac_key.rs @@ -186,7 +186,7 @@ mod tests { #[tokio::test] async fn list() -> Result<(), Box> { - HmacKey::list().await?; + let keys = HmacKey::list().await?; Ok(()) } diff --git a/src/global_client/mod.rs b/src/global_client/mod.rs index 458484e..0ddd237 100644 --- a/src/global_client/mod.rs +++ b/src/global_client/mod.rs @@ -17,16 +17,18 @@ mod test_helpers { pub(crate) async fn read_test_bucket() -> Bucket { #[cfg(feature = "dotenv")] - dotenv::dotenv().ok(); + dotenv::dotenv().unwrap(); let name = std::env::var("TEST_BUCKET").unwrap(); match Bucket::read(&name).await { Ok(bucket) => bucket, - Err(_not_found) => Bucket::create(&create::Bucket { - name, - ..create::Bucket::default() - }) - .await - .unwrap(), + Err(_not_found) => { + Bucket::create(&create::Bucket { + name, + ..create::Bucket::default() + }) + .await + .unwrap() + }, } } @@ -48,7 +50,7 @@ mod test_helpers { std::thread::sleep(std::time::Duration::from_millis(1500)); // avoid getting rate limited #[cfg(feature = "dotenv")] - dotenv::dotenv().ok(); + dotenv::dotenv().unwrap(); let base_name = std::env::var("TEST_BUCKET").unwrap(); let name = format!("{}-{}", base_name, name); let new_bucket = create::Bucket { diff --git 
a/src/global_client/object.rs b/src/global_client/object.rs index dec3603..c7379ec 100644 --- a/src/global_client/object.rs +++ b/src/global_client/object.rs @@ -26,8 +26,8 @@ impl Object { parameters: Option, ) -> Result { crate::CLOUD_CLIENT - .object() - .create(bucket, file, filename, mime_type, parameters) + .object(bucket) + .create(file, filename, mime_type, parameters) .await } @@ -43,7 +43,7 @@ impl Object { mime_type: &str, parameters: Option, ) -> Result { - crate::runtime()?.block_on(Self::create(bucket, file, filename, mime_type, parameters)) + crate::runtime()?.block_on(Self::create(file, filename, mime_type, parameters)) } /// Create a new object with metadata. @@ -74,8 +74,8 @@ impl Object { metadata: &serde_json::Value, ) -> Result { crate::CLOUD_CLIENT - .object() - .create_with(bucket, file, filename, mime_type, metadata) + .object(bucket) + .create_with(file, filename, mime_type, metadata) .await } @@ -127,8 +127,8 @@ impl Object { bytes::Bytes: From, { crate::CLOUD_CLIENT - .object() - .create_streamed(bucket, stream, length, filename, mime_type, parameters) + .object(bucket) + .create_streamed(stream, length, filename, mime_type, parameters) .await } @@ -170,12 +170,12 @@ impl Object { /// # Ok(()) /// # } /// ``` - pub async fn list<'a>( - bucket: &'a str, + pub async fn list( + bucket: &str, list_request: ListRequest, - ) -> Result> + '_, Error> { - let object_client : crate::client::ObjectClient<'a> = crate::CLOUD_CLIENT.object(); - object_client.list(bucket.clone(), list_request).await + ) -> Result>, Error> { + let object_client = crate::CLOUD_CLIENT.object(bucket); + object_client.list(list_request).await } /// The synchronous equivalent of `Object::list`. 
@@ -187,7 +187,7 @@ impl Object { use futures_util::TryStreamExt; let rt = crate::runtime()?; - let listed = rt.block_on(Self::list(bucket, list_request))?; + let listed = rt.block_on(Self::list(list_request))?; rt.block_on(listed.try_collect()) } @@ -208,8 +208,8 @@ impl Object { parameters: Option, ) -> Result { crate::CLOUD_CLIENT - .object() - .read(bucket, file_name, parameters) + .object(bucket) + .read(file_name, parameters) .await } @@ -223,7 +223,7 @@ impl Object { file_name: &str, parameters: Option, ) -> Result { - crate::runtime()?.block_on(Self::read(bucket, file_name, parameters)) + crate::runtime()?.block_on(Self::read(file_name, parameters)) } /// Download the content of the object with the specified name in the specified bucket. @@ -243,8 +243,8 @@ impl Object { parameters: Option, ) -> Result, Error> { crate::CLOUD_CLIENT - .object() - .download(bucket, file_name, parameters) + .object(bucket) + .download(file_name, parameters) .await } @@ -258,7 +258,7 @@ impl Object { file_name: &str, parameters: Option, ) -> Result, Error> { - crate::runtime()?.block_on(Self::download(bucket, file_name, parameters)) + crate::runtime()?.block_on(Self::download(file_name, parameters)) } /// Download the content of the object with the specified name in the specified bucket, without @@ -286,8 +286,8 @@ impl Object { parameters: Option, ) -> Result> + Unpin, Error> { crate::CLOUD_CLIENT - .object() - .download_streamed(bucket, file_name, parameters) + .object(bucket) + .download_streamed(file_name, parameters) .await } @@ -305,7 +305,7 @@ impl Object { /// # } /// ``` pub async fn update(&self, parameters: Option) -> Result { - crate::CLOUD_CLIENT.object().update(self, parameters).await + crate::CLOUD_CLIENT.object(&self.bucket).update(self, parameters).await } /// The synchronous equivalent of `Object::download`. 
@@ -334,8 +334,8 @@ impl Object { parameters: Option, ) -> Result<(), Error> { crate::CLOUD_CLIENT - .object() - .delete(bucket, file_name, parameters) + .object(bucket) + .delete(file_name, parameters) .await } @@ -349,7 +349,7 @@ impl Object { file_name: &str, parameters: Option, ) -> Result<(), Error> { - crate::runtime()?.block_on(Self::delete(bucket, file_name, parameters)) + crate::runtime()?.block_on(Self::delete(file_name, parameters)) } /// Obtains a single object with the specified name in the specified bucket. @@ -389,8 +389,8 @@ impl Object { parameters: Option, ) -> Result { crate::CLOUD_CLIENT - .object() - .compose(bucket, req, destination_object, parameters) + .object(bucket) + .compose(req, destination_object, parameters) .await } @@ -406,7 +406,7 @@ impl Object { parameters: Option, ) -> Result { - crate::runtime()?.block_on(Self::compose(bucket, req, destination_object, parameters)) + crate::runtime()?.block_on(Self::compose(req, destination_object, parameters)) } /// Copy this object to the target bucket and path @@ -429,7 +429,7 @@ impl Object { parameters: Option, ) -> Result { crate::CLOUD_CLIENT - .object() + .object(&self.bucket) .copy(self, destination_bucket, path, parameters) .await } @@ -475,7 +475,7 @@ impl Object { parameters: Option, ) -> Result { crate::CLOUD_CLIENT - .object() + .object(&self.bucket) .rewrite(self, destination_bucket, path, parameters) .await } @@ -643,7 +643,7 @@ mod tests { let mut result = Object::download_streamed(&bucket.name, "test-download", None).await?; let mut data: Vec = Vec::new(); while let Some(part) = result.next().await { - data.write_all(part?.chunk()); + data.write_all(part?.chunk())?; } assert_eq!(data, content); @@ -667,7 +667,7 @@ mod tests { Object::download_streamed(&bucket.name, "test-download-large", None).await?; let mut data: Vec = Vec::new(); while let Some(part) = result.next().await { - data.write_all(part?.chunk()); + data.write_all(part?.chunk())?; } assert_eq!(data, content); @@ 
-728,6 +728,7 @@ mod tests { None, ) .await?; + let obj2 = Object::create( &bucket.name, vec![2, 3], @@ -752,8 +753,7 @@ mod tests { ], destination: None, }; - let obj3 = - Object::compose(&bucket.name, &compose_request, "test-concatted-file", None).await?; + let obj3 = Object::compose(&bucket.name, &compose_request, "test-concatted-file", None).await?; let url = obj3.download_url(100)?; let content = reqwest::get(&url).await?.text().await?; assert_eq!(content.as_bytes(), &[0, 1, 2, 3]); @@ -774,8 +774,7 @@ mod tests { #[tokio::test] async fn rewrite() -> Result<(), Box> { let bucket = crate::global_client::read_test_bucket().await; - let obj = - Object::create(&bucket.name, vec![0, 1], "test-rewrite", "text/plain", None).await?; + let obj = Object::create(&bucket.name, vec![0, 1], "test-rewrite", "text/plain", None).await?; let obj = obj.rewrite(&bucket.name, "test-rewritten", None).await?; let url = obj.download_url(100)?; let client = reqwest::Client::default(); @@ -797,7 +796,7 @@ mod tests { ]; for name in &complicated_names { let _obj = Object::create(&bucket.name, vec![0, 1], name, "text/plain", None).await?; - let obj = Object::read(&bucket.name, &name, None).await.unwrap(); + let obj = Object::read(&bucket.name, name, None).await.unwrap(); let url = obj.download_url(100)?; let client = reqwest::Client::default(); let download = client.head(&url).send().await?; diff --git a/src/models/default_object_access_control.rs b/src/models/default_object_access_control.rs index 1c2633a..78201f2 100644 --- a/src/models/default_object_access_control.rs +++ b/src/models/default_object_access_control.rs @@ -41,6 +41,5 @@ pub struct DefaultObjectAccessControl { pub etag: String, /// The bucket this resource belongs to. #[serde(default)] - pub bucket: String, // this field is not returned by Google, but we populate it manually for the - // convenience of the end user. 
+ pub bucket: String, // this field is not returned by Google, but we populate it manually for the convenience of the end user. } \ No newline at end of file diff --git a/src/models/notification.rs b/src/models/notification.rs index f06b22c..428c646 100644 --- a/src/models/notification.rs +++ b/src/models/notification.rs @@ -64,15 +64,13 @@ impl Notification { /// Retrieves a list of notification subscriptions for a given bucket.} pub fn list(bucket: &str) -> Result, crate::Error> { let url = format!("{}/v1/b/{}/notificationConfigs", crate::BASE_URL, bucket); - let result: crate::models::Response> = crate::CLIENT + let result = crate::CLIENT .get(&url) .headers(crate::get_headers()?) .send()? - .json()?; - match result { - crate::models::Response::Success(s) => Ok(s.items), - crate::models::Response::Error(e) => Err(e.into()), - } + .json::>>()?; + + Ok(result.items) } /// Permanently deletes a notification subscription. @@ -100,7 +98,7 @@ mod tests { fn create() { let bucket = crate::global_client::read_test_bucket(); #[cfg(feature = "dotenv")] - dotenv::dotenv().ok(); + dotenv::dotenv().unwrap(); let service_account = crate::ServiceAccount::default(); let topic = format!( "//pubsub.googleapis.com/projects/{}/topics/{}", @@ -131,7 +129,7 @@ mod tests { fn delete() { let bucket = crate::global_client::read_test_bucket(); #[cfg(feature = "dotenv")] - dotenv::dotenv().ok(); + dotenv::dotenv().unwrap(); let service_account = crate::ServiceAccount::default(); let topic = format!( "//pubsub.googleapis.com/projects/{}/topics/{}", diff --git a/src/models/object.rs b/src/models/object.rs index 539cbf8..b57620f 100644 --- a/src/models/object.rs +++ b/src/models/object.rs @@ -98,9 +98,9 @@ pub struct Object { pub kms_key_name: Option, #[serde(skip)] - private_key: Option, + pub(crate) private_key: Option, #[serde(skip)] - client_email: Option, + pub(crate) client_email: Option, } impl Object { diff --git a/src/models/object_read_parameters.rs 
b/src/models/object_read_parameters.rs index 06c91d2..0107f6c 100644 --- a/src/models/object_read_parameters.rs +++ b/src/models/object_read_parameters.rs @@ -16,7 +16,7 @@ pub struct ReadParameters { /// Makes the operation conditional on whether the object's current metageneration matches the given value. pub if_metageneration_match: Option, - /// Makes the operation conditional on whether the object's current metageneration does not match the given value. + /// Makes the operation conditional on whether the object's current metageneration does not match the given value. pub if_metageneration_not_match: Option, /// Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full. diff --git a/src/models/response.rs b/src/models/response.rs index 85623d0..2ff79dd 100644 --- a/src/models/response.rs +++ b/src/models/response.rs @@ -53,29 +53,35 @@ impl std::ops::FromResidual> for Resp #[cfg(test)] mod tests { - use crate::{models::{ErrorResponse, ErrorList}, Error}; + use crate::{models::{ErrorResponse, ErrorList}, Error, Bucket}; use super::Response; #[test] fn test_try_impl() -> Result<(), Error> { - let response = Response::Success(()); + let response = "{\n \"kind\": \"storage#bucket\",\n \"selfLink\": \"https://www.googleapis.com/storage/v1/b/test-bucket-test-create\",\n \"id\": \"test-bucket-test-create\",\n \"name\": \"test-bucket-test-create\",\n \"projectNumber\": \"543254\",\n \"metageneration\": \"1\",\n \"location\": \"US-EAST1\",\n \"storageClass\": \"STANDARD\",\n \"etag\": \"CAE=\",\n \"defaultEventBasedHold\": true,\n \"timeCreated\": \"2023-05-03T16:44:38.911Z\",\n \"updated\": \"2023-05-03T16:44:38.911Z\",\n \"acl\": [\n {\n \"kind\": \"storage#bucketAccessControl\",\n \"id\": \"test-bucket-test-create/allUsers\",\n \"selfLink\": \"https://www.googleapis.com/storage/v1/b/test-bucket-test-create/acl/allUsers\",\n \"bucket\": \"test-bucket-test-create\",\n \"entity\": \"allUsers\",\n 
\"role\": \"READER\",\n \"etag\": \"CAE=\"\n },\n {\n \"kind\": \"storage#bucketAccessControl\",\n \"id\": \"test-bucket-test-create/project-owners-454645\",\n \"selfLink\": \"https://www.googleapis.com/storage/v1/b/test-bucket-test-create/acl/project-owners-45645\",\n \"bucket\": \"test-bucket-test-create\",\n \"entity\": \"project-owners-456456\",\n \"role\": \"OWNER\",\n \"etag\": \"CAE=\",\n \"projectTeam\": {\n \"projectNumber\": \"45674\",\n \"team\": \"owners\"\n }\n }\n ],\n \"defaultObjectAcl\": [\n {\n \"kind\": \"storage#objectAccessControl\",\n \"entity\": \"allUsers\",\n \"role\": \"READER\",\n \"etag\": \"CAE=\"\n }\n ],\n \"owner\": {\n \"entity\": \"project-owners-4564\"\n },\n \"iamConfiguration\": {\n \"bucketPolicyOnly\": {\n \"enabled\": false\n },\n \"uniformBucketLevelAccess\": {\n \"enabled\": false\n },\n \"publicAccessPrevention\": \"inherited\"\n },\n \"locationType\": \"region\"\n}\n"; + let response = serde_json::from_slice::>(response.as_bytes()); + let response = response.expect("failed to map response as a response"); + let output = response?; - assert_eq!(output, ()); + assert_eq!(output.kind, "storage#bucket"); Ok(()) } #[test] fn test_try_impl_error() -> Result<(), Error> { - let response = Response::Error::<()>(ErrorResponse { - error: ErrorList { - errors: Vec::new(), - code: 250, - message: "Some error occurred".to_string(), - }, - }); - let output = response?; - assert_eq!(output, ()); + let function = || { + let response = Response::Error::<()>(ErrorResponse { + error: ErrorList { + errors: Vec::new(), + code: 250, + message: "Some error occurred".to_string(), + }, + }); + response?; + Ok::<(), Error>(()) + }; + assert_eq!(function().is_err(), true); Ok(()) } } \ No newline at end of file diff --git a/src/models/uniform_bucket_level_access.rs b/src/models/uniform_bucket_level_access.rs index ba69398..02c8392 100644 --- a/src/models/uniform_bucket_level_access.rs +++ b/src/models/uniform_bucket_level_access.rs @@ -1,3 +1,5 @@ 
+use time::OffsetDateTime; + /// Access that is configured for all objects in one go. #[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] #[serde(rename_all = "camelCase")] @@ -10,6 +12,6 @@ pub struct UniformBucketLevelAccess { /// /// iamConfiguration.uniformBucketLevelAccess.enabled may be changed from true to false until /// the locked time, after which the field is immutable. - #[serde(with = "time::serde::rfc3339::option")] - pub locked_time: Option, + #[serde(default, with = "time::serde::rfc3339::option")] + pub locked_time: Option, } \ No newline at end of file diff --git a/src/sized_byte_stream.rs b/src/sized_byte_stream.rs index 34b5b21..0148678 100644 --- a/src/sized_byte_stream.rs +++ b/src/sized_byte_stream.rs @@ -8,13 +8,13 @@ pub struct SizedByteStream> + Unpin bytes: S, } -impl<'a, S: Stream> + Unpin> SizedByteStream { +impl> + Unpin> SizedByteStream { pub(crate) fn new(bytes: S, size: Option) -> Self { Self { size, bytes } } } -impl<'a, S: Stream> + Unpin> Stream for SizedByteStream { +impl> + Unpin> Stream for SizedByteStream { type Item = Result; fn poll_next( diff --git a/test.sh b/test.sh index 909c340..537b9f4 100644 --- a/test.sh +++ b/test.sh @@ -1,7 +1,7 @@ set -e -echo && echo '--------------------------------' && echo 'Runing sync tests' -cargo test --features sync,global-client -- --test-threads=1 -echo && echo '--------------------------------' && echo 'Runing sync tests with rustls' -cargo test --no-default-features --features sync,rustls-tls,global-client -- --test-threads=1 +echo && echo '--------------------------------' && echo 'Runing async tests' +cargo test --features dotenv,sync,global-client -- --test-threads=1 +echo && echo '--------------------------------' && echo 'Runing async tests with rustls' +cargo test --no-default-features --features dotenv,sync,rustls-tls,global-client -- --test-threads=1 echo && echo '--------------------------------' && echo 'Runing sync tests with all features' -cargo test 
--all-features -- --test-threads=1 +/cargo test --all-features -- --test-threads=1 From 36dac7605d6269e69fac2884508d0c169c44fab5 Mon Sep 17 00:00:00 2001 From: SonnyX Date: Thu, 4 May 2023 14:02:01 +0200 Subject: [PATCH 18/26] Fix the sync module --- .github/workflows/rust.yml | 12 ++++----- CHANGELOG.md | 12 +++++++++ Cargo.toml | 4 +-- src/global_client/hmac_key.rs | 2 +- src/global_client/object.rs | 12 ++++----- src/sync/bucket.rs | 4 +-- src/sync/bucket_access_control.rs | 5 ++-- src/sync/client.rs | 18 ++++++------- src/sync/default_object_access_control.rs | 6 ++--- src/sync/hmac_key.rs | 2 +- src/sync/object.rs | 32 ++++++++--------------- src/sync/object_access_control.rs | 6 ++--- 12 files changed, 55 insertions(+), 60 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 9092e62..001e8bf 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -19,26 +19,24 @@ jobs: steps: - uses: actions/checkout@v2 + # - name: Run cargo fmt # uses: actions-rs/cargo@v1 # with: # command: fmt # args: -- --check + - name: Run cargo clippy uses: actions-rs/clippy-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} args: -- -D warnings - - name: Create Secret - run: 'echo "$SECRET_FILE" > auth.json' - env: - SECRET_FILE: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}} - - name: Create Test File - run: 'echo "Temporary test text to test the transfer." > myemma.txt' + - name: Build run: cargo build + - name: Run tests run: cargo test ${{ matrix.features }} -- --test-threads=1 env: - GOOGLE_APPLICATION_CREDENTIALS: auth.json + SERVICE_ACCOUNT_JSON: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}} TEST_BUCKET: cloud-storage-rs-test-bucket diff --git a/CHANGELOG.md b/CHANGELOG.md index bc65882..5fbfe84 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,15 @@ +# 1.0.0 +Refactored the library to have one model per file, to improve maintainabillity. 
+Refactor the library to use less static variables (mainly ServiceAccount) +Added a ClientBuilder by @alexpusch +Made the unmaintained dependency `dotenv` optional +Provided a new way to load in ServiceAccount configuration: `ServiceAccount::from_str()` +Dramatically improved download performance by streaming an array of bytes, rather than a single byte per poll +Moved variables used by all functions in a client to the constructor of the client, most commonly the bucket +Replaced `chrono` with `time` by @Elykz +Added optional QueryParameters to be sent along with the requests by @SergenN +Added missing GCP locations by @trigovision + # 0.9 Refactor the library away from having a single global client, but provide a client of our own that the user of the library is responsible for. This means that the user has control over the allocation diff --git a/Cargo.toml b/Cargo.toml index fccf749..fc51147 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "cloud-storage" version = "1.0.0" -authors = ["Luuk Wester "] +authors = ["Luuk Wester ", "Randy von der Weide "] edition = "2021" description = "A crate for uploading files to Google cloud storage, and for generating download urls." 
license = "MIT" @@ -13,7 +13,7 @@ categories = ["api-bindings", "web-programming"] # maintenance = { status = "actively-developed" } [features] -default = ["native-tls", "ring", "pem", "global-client", "dotenv"] +default = ["native-tls", "ring", "pem", "global-client", "sync"] global-client = [] sync = ["reqwest/blocking"] diff --git a/src/global_client/hmac_key.rs b/src/global_client/hmac_key.rs index f0fb8a5..2b3722e 100644 --- a/src/global_client/hmac_key.rs +++ b/src/global_client/hmac_key.rs @@ -186,7 +186,7 @@ mod tests { #[tokio::test] async fn list() -> Result<(), Box> { - let keys = HmacKey::list().await?; + let _keys = HmacKey::list().await?; Ok(()) } diff --git a/src/global_client/object.rs b/src/global_client/object.rs index c7379ec..fc45135 100644 --- a/src/global_client/object.rs +++ b/src/global_client/object.rs @@ -43,7 +43,7 @@ impl Object { mime_type: &str, parameters: Option, ) -> Result { - crate::runtime()?.block_on(Self::create(file, filename, mime_type, parameters)) + crate::runtime()?.block_on(Self::create(bucket, file, filename, mime_type, parameters)) } /// Create a new object with metadata. @@ -187,7 +187,7 @@ impl Object { use futures_util::TryStreamExt; let rt = crate::runtime()?; - let listed = rt.block_on(Self::list(list_request))?; + let listed = rt.block_on(Self::list(bucket, list_request))?; rt.block_on(listed.try_collect()) } @@ -223,7 +223,7 @@ impl Object { file_name: &str, parameters: Option, ) -> Result { - crate::runtime()?.block_on(Self::read(file_name, parameters)) + crate::runtime()?.block_on(Self::read(bucket, file_name, parameters)) } /// Download the content of the object with the specified name in the specified bucket. 
@@ -258,7 +258,7 @@ impl Object { file_name: &str, parameters: Option, ) -> Result, Error> { - crate::runtime()?.block_on(Self::download(file_name, parameters)) + crate::runtime()?.block_on(Self::download(bucket, file_name, parameters)) } /// Download the content of the object with the specified name in the specified bucket, without @@ -349,7 +349,7 @@ impl Object { file_name: &str, parameters: Option, ) -> Result<(), Error> { - crate::runtime()?.block_on(Self::delete(file_name, parameters)) + crate::runtime()?.block_on(Self::delete(bucket, file_name, parameters)) } /// Obtains a single object with the specified name in the specified bucket. @@ -406,7 +406,7 @@ impl Object { parameters: Option, ) -> Result { - crate::runtime()?.block_on(Self::compose(req, destination_object, parameters)) + crate::runtime()?.block_on(Self::compose(bucket, req, destination_object, parameters)) } /// Copy this object to the target bucket and path diff --git a/src/sync/bucket.rs b/src/sync/bucket.rs index d64d6b2..c8bce6f 100644 --- a/src/sync/bucket.rs +++ b/src/sync/bucket.rs @@ -3,8 +3,8 @@ use crate::{models::{create, IamPolicy, TestIamPermission}, Bucket, Error}; /// Operations on [`Bucket`]()s. #[derive(Debug)] pub struct BucketClient<'a> { - pub(crate) client: &'a crate::client::BucketClient<'a>, - pub(crate) runtime: tokio::runtime::Handle, + pub(crate) client: crate::client::BucketClient<'a>, + pub(crate) runtime: &'a tokio::runtime::Handle, } impl<'a> BucketClient<'a> { diff --git a/src/sync/bucket_access_control.rs b/src/sync/bucket_access_control.rs index b39c0c7..76e61e7 100644 --- a/src/sync/bucket_access_control.rs +++ b/src/sync/bucket_access_control.rs @@ -4,7 +4,7 @@ use crate::{models::{create, BucketAccessControl, Entity}, Error}; /// Operations on [`BucketAccessControl`](BucketAccessControl)s. 
#[derive(Debug)] pub struct BucketAccessControlClient<'a> { - pub(crate) client: &'a crate::client::BucketAccessControlClient<'a>, + pub(crate) client: crate::client::BucketAccessControlClient<'a>, pub(crate) runtime: &'a tokio::runtime::Handle, } @@ -34,7 +34,6 @@ impl<'a> BucketAccessControlClient<'a> { /// ``` pub fn create( &self, - bucket: &str, new_bucket_access_control: &create::BucketAccessControl, ) -> Result { self.runtime.block_on(self.client.create_using(new_bucket_access_control)) @@ -78,7 +77,7 @@ impl<'a> BucketAccessControlClient<'a> { /// # Ok(()) /// # } /// ``` - pub fn read(&self, bucket: &str, entity: &Entity) -> Result { + pub fn read(&self, entity: &Entity) -> Result { self.runtime.block_on(self.client.read(entity)) } diff --git a/src/sync/client.rs b/src/sync/client.rs index f18accd..cede64c 100644 --- a/src/sync/client.rs +++ b/src/sync/client.rs @@ -28,18 +28,16 @@ impl Client { /// Synchronous operations on [`Bucket`](crate::bucket::Bucket)s. pub fn bucket(&self) -> BucketClient { - let handle = self.runtime.handle().to_owned(); - let client = self.client.bucket(); BucketClient { - runtime: handle, - client: &client, + client: self.client.bucket(), + runtime: self.runtime.handle(), } } /// Synchronous operations on [`BucketAccessControl`](crate::bucket_access_control::BucketAccessControl)s. pub fn bucket_access_control(&self, bucket: &str) -> BucketAccessControlClient { BucketAccessControlClient { - client: &self.client.bucket_access_control(bucket), + client: self.client.bucket_access_control(bucket), runtime: self.runtime.handle() } } @@ -47,7 +45,7 @@ impl Client { /// Synchronous operations on [`DefaultObjectAccessControl`](crate::default_object_access_control::DefaultObjectAccessControl)s. 
pub fn default_object_access_control(&self, bucket: &str) -> DefaultObjectAccessControlClient { DefaultObjectAccessControlClient { - client: &self.client.default_object_access_control(bucket), + client: self.client.default_object_access_control(bucket), runtime: self.runtime.handle() } } @@ -55,15 +53,15 @@ impl Client { /// Synchronous operations on [`HmacKey`](crate::hmac_key::HmacKey)s. pub fn hmac_key(&self) -> HmacKeyClient { HmacKeyClient { - client: &self.client.hmac_key(), + client: self.client.hmac_key(), runtime: self.runtime.handle() } } /// Synchronous operations on [`Object`](crate::object::Object)s. - pub fn object(&self) -> ObjectClient { + pub fn object(&self, bucket: &str) -> ObjectClient { ObjectClient { - client: &self.client.object(), + client: self.client.object(bucket), runtime: self.runtime.handle() } } @@ -71,7 +69,7 @@ impl Client { /// Synchronous operations on [`ObjectAccessControl`](crate::object_access_control::ObjectAccessControl)s. pub fn object_access_control(&self, bucket: &str, object: &str) -> ObjectAccessControlClient { ObjectAccessControlClient { - client: &self.client.object_access_control(bucket, object), + client: self.client.object_access_control(bucket, object), runtime: self.runtime.handle() } } diff --git a/src/sync/default_object_access_control.rs b/src/sync/default_object_access_control.rs index 9e798b2..ce23cbf 100644 --- a/src/sync/default_object_access_control.rs +++ b/src/sync/default_object_access_control.rs @@ -3,7 +3,7 @@ use crate::{models::{create, DefaultObjectAccessControl, Entity}, Error}; /// Operations on [`DefaultObjectAccessControl`](DefaultObjectAccessControl)s. 
#[derive(Debug)] pub struct DefaultObjectAccessControlClient<'a> { - pub(crate) client: &'a crate::client::DefaultObjectAccessControlClient<'a>, + pub(crate) client: crate::client::DefaultObjectAccessControlClient<'a>, pub(crate) runtime: &'a tokio::runtime::Handle, } @@ -57,7 +57,7 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// # Ok(()) /// # } /// ``` - pub fn list(&self, bucket: &str) -> Result, Error> { + pub fn list(&self) -> Result, Error> { self.runtime .block_on(self.client.list()) } @@ -82,7 +82,7 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// # Ok(()) /// # } /// ``` - pub fn read(&self, bucket: &str, entity: &Entity) -> Result { + pub fn read(&self, entity: &Entity) -> Result { self.runtime.block_on( self.client.read(entity), ) diff --git a/src/sync/hmac_key.rs b/src/sync/hmac_key.rs index c41afac..d5a8166 100644 --- a/src/sync/hmac_key.rs +++ b/src/sync/hmac_key.rs @@ -3,7 +3,7 @@ use crate::{Error, models::{HmacKey, HmacMeta, HmacState}}; /// Operations on [`HmacKey`](HmacKey)s. #[derive(Debug)] pub struct HmacKeyClient<'a> { - pub(crate) client: &'a crate::client::HmacKeyClient<'a>, + pub(crate) client: crate::client::HmacKeyClient<'a>, pub(crate) runtime: &'a tokio::runtime::Handle, } diff --git a/src/sync/object.rs b/src/sync/object.rs index 57a1381..e9bed87 100644 --- a/src/sync/object.rs +++ b/src/sync/object.rs @@ -8,7 +8,7 @@ use crate::{models::{CreateParameters, ObjectList, ReadParameters, UpdateParamet /// Operations on [`Object`](Object)s. 
#[derive(Debug)] pub struct ObjectClient<'a> { - pub(crate) client: &'a crate::client::ObjectClient<'a>, + pub(crate) client: crate::client::ObjectClient<'a>, pub(crate) runtime: &'a tokio::runtime::Handle, } @@ -31,7 +31,6 @@ impl<'a> ObjectClient<'a> { /// ``` pub fn create( &self, - bucket: &str, file: Vec, filename: &str, mime_type: &str, @@ -39,7 +38,7 @@ impl<'a> ObjectClient<'a> { ) -> Result { self.runtime.block_on( self.client - .create(bucket, file, filename, mime_type, parameters), + .create(file, filename, mime_type, parameters), ) } @@ -66,7 +65,6 @@ impl<'a> ObjectClient<'a> { /// ``` pub fn create_with( &self, - bucket: &str, file: Vec, filename: &str, mime_type: &str, @@ -74,7 +72,7 @@ impl<'a> ObjectClient<'a> { ) -> Result { self.runtime.block_on( self.client - .create_with(bucket, file, filename, mime_type, metadata), + .create_with(file, filename, mime_type, metadata), ) } @@ -82,7 +80,6 @@ impl<'a> ObjectClient<'a> { /// to load the entire file in ram. pub fn create_streamed( &self, - bucket: &str, file: R, length: impl Into>, filename: &str, @@ -96,7 +93,7 @@ impl<'a> ObjectClient<'a> { self.runtime.block_on( self.client - .create_streamed(bucket, stream, length, filename, mime_type, parameters), + .create_streamed(stream, length, filename, mime_type, parameters), ) } @@ -104,7 +101,6 @@ impl<'a> ObjectClient<'a> { /// to load the entire file in ram. 
pub fn create_streamed_with( &self, - bucket: &str, file: R, filename: &str, mime_type: &str, @@ -117,7 +113,7 @@ impl<'a> ObjectClient<'a> { self.runtime.block_on( self.client - .create_streamed_with(bucket, stream, filename, mime_type, metadata), + .create_streamed_with(stream, filename, mime_type, metadata), ) } @@ -135,11 +131,10 @@ impl<'a> ObjectClient<'a> { /// ``` pub fn list( &self, - bucket: &'a str, list_request: ListRequest, ) -> Result, Error> { let rt = &self.runtime; - let listed = rt.block_on(self.client.list(bucket, list_request))?; + let listed = rt.block_on(self.client.list(list_request))?; rt.block_on(listed.try_collect()) } @@ -157,12 +152,11 @@ impl<'a> ObjectClient<'a> { /// ``` pub fn read( &self, - bucket: &str, file_name: &str, parameters: Option, ) -> Result { self.runtime - .block_on(self.client.read(bucket, file_name, parameters)) + .block_on(self.client.read(file_name, parameters)) } /// Download the content of the object with the specified name in the specified bucket. 
@@ -179,13 +173,12 @@ impl<'a> ObjectClient<'a> { /// ``` pub fn download( &self, - bucket: &str, file_name: &str, parameters: Option, ) -> Result, Error> { self.runtime.block_on( self.client - .download(bucket, file_name, parameters), + .download(file_name, parameters), ) } @@ -205,13 +198,13 @@ impl<'a> ObjectClient<'a> { /// # Ok(()) /// # } /// ``` - pub fn download_streamed(&self, bucket: &str, file_name: &str, file: W) -> Result<(), Error> + pub fn download_streamed(&self, file_name: &str, file: W) -> Result<(), Error> where W: std::io::Write, // + Send + Sync + Unpin + 'static, { self.runtime.block_on(async { let mut stream = self.client - .download_streamed(bucket, file_name, None) + .download_streamed(file_name, None) .await?; let mut writer = tokio::io::BufWriter::new(AllowStdIo::new(file).compat_write()); @@ -260,12 +253,11 @@ impl<'a> ObjectClient<'a> { /// ``` pub fn delete( &self, - bucket: &str, file_name: &str, parameters: Option, ) -> Result<(), Error> { self.runtime - .block_on(self.client.delete(bucket, file_name, parameters)) + .block_on(self.client.delete(file_name, parameters)) } /// Obtains a single object with the specified name in the specified bucket. @@ -301,13 +293,11 @@ impl<'a> ObjectClient<'a> { /// ``` pub fn compose( &self, - bucket: &str, req: &ComposeRequest, destination_object: &str, parameters: Option, ) -> Result { self.runtime.block_on(self.client.compose( - bucket, req, destination_object, parameters, diff --git a/src/sync/object_access_control.rs b/src/sync/object_access_control.rs index 476c3c1..f01e74a 100644 --- a/src/sync/object_access_control.rs +++ b/src/sync/object_access_control.rs @@ -4,7 +4,7 @@ use crate::{models::{create, ObjectAccessControl, Entity}, Error}; /// Operations on [`ObjectAccessControl`](ObjectAccessControl)s. 
#[derive(Debug)] pub struct ObjectAccessControlClient<'a> { - pub(crate) client: &'a crate::client::ObjectAccessControlClient<'a>, + pub(crate) client: crate::client::ObjectAccessControlClient<'a>, pub(crate) runtime: &'a tokio::runtime::Handle, } @@ -29,7 +29,7 @@ impl<'a> ObjectAccessControlClient<'a> { /// Important: This method fails with a 400 Bad Request response for buckets with uniform /// bucket-level access enabled. Use `Bucket::get_iam_policy` and `Bucket::set_iam_policy` to /// control access instead. - pub fn list(&self, bucket: &str, object: &str) -> Result, Error> { + pub fn list(&self) -> Result, Error> { self.runtime .block_on(self.client.list()) } @@ -42,8 +42,6 @@ impl<'a> ObjectAccessControlClient<'a> { /// control access instead. pub fn read( &self, - bucket: &str, - object: &str, entity: &Entity, ) -> Result { self.runtime.block_on( From e6eb452a93df253e42de3c6a543d03fb96f188fd Mon Sep 17 00:00:00 2001 From: SonnyX Date: Thu, 4 May 2023 14:15:52 +0200 Subject: [PATCH 19/26] Update ReadMe.md --- LICENSE | 2 +- README.md | 21 ++++++++++----------- src/global_client/bucket.rs | 2 -- 3 files changed, 11 insertions(+), 14 deletions(-) diff --git a/LICENSE b/LICENSE index a8cfb35..9eaa67f 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2020 Luuk Wester +Copyright (c) 2023 Randy von der Weide Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index 8aac8a9..4f11ac5 100644 --- a/README.md +++ b/README.md @@ -8,23 +8,22 @@ A library that can be used to push blobs to [Google Cloud Storage](https://cloud Add the following line to your Cargo.toml ```toml [dependencies] -cloud-storage = "0.10" +cloud-storage = "1.0.0" ``` ### Examples ```rust // create a new Bucket -let new_bucket = create::Bucket { name: "my_bucket", ..Default::default() } -let bucket = Bucket::create(new_bucket).await?; 
+let new_bucket = create::Bucket { name: "my_bucket".to_string(), ..Default::default() }; +let bucket = Bucket::create(&new_bucket).await?; // upload a file to our new bucket -let content = b"Your file is now on google cloud storage!"; -bucket.upload(content, "folder/filename.txt", "application/text", None).await?; -let mut object = Object::create("my_bucket", content, "folder/filename.txt", "application/text", None).await?; +let content = b"Your file is now on google cloud storage!".to_vec(); +let object = Object::create(&bucket.name, content, "folder/filename.txt", "application/text", None).await?; // let's copy the file -object.copy("my_bucket2: electric boogaloo", "otherfolder/filename.txt", None).await?; +object.copy("my_other_bucket", "otherfolder/filename.txt", None).await?; // print a link to the file -println!("{}", object.download_url(1000)); // download link that expires after 1000 seconds +println!("{}", object.download_url(1000)?); // download link that expires after 1000 seconds // remove the file from the bucket -object.delete(None).await?; +Object::delete(&bucket.name, "folder/filename.txt", None).await?; ``` The service account should have the roles `Service Account Token Creator` (for generating access tokens) and `Storage Object Admin` (for generating sign urls to download the files). @@ -35,7 +34,7 @@ If you're not (yet) interested in running an async executor, then `cloud_storage You will need to set both the `global-client` and `sync` flags in your Cargo.toml, for example: ``` -cloud-storage = { version = "0.11.0", features = ["global-client", "sync"] } +cloud-storage = { version = "1.0.0", features = ["global-client", "sync"] } ``` ### Testing @@ -43,4 +42,4 @@ To run the tests for this project, first create an enviroment parameter (or entr ```bash sh test.sh ``` -The `test-threads=1` is necessary so that the tests don't exceed the 2 per second bucket creating rate limit. 
(Depending on your internet speed, you may be able to use more than 1 test thread) +The `test-threads=1` is necessary so that the tests don't exceed the 2 per second bucket creating rate limit. (Depending on your internet speed, you may be able to use more than 1 test thread) \ No newline at end of file diff --git a/src/global_client/bucket.rs b/src/global_client/bucket.rs index 7b89db3..ed74ae7 100644 --- a/src/global_client/bucket.rs +++ b/src/global_client/bucket.rs @@ -275,8 +275,6 @@ impl Bucket { mod tests { use crate::{models::{create, Entity, Role, IamConfiguration, UniformBucketLevelAccess, RetentionPolicy, StandardIamRole, IamPolicy, Binding, IamRole}, Bucket}; - - #[tokio::test] async fn create() -> Result<(), Box> { #[cfg(feature = "dotenv")] From 343db5c19fbfa4558fb408e591fd3831f76af539 Mon Sep 17 00:00:00 2001 From: SonnyX Date: Thu, 4 May 2023 14:23:52 +0200 Subject: [PATCH 20/26] Update changelog --- CHANGELOG.md | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5fbfe84..56ae947 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,28 +1,25 @@ # 1.0.0 -Refactored the library to have one model per file, to improve maintainabillity. -Refactor the library to use less static variables (mainly ServiceAccount) -Added a ClientBuilder by @alexpusch -Made the unmaintained dependency `dotenv` optional -Provided a new way to load in ServiceAccount configuration: `ServiceAccount::from_str()` -Dramatically improved download performance by streaming an array of bytes, rather than a single byte per poll -Moved variables used by all functions in a client to the constructor of the client, most commonly the bucket -Replaced `chrono` with `time` by @Elykz -Added optional QueryParameters to be sent along with the requests by @SergenN -Added missing GCP locations by @trigovision +* Refactored the library to have one model per file, to improve maintainabillity. 
+* Refactor the library to use less static variables (mainly ServiceAccount) +* Made the unmaintained dependency `dotenv` optional +* Provided a new way to load in ServiceAccount configuration: `ServiceAccount::from_str()` +* Dramatically improved download performance by streaming an array of bytes, rather than a single byte per poll +* Moved variables used by all functions in a client to the constructor of the client, most commonly the bucket +* Added a ClientBuilder by [@alexpusch](https://github.com/alexpusch) +* Replaced `chrono` with `time` by [@Elykz](https://github.com/Elykz) +* Added optional QueryParameters to be sent along with the requests by [@SergenN](https://github.com/SergenN) +* Added missing GCP locations by [@trigovision](https://github.com/trigovision) # 0.9 -Refactor the library away from having a single global client, but provide a client of our own that -the user of the library is responsible for. This means that the user has control over the allocation -and destruction of the client. This solves issue #60 and is enabled due to tireless work by -shepmaster. Big thanks! +* Refactor the library away from having a single global client, but provide a client of our own that the user of the library is responsible for. This means that the user has control over the allocation and destruction of the client. This solves issue #60 and is enabled due to tireless work by shepmaster. Big thanks! # 0.10 -Small fix to the public interface of `sync::ObjectClient` that was not properly sync. -Fix urlencoding url paths correctly in several places. -Update cloud storage to use the new url, `www.googleapis.com` => `storage.googleapis.com` +* Small fix to the public interface of `sync::ObjectClient` that was not properly sync. +* Fix urlencoding url paths correctly in several places. 
+* Update cloud storage to use the new url, `www.googleapis.com` => `storage.googleapis.com` # 0.11 -@pseguin2011: Implemented a configurable authentication layer through the `TokenCache` trait. +* [@pseguin2011](https://github.com/pseguin2011): Implemented a configurable authentication layer through the `TokenCache` trait. # 0.12 -Implement customisable authentication providers, via the `Client::with_cache` method. +* Implement customisable authentication providers, via the `Client::with_cache` method. From 1f25e3d7ef974ca0e461e514bc03db3a1f3e3a93 Mon Sep 17 00:00:00 2001 From: SonnyX Date: Thu, 4 May 2023 16:04:00 +0200 Subject: [PATCH 21/26] Update documenation to reflect changes --- Cargo.toml | 2 +- src/client/bucket.rs | 67 ++++--- src/client/bucket_access_control.rs | 40 ++--- src/client/client.rs | 165 ----------------- src/client/default_object_access_control.rs | 51 +++--- src/client/hmac_key.rs | 42 ++--- src/client/mod.rs | 168 +++++++++++++++++- src/client/object.rs | 107 +++++------ src/client/object_access_control.rs | 2 +- src/download_options.rs | 4 +- src/global_client/bucket.rs | 33 ++-- src/global_client/bucket_access_control.rs | 12 +- .../default_object_access_control.rs | 18 +- src/global_client/hmac_key.rs | 12 +- src/global_client/mod.rs | 2 +- src/global_client/object.rs | 37 ++-- src/lib.rs | 37 ++-- src/models/cors.rs | 2 +- src/models/create/mod.rs | 3 +- src/models/create/object_access_control.rs | 4 +- src/models/mod.rs | 2 +- src/models/object.rs | 34 ++-- src/models/object_access_control.rs | 4 +- src/models/response.rs | 4 +- src/models/test_iam_permission.rs | 2 +- src/sync/bucket.rs | 79 ++++---- src/sync/bucket_access_control.rs | 34 ++-- src/sync/client.rs | 22 +-- src/sync/default_object_access_control.rs | 51 +++--- src/sync/hmac_key.rs | 32 ++-- src/sync/mod.rs | 2 +- src/sync/object.rs | 85 ++++----- 32 files changed, 588 insertions(+), 571 deletions(-) delete mode 100644 src/client/client.rs diff --git a/Cargo.toml 
b/Cargo.toml index fc51147..199e4f9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,7 +13,7 @@ categories = ["api-bindings", "web-programming"] # maintenance = { status = "actively-developed" } [features] -default = ["native-tls", "ring", "pem", "global-client", "sync"] +default = ["native-tls", "ring", "pem", "global-client", "sync", "dotenv"] global-client = [] sync = ["reqwest/blocking"] diff --git a/src/client/bucket.rs b/src/client/bucket.rs index f679f0f..fbbc819 100644 --- a/src/client/bucket.rs +++ b/src/client/bucket.rs @@ -4,25 +4,25 @@ use crate::{models::{create, ListResponse, IamPolicy, TestIamPermission}, Bucket /// Operations on [`Bucket`]()s. #[derive(Debug)] pub struct BucketClient<'a> { - pub(crate) client: &'a super::client::Client, + pub(crate) client: &'a super::CloudStorageClient, pub(crate) bucket_url: String, pub(crate) project_id: String, } impl<'a> BucketClient<'a> { /// Creates a new `Bucket`. There are many options that you can provide for creating a new - /// bucket, so the `NewBucket` resource contains all of them. Note that `NewBucket` implements + /// bucket, so the `create::Bucket` resource contains all of them. Note that `create::Bucket` implements /// `Default`, so you don't have to specify the fields you're not using. And error is returned /// if that bucket name is already taken. 
/// ### Example /// ``` /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::bucket::{Bucket, create::Bucket}; - /// use cloud_storage::bucket::{Location, MultiRegion}; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::{Bucket, create}; + /// # use cloud_storage::models::{Location, MultiRegion}; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let new_bucket = create::Bucket { /// name: "cloud-storage-rs-doc-1".to_string(), // this is the only mandatory field /// location: Location::Multi(MultiRegion::Eu), @@ -51,10 +51,10 @@ impl<'a> BucketClient<'a> { /// ``` /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::Bucket; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::Bucket; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let buckets = client.bucket().list().await?; /// # Ok(()) /// # } @@ -73,11 +73,11 @@ impl<'a> BucketClient<'a> { /// ``` /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::Bucket; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::Bucket; /// - /// let client = Client::default(); - /// # use cloud_storage::bucket::NewBucket; + /// let client = CloudStorageClient::default(); + /// # use cloud_storage::models::create; /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-2".to_string(), /// # ..Default::default() @@ -102,11 +102,11 @@ impl<'a> BucketClient<'a> { /// ``` /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::bucket::{Bucket, RetentionPolicy}; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::{Bucket, RetentionPolicy}; /// - /// let 
client = Client::default(); - /// # use cloud_storage::bucket::NewBucket; + /// let client = CloudStorageClient::default(); + /// # use cloud_storage::models::create; /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-3".to_string(), /// # ..Default::default() @@ -138,11 +138,11 @@ impl<'a> BucketClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::Bucket; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::Bucket; /// - /// let client = Client::default(); - /// # use cloud_storage::bucket::NewBucket; + /// let client = CloudStorageClient::default(); + /// # use cloud_storage::models::create; /// # let new_bucket = create::Bucket { /// # name: "unnecessary-bucket".to_string(), /// # ..Default::default() @@ -170,11 +170,11 @@ impl<'a> BucketClient<'a> { /// ``` /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::Bucket; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::Bucket; /// - /// let client = Client::default(); - /// # use cloud_storage::bucket::NewBucket; + /// let client = CloudStorageClient::default(); + /// # use cloud_storage::models::create; /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-4".to_string(), /// # ..Default::default() @@ -199,12 +199,12 @@ impl<'a> BucketClient<'a> { /// ``` /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::Bucket; - /// use cloud_storage::bucket::{IamPolicy, Binding, IamRole, StandardIamRole, Entity}; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::Bucket; + /// # use cloud_storage::models::{IamPolicy, Binding, IamRole, StandardIamRole, Entity}; /// - /// let client = Client::default(); - /// # use cloud_storage::bucket::NewBucket; + /// let client = 
CloudStorageClient::default(); + /// # use cloud_storage::models::create; /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-5".to_string(), /// # ..Default::default() @@ -244,10 +244,9 @@ impl<'a> BucketClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::Bucket; - /// - /// let bucket_client = Client::default().bucket(); + /// # use cloud_storage::CloudStorageClient; + /// let cloud_storage_client = CloudStorageClient::default(); + /// let bucket_client = cloud_storage_client.bucket(); /// let bucket = bucket_client.read("my_bucket").await?; /// bucket_client.test_iam_permission(&bucket, "storage.buckets.get").await?; /// # Ok(()) diff --git a/src/client/bucket_access_control.rs b/src/client/bucket_access_control.rs index 0a3852e..7ccc305 100644 --- a/src/client/bucket_access_control.rs +++ b/src/client/bucket_access_control.rs @@ -3,7 +3,7 @@ use crate::{models::{create, BucketAccessControl, ListResponse, Entity, Response /// Operations on [`BucketAccessControl`](BucketAccessControl)s. 
#[derive(Debug)] pub struct BucketAccessControlClient<'a> { - pub(crate) client: &'a super::client::Client, + pub(crate) client: &'a super::CloudStorageClient, pub(crate) bucket_acl_url: String } @@ -18,11 +18,11 @@ impl<'a> BucketAccessControlClient<'a> { /// ```rust,no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::bucket_access_control::{BucketAccessControl, create::BucketAccessControl}; - /// use cloud_storage::bucket_access_control::{Role, Entity}; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::{BucketAccessControl, create}; + /// # use cloud_storage::models::{Role, Entity}; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let new_bucket_access_control = create::BucketAccessControl { /// entity: Entity::AllUsers, /// role: Role::Reader, @@ -50,10 +50,10 @@ impl<'a> BucketAccessControlClient<'a> { /// ```rust,no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::bucket_access_control::BucketAccessControl; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::BucketAccessControl; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let acls = client.bucket_access_control("my_bucket").list().await?; /// # Ok(()) /// # } @@ -76,10 +76,10 @@ impl<'a> BucketAccessControlClient<'a> { /// ```rust,no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::{BucketAccessControl, Entity}; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let controls = 
client.bucket_access_control("my_bucket").read(&Entity::AllUsers).await?; /// # Ok(()) /// # } @@ -105,13 +105,13 @@ impl<'a> BucketAccessControlClient<'a> { /// ```rust,no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; - /// - /// let client = Client::default(); - /// let mut acl = client.bucket_access_control("my_bucket").read(&Entity::AllUsers).await?; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::{BucketAccessControl, Entity}; + /// let cloud_storage_client = CloudStorageClient::default(); + /// let client = cloud_storage_client.bucket_access_control("my_bucket"); + /// let mut acl = client.read(&Entity::AllUsers).await?; /// acl.entity = Entity::AllAuthenticatedUsers; - /// client.bucket_access_control().update(&acl).await?; + /// client.update(&acl).await?; /// # Ok(()) /// # } /// ``` @@ -139,10 +139,10 @@ impl<'a> BucketAccessControlClient<'a> { /// ```rust,no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::{BucketAccessControl, Entity}; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let my_bucket = client.bucket_access_control("my_bucket"); /// let controls = my_bucket.read(&Entity::AllUsers).await?; /// my_bucket.delete(controls).await?; diff --git a/src/client/client.rs b/src/client/client.rs deleted file mode 100644 index 10a2e9f..0000000 --- a/src/client/client.rs +++ /dev/null @@ -1,165 +0,0 @@ -//! Clients for Google Cloud Storage endpoints. 
- -use std::{fmt, sync}; -use crate::{Error, token::TokenCache, ServiceAccount}; - -use super::{BucketClient, BucketAccessControlClient, DefaultObjectAccessControlClient, HmacKeyClient, ObjectClient, ObjectAccessControlClient}; - -/// The primary entrypoint to perform operations with Google Cloud Storage. -pub struct Client { - pub(crate) reqwest: reqwest::Client, - pub(crate) service_account: crate::ServiceAccount, - /// Static `Token` struct that caches - pub(crate) token_cache: sync::Arc, -} - -impl fmt::Debug for Client { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Client") - .field("client", &self.reqwest) - .field("token_cache", &"") - .finish() - } -} - -impl Default for Client { - fn default() -> Self { - Self { - reqwest: Default::default(), - token_cache: sync::Arc::new(crate::Token::default()), - service_account: crate::ServiceAccount::default() - } - } -} - -impl Client { - /// Constucts a client with given reqwest client - pub fn with_client(client: reqwest::Client) -> Self { - Self { - reqwest: client, - token_cache: sync::Arc::new(crate::Token::default()), - service_account: crate::ServiceAccount::default() - } - } - - /// Initializer with a provided refreshable token - pub fn with_cache(token: impl TokenCache + 'static) -> Self { - Self { - reqwest: Default::default(), - token_cache: sync::Arc::new(token), - service_account: crate::ServiceAccount::default() - } - } - - /// Creates a new [ClientBuilder] - pub fn builder() -> ClientBuilder { - ClientBuilder::new() - } - - /// Operations on [`Bucket`](crate::bucket::Bucket)s. - pub fn bucket(&self) -> BucketClient { - BucketClient { - bucket_url: "https://storage.googleapis.com/storage/v1/b".to_string(), - project_id: self.service_account.project_id.clone(), - client: self, - } - } - - /// Operations on [`BucketAccessControl`](crate::bucket_access_control::BucketAccessControl)s. 
- pub fn bucket_access_control(&self, bucket: &str) -> BucketAccessControlClient { - let url = format!("https://storage.googleapis.com/storage/v1/b/{}/acl", crate::percent_encode(bucket)); - BucketAccessControlClient { - bucket_acl_url: url, - client: self - } - } - - /// Operations on [`DefaultObjectAccessControl`](crate::default_object_access_control::DefaultObjectAccessControl)s. - pub fn default_object_access_control(&self, bucket: &str) -> DefaultObjectAccessControlClient { - let url = format!("https://storage.googleapis.com/storage/v1/b/{}/defaultObjectAcl", crate::percent_encode(bucket)); - DefaultObjectAccessControlClient { - base_url: url, - bucket: bucket.to_string(), - client: self - } - } - - /// Operations on [`HmacKey`](crate::hmac_key::HmacKey)s. - pub fn hmac_key(&self) -> HmacKeyClient { - HmacKeyClient { - hmac_keys_url: format!("https://storage.googleapis.com/storage/v1/projects/{}/hmacKeys", &self.service_account.project_id), - client_email: self.service_account.client_email.clone(), - client: self, - } - } - - /// Operations on [`Object`](crate::object::Object)s. - pub fn object(&self, bucket: &str) -> ObjectClient { - ObjectClient { - base_url: format!("https://storage.googleapis.com/storage/v1/b/{}/o", crate::percent_encode(bucket)), - insert_url: format!("https://storage.googleapis.com/upload/storage/v1/b/{}/o", crate::percent_encode(bucket)), - client: self, - } - } - - /// Operations on [`ObjectAccessControl`](crate::object_access_control::ObjectAccessControl)s. 
- pub fn object_access_control(&self, bucket: &str, object: &str,) -> ObjectAccessControlClient { - ObjectAccessControlClient { - acl_url: format!("https://storage.googleapis.com/storage/v1/b/{}/o/{}/acl", crate::percent_encode(bucket), crate::percent_encode(object)), - client: self - } - } - - pub(crate) async fn get_headers(&self) -> Result { - let mut result = reqwest::header::HeaderMap::new(); - let token = self.token_cache.get(&self.reqwest, self.service_account.client_email.clone(), self.service_account.private_key.as_bytes()).await?; - result.insert( - reqwest::header::AUTHORIZATION, - format!("Bearer {}", token).parse().unwrap(), - ); - Ok(result) - } -} - -/// A ClientBuilder can be used to create a Client with custom configuration. -#[derive(Default)] -pub struct ClientBuilder { - client: Option, - /// Static `Token` struct that caches - token_cache: Option>, - service_account: Option -} - -impl ClientBuilder { - /// Constructs a new ClientBuilder - pub fn new() -> Self { - Default::default() - } - - /// Returns a `Client` that uses this `ClientBuilder` configuration. 
- pub fn build(self) -> Client { - Client { - reqwest: self.client.unwrap_or_default(), - token_cache: self.token_cache.unwrap_or(sync::Arc::new(crate::Token::default())), - service_account: self.service_account.unwrap_or_default() - } - } - - /// Sets refreshable token - pub fn with_cache(&mut self, token: impl TokenCache + 'static) -> &mut Self { - self.token_cache = Some(sync::Arc::new(token)); - self - } - - /// Sets service account - pub fn with_service_account(&mut self, service_account: crate::ServiceAccount) -> &mut Self { - self.service_account = Some(service_account); - self - } - - /// Sets internal [reqwest Client](https://docs.rs/reqwest/latest/reqwest/struct.Client.html) - pub fn with_reqwest_client(&mut self, reqwest_client: reqwest::Client) -> &mut Self { - self.client = Some(reqwest_client); - self - } -} diff --git a/src/client/default_object_access_control.rs b/src/client/default_object_access_control.rs index c8893ae..f81328d 100644 --- a/src/client/default_object_access_control.rs +++ b/src/client/default_object_access_control.rs @@ -4,7 +4,7 @@ use crate::{models::{create, DefaultObjectAccessControl, ListResponse, Entity, R /// Operations on [`DefaultObjectAccessControl`](DefaultObjectAccessControl)s. 
#[derive(Debug)] pub struct DefaultObjectAccessControlClient<'a> { - pub(crate) client: &'a super::client::Client, + pub(crate) client: &'a super::CloudStorageClient, pub(crate) base_url: String, pub(crate) bucket: String, } @@ -19,18 +19,19 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::default_object_access_control::{ - /// DefaultObjectAccessControl, create::DefaultObjectAccessControl, Role, Entity, - /// }; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::{ + /// # DefaultObjectAccessControl, create, Role, Entity, + /// # }; /// - /// let client = Client::default(); + /// let cloud_storage_client = CloudStorageClient::default(); + /// let client = cloud_storage_client.default_object_access_control("my_bucket"); /// let new_acl = create::DefaultObjectAccessControl { /// entity: Entity::AllAuthenticatedUsers, /// role: Role::Reader, /// }; - /// let default_acl = client.default_object_access_control("my_bucket").create(&new_acl).await?; - /// # client.default_object_access_control().delete(default_acl).await?; + /// let default_acl = client.create(&new_acl).await?; + /// # client.delete(default_acl).await?; /// # Ok(()) /// # } /// ``` @@ -61,10 +62,10 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::default_object_access_control::DefaultObjectAccessControl; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::DefaultObjectAccessControl; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let default_acls = client.default_object_access_control("my_bucket").list().await?; /// # Ok(()) /// # } @@ -94,10 +95,10 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// ```no_run /// # 
#[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::default_object_access_control::{DefaultObjectAccessControl, Entity}; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::{DefaultObjectAccessControl, Entity}; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let default_acl = client.default_object_access_control("my_bucket").read(&Entity::AllUsers).await?; /// # Ok(()) /// # } @@ -132,13 +133,14 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::default_object_access_control::{DefaultObjectAccessControl, Entity}; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::{DefaultObjectAccessControl, Entity}; /// - /// let client = Client::default(); - /// let mut default_acl = client.default_object_access_control("my_bucket").read(&Entity::AllUsers).await?; + /// let cloud_storage_client = CloudStorageClient::default(); + /// let client = cloud_storage_client.default_object_access_control("my_bucket"); + /// let mut default_acl = client.read(&Entity::AllUsers).await?; /// default_acl.entity = Entity::AllAuthenticatedUsers; - /// client.default_object_access_control().update(&default_acl).await?; + /// client.update(&default_acl).await?; /// # Ok(()) /// # } /// ``` @@ -168,12 +170,13 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::default_object_access_control::{DefaultObjectAccessControl, Entity}; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::{DefaultObjectAccessControl, Entity}; /// - /// let client = Client::default(); - /// let mut default_acl = 
client.default_object_access_control("my_bucket").read(&Entity::AllUsers).await?; - /// client.default_object_access_control().delete(default_acl).await?; + /// let cloud_storage_client = CloudStorageClient::default(); + /// let client = cloud_storage_client.default_object_access_control("my_bucket"); + /// let mut default_acl = client.read(&Entity::AllUsers).await?; + /// client.delete(default_acl).await?; /// # Ok(()) /// # } /// ``` diff --git a/src/client/hmac_key.rs b/src/client/hmac_key.rs index 16004ab..784e339 100644 --- a/src/client/hmac_key.rs +++ b/src/client/hmac_key.rs @@ -3,7 +3,7 @@ use crate::{Error, models::{HmacKey, HmacMeta, Response, ListResponse, HmacState /// Operations on [`HmacKey`](HmacKey)s. #[derive(Debug)] pub struct HmacKeyClient<'a> { - pub(crate) client: &'a super::client::Client, + pub(crate) client: &'a super::CloudStorageClient, pub(crate) hmac_keys_url: String, pub(crate) client_email: String, } @@ -20,14 +20,14 @@ impl<'a> HmacKeyClient<'a> { /// ``` /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::hmac_key::HmacKey; - /// - /// let client = Client::default(); - /// let hmac_key = client.hmac_key().create().await?; - /// # use cloud_storage::hmac_key::HmacState; - /// # client.hmac_key().update(&hmac_key.metadata.access_id, HmacState::Inactive).await?; - /// # client.hmac_key().delete(&hmac_key.metadata.access_id).await?; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::HmacKey; + /// let cloud_storage_client = CloudStorageClient::default(); + /// let client = cloud_storage_client.hmac_key(); + /// let hmac_key = client.create().await?; + /// # use cloud_storage::models::HmacState; + /// # client.update(&hmac_key.metadata.access_id, HmacState::Inactive).await?; + /// # client.delete(&hmac_key.metadata.access_id).await?; /// # Ok(()) /// # } /// ``` @@ -61,10 +61,10 @@ impl<'a> HmacKeyClient<'a> { /// ``` /// # #[tokio::main] 
/// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::hmac_key::HmacKey; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::HmacKey; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let all_hmac_keys = client.hmac_key().list().await?; /// # Ok(()) /// # } @@ -106,10 +106,10 @@ impl<'a> HmacKeyClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::hmac_key::HmacKey; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::HmacKey; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let key = client.hmac_key().read("some identifier").await?; /// # Ok(()) /// # } @@ -138,10 +138,10 @@ impl<'a> HmacKeyClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::hmac_key::{HmacKey, HmacState}; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::{HmacKey, HmacState}; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let key = client.hmac_key().update("your key", HmacState::Active).await?; /// # Ok(()) /// # } @@ -174,10 +174,10 @@ impl<'a> HmacKeyClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::hmac_key::{HmacKey, HmacState}; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::{HmacKey, HmacState}; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let key = client.hmac_key().update("your key", HmacState::Inactive).await?; // this is required. 
/// client.hmac_key().delete(&key.access_id).await?; /// # Ok(()) diff --git a/src/client/mod.rs b/src/client/mod.rs index af31ac0..c407cb8 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -1,17 +1,177 @@ //! Clients for Google Cloud Storage endpoints. - mod bucket; mod bucket_access_control; -mod client; mod default_object_access_control; mod hmac_key; mod object; mod object_access_control; -pub use client::Client; pub use bucket::BucketClient; pub use bucket_access_control::BucketAccessControlClient; pub use default_object_access_control::DefaultObjectAccessControlClient; pub use hmac_key::HmacKeyClient; pub use object::ObjectClient; -pub use object_access_control::ObjectAccessControlClient; \ No newline at end of file +pub use object_access_control::ObjectAccessControlClient; + +use std::{fmt, sync}; +use crate::{Error, token::TokenCache, ServiceAccount}; + + +/// The primary entrypoint to perform operations with Google Cloud Storage. +pub struct CloudStorageClient { + pub(crate) reqwest: reqwest::Client, + pub(crate) service_account: crate::ServiceAccount, + /// Static `Token` struct that caches + pub(crate) token_cache: sync::Arc, +} + +impl fmt::Debug for CloudStorageClient { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("CloudStorageClient") + .field("client", &self.reqwest) + .field("token_cache", &"") + .finish() + } +} + +impl Default for CloudStorageClient { + fn default() -> Self { + Self { + reqwest: Default::default(), + token_cache: sync::Arc::new(crate::Token::default()), + service_account: crate::ServiceAccount::default() + } + } +} + +impl CloudStorageClient { + /// Constucts a client with given reqwest client + pub fn with_client(client: reqwest::Client) -> Self { + Self { + reqwest: client, + token_cache: sync::Arc::new(crate::Token::default()), + service_account: crate::ServiceAccount::default() + } + } + + /// Initializer with a provided refreshable token + pub fn with_cache(token: impl TokenCache + 
'static) -> Self { + Self { + reqwest: Default::default(), + token_cache: sync::Arc::new(token), + service_account: crate::ServiceAccount::default() + } + } + + /// Creates a new [`CloudStorageClientBuilder`](crate::CloudStorageClientBuilder) + pub fn builder() -> CloudStorageClientBuilder { + CloudStorageClientBuilder::new() + } + + /// Operations on [`Bucket`](crate::Bucket)s. + pub fn bucket(&self) -> BucketClient { + BucketClient { + bucket_url: "https://storage.googleapis.com/storage/v1/b".to_string(), + project_id: self.service_account.project_id.clone(), + client: self, + } + } + + /// Operations on [`BucketAccessControl`](crate::models::BucketAccessControl)s. + pub fn bucket_access_control(&self, bucket: &str) -> BucketAccessControlClient { + let url = format!("https://storage.googleapis.com/storage/v1/b/{}/acl", crate::percent_encode(bucket)); + BucketAccessControlClient { + bucket_acl_url: url, + client: self + } + } + + /// Operations on [`DefaultObjectAccessControl`](crate::models::DefaultObjectAccessControl)s. + pub fn default_object_access_control(&self, bucket: &str) -> DefaultObjectAccessControlClient { + let url = format!("https://storage.googleapis.com/storage/v1/b/{}/defaultObjectAcl", crate::percent_encode(bucket)); + DefaultObjectAccessControlClient { + base_url: url, + bucket: bucket.to_string(), + client: self + } + } + + /// Operations on [`HmacKey`](crate::models::HmacKey)s. + pub fn hmac_key(&self) -> HmacKeyClient { + HmacKeyClient { + hmac_keys_url: format!("https://storage.googleapis.com/storage/v1/projects/{}/hmacKeys", &self.service_account.project_id), + client_email: self.service_account.client_email.clone(), + client: self, + } + } + + /// Operations on [`Object`](crate::models::Object)s. 
+ pub fn object(&self, bucket: &str) -> ObjectClient { + ObjectClient { + base_url: format!("https://storage.googleapis.com/storage/v1/b/{}/o", crate::percent_encode(bucket)), + insert_url: format!("https://storage.googleapis.com/upload/storage/v1/b/{}/o", crate::percent_encode(bucket)), + client: self, + } + } + + /// Operations on [`ObjectAccessControl`](crate::models::ObjectAccessControl)s. + pub fn object_access_control(&self, bucket: &str, object: &str,) -> ObjectAccessControlClient { + ObjectAccessControlClient { + acl_url: format!("https://storage.googleapis.com/storage/v1/b/{}/o/{}/acl", crate::percent_encode(bucket), crate::percent_encode(object)), + client: self + } + } + + pub(crate) async fn get_headers(&self) -> Result { + let mut result = reqwest::header::HeaderMap::new(); + let token = self.token_cache.get(&self.reqwest, self.service_account.client_email.clone(), self.service_account.private_key.as_bytes()).await?; + result.insert( + reqwest::header::AUTHORIZATION, + format!("Bearer {}", token).parse().unwrap(), + ); + Ok(result) + } +} + +/// A [`CloudStorageClientBuilder`] can be used to create a [`CloudStorageClient`] with custom configuration. +#[derive(Default)] +pub struct CloudStorageClientBuilder { + client: Option, + /// Static `Token` struct that caches + token_cache: Option>, + service_account: Option +} + +impl CloudStorageClientBuilder { + /// Constructs a new ClientBuilder + pub fn new() -> Self { + Default::default() + } + + /// Returns a `Client` that uses this `ClientBuilder` configuration. 
+ pub fn build(self) -> CloudStorageClient { + CloudStorageClient { + reqwest: self.client.unwrap_or_default(), + token_cache: self.token_cache.unwrap_or(sync::Arc::new(crate::Token::default())), + service_account: self.service_account.unwrap_or_default() + } + } + + /// Sets refreshable token + pub fn with_cache(&mut self, token: impl TokenCache + 'static) -> &mut Self { + self.token_cache = Some(sync::Arc::new(token)); + self + } + + /// Sets service account + pub fn with_service_account(&mut self, service_account: crate::ServiceAccount) -> &mut Self { + self.service_account = Some(service_account); + self + } + + /// Sets internal [reqwest Client](https://docs.rs/reqwest/latest/reqwest/struct.Client.html) + pub fn with_reqwest_client(&mut self, reqwest_client: reqwest::Client) -> &mut Self { + self.client = Some(reqwest_client); + self + } +} diff --git a/src/client/object.rs b/src/client/object.rs index 8835423..094ceb8 100644 --- a/src/client/object.rs +++ b/src/client/object.rs @@ -4,7 +4,7 @@ use crate::{models::{CreateParameters, ObjectList, ReadParameters, UpdateParamet /// Operations on [`Object`](Object)s. 
#[derive(Debug)] pub struct ObjectClient<'a> { - pub(crate) client: &'a super::client::Client, + pub(crate) client: &'a super::CloudStorageClient, pub(crate) base_url: String, pub(crate) insert_url: String, } @@ -18,11 +18,11 @@ impl<'a> ObjectClient<'a> { /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { /// # fn read_cute_cat(_in: &str) -> Vec { vec![0, 1] } - /// use cloud_storage::Client; - /// use cloud_storage::Object; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::Object; /// /// let file: Vec = read_cute_cat("cat.png"); - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// client.object("cat-photos").create(file, "recently read cat.png", "image/png", None).await?; /// # Ok(()) /// # } @@ -62,11 +62,11 @@ impl<'a> ObjectClient<'a> { /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { /// # fn read_cute_cat(_in: &str) -> Vec { vec![0, 1] } - /// use cloud_storage::Client; - /// use cloud_storage::Object; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::Object; /// /// let file: Vec = read_cute_cat("cat.png"); - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let metadata = serde_json::json!({ /// "metadata": { /// "custom_id": "1234" @@ -112,16 +112,21 @@ impl<'a> ObjectClient<'a> { /// ```rust,no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::Object; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::Object; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let file = reqwest::Client::new() /// .get("https://my_domain.rs/nice_cat_photo.png") /// .send() /// .await? 
/// .bytes_stream(); - /// client.object("cat-photos").create_streamed(file, 10, "recently read cat.png", "image/png").await?; + /// let metadata = serde_json::json!({ + /// "metadata": { + /// "custom_id": "1234" + /// } + /// }); + /// client.object("cat-photos").create_streamed_with(file, "recently read cat.png", "image/png", &metadata).await?; /// # Ok(()) /// # } /// ``` @@ -168,10 +173,10 @@ impl<'a> ObjectClient<'a> { /// ```rust,no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::Object; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::Object; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let file = reqwest::Client::new() /// .get("https://my_domain.rs/nice_cat_photo.png") /// .send() @@ -222,10 +227,10 @@ impl<'a> ObjectClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::{Object, ListRequest}; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::{Object, ListRequest}; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let all_objects = client.object("my_bucket").list(ListRequest::default()).await?; /// # Ok(()) /// # } @@ -321,10 +326,10 @@ impl<'a> ObjectClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::Object; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::Object; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let object = client.object("my_bucket").read("path/to/my/file.png", None).await?; /// # Ok(()) /// # } @@ -358,10 +363,10 @@ impl<'a> ObjectClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - 
/// use cloud_storage::Object; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::Object; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let bytes = client.object("my_bucket").download("path/to/my/file.png", None).await?; /// # Ok(()) /// # } @@ -396,17 +401,18 @@ impl<'a> ObjectClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::Object; - /// use futures_util::stream::StreamExt; - /// use tokio::fs::File; - /// use tokio::io::{AsyncWriteExt, BufWriter}; - /// - /// let client = Client::default(); - /// let mut stream = client.object("my_bucket").download_streamed("path/to/my/file.png", None).await?; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::Object; + /// # use futures_util::stream::StreamExt; + /// # use tokio::fs::File; + /// # use tokio::io::{AsyncWriteExt, BufWriter}; + /// # use bytes::Buf; + /// let cloud_storage_client = CloudStorageClient::default(); + /// let client = cloud_storage_client.object("my_bucket"); + /// let mut stream = client.download_streamed("path/to/my/file.png", None).await?; /// let mut file = BufWriter::new(File::create("file.png").await.unwrap()); /// while let Some(byte) = stream.next().await { - /// file.write_all(&[byte.unwrap()]).await.unwrap(); + /// file.write_all(byte.unwrap().chunk()).await.unwrap(); /// } /// file.flush().await?; /// # Ok(()) @@ -439,18 +445,19 @@ impl<'a> ObjectClient<'a> { /// information in `object`. /// /// Note that if the `name` or `bucket` fields are changed, the object will not be found. - /// See [`rewrite`] or [`copy`] for similar operations. + /// See [`rewrite`](Self::rewrite()) or [`copy`](Self::copy()) for similar operations. 
/// ### Example /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::Object; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::Object; /// - /// let client = Client::default(); - /// let mut object = client.object("my_bucket").read("path/to/my/file.png", None).await?; + /// let cloud_storage_client = CloudStorageClient::default(); + /// let client = cloud_storage_client.object("my_bucket"); + /// let mut object = client.read("path/to/my/file.png", None).await?; /// object.content_type = Some("application/xml".to_string()); - /// client.object().update(&object, None).await?; + /// client.update(&object, None).await?; /// # Ok(()) /// # } /// ``` @@ -483,10 +490,10 @@ impl<'a> ObjectClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::Object; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::Object; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// client.object("my_bucket").delete("path/to/my/file.png", None).await?; /// # Ok(()) /// # } @@ -520,10 +527,10 @@ impl<'a> ObjectClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::object::{Object, ComposeRequest, SourceObject}; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::{Object, ComposeRequest, SourceObject}; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let obj1 = client.object("my_bucket").read("file1", None).await?; /// let obj2 = client.object("my_bucket").read("file2", None).await?; /// let compose_request = ComposeRequest { @@ -577,10 +584,10 @@ impl<'a> ObjectClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use 
cloud_storage::Client; - /// use cloud_storage::object::{Object, ComposeRequest}; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::{Object, ComposeRequest}; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let obj1 = client.object("my_bucket").read("file1", None).await?; /// let obj2 = client.object("my_bucket").copy(&obj1, "my_other_bucket", "file2", None).await?; /// // obj2 is now a copy of obj1. @@ -630,10 +637,10 @@ impl<'a> ObjectClient<'a> { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Client; - /// use cloud_storage::object::Object; + /// # use cloud_storage::CloudStorageClient; + /// # use cloud_storage::models::Object; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let obj1 = client.object("my_bucket").read("file1", None).await?; /// let obj2 = client.object("my_bucket").rewrite(&obj1, "my_other_bucket", "file2", None).await?; /// // obj2 is now a copy of obj1. diff --git a/src/client/object_access_control.rs b/src/client/object_access_control.rs index ae1357c..3aed41d 100644 --- a/src/client/object_access_control.rs +++ b/src/client/object_access_control.rs @@ -4,7 +4,7 @@ use crate::{models::{create, ObjectAccessControl, ListResponse, Entity, Response /// Operations on [`ObjectAccessControl`](ObjectAccessControl)s. 
#[derive(Debug)] pub struct ObjectAccessControlClient<'a> { - pub(crate) client: &'a super::client::Client, + pub(crate) client: &'a super::CloudStorageClient, pub(crate) acl_url: String, } diff --git a/src/download_options.rs b/src/download_options.rs index caf4fd1..59b75df 100644 --- a/src/download_options.rs +++ b/src/download_options.rs @@ -9,7 +9,7 @@ impl DownloadOptions { /// /// ### Example /// ```rust - /// use cloud_storage::DownloadOptions; + /// # use cloud_storage::DownloadOptions; /// /// let opts = DownloadOptions::new(); /// ``` @@ -21,7 +21,7 @@ impl DownloadOptions { /// /// ### Example /// ```rust - /// use cloud_storage::DownloadOptions; + /// # use cloud_storage::DownloadOptions; /// /// let opts = DownloadOptions::new() /// .content_disposition("attachment"); diff --git a/src/global_client/bucket.rs b/src/global_client/bucket.rs index ed74ae7..e3b671e 100644 --- a/src/global_client/bucket.rs +++ b/src/global_client/bucket.rs @@ -2,15 +2,15 @@ use crate::{Bucket, models::{create, IamPolicy, TestIamPermission}, Error}; impl Bucket { /// Creates a new `Bucket`. There are many options that you can provide for creating a new - /// bucket, so the `NewBucket` resource contains all of them. Note that `NewBucket` implements + /// bucket, so the `create::Bucket` resource contains all of them. Note that `create::Bucket` implements /// `Default`, so you don't have to specify the fields you're not using. And error is returned /// if that bucket name is already taken. 
/// ### Example /// ``` /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::bucket::{Bucket, create::Bucket}; - /// use cloud_storage::bucket::{Location, MultiRegion}; + /// # use cloud_storage::models::{Bucket, create}; + /// # use cloud_storage::models::{Location, MultiRegion}; /// /// let new_bucket = create::Bucket { /// name: "cloud-storage-rs-doc-1".to_string(), // this is the only mandatory field @@ -44,7 +44,7 @@ impl Bucket { /// ``` /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Bucket; + /// # use cloud_storage::Bucket; /// /// let buckets = Bucket::list().await?; /// # Ok(()) @@ -68,8 +68,8 @@ impl Bucket { /// ``` /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Bucket; - /// # use cloud_storage::bucket::NewBucket; + /// # use cloud_storage::Bucket; + /// # use cloud_storage::models::create; /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-2".to_string(), /// # ..Default::default() @@ -100,8 +100,8 @@ impl Bucket { /// ``` /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::bucket::{Bucket, RetentionPolicy}; - /// # use cloud_storage::bucket::NewBucket; + /// # use cloud_storage::models::{Bucket, RetentionPolicy}; + /// # use cloud_storage::models::create; /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-3".to_string(), /// # ..Default::default() @@ -139,8 +139,8 @@ impl Bucket { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Bucket; - /// # use cloud_storage::bucket::NewBucket; + /// # use cloud_storage::Bucket; + /// # use cloud_storage::models::create; /// # let new_bucket = create::Bucket { /// # name: "unnecessary-bucket".to_string(), /// # ..Default::default() @@ -170,8 +170,8 @@ impl Bucket { /// ``` /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use 
cloud_storage::Bucket; - /// # use cloud_storage::bucket::NewBucket; + /// # use cloud_storage::Bucket; + /// # use cloud_storage::models::create; /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-4".to_string(), /// # ..Default::default() @@ -202,9 +202,9 @@ impl Bucket { /// ``` /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Bucket; - /// use cloud_storage::bucket::{IamPolicy, Binding, IamRole, StandardIamRole, Entity}; - /// # use cloud_storage::bucket::NewBucket; + /// # use cloud_storage::Bucket; + /// # use cloud_storage::models::{IamPolicy, Binding, IamRole, StandardIamRole, Entity}; + /// # use cloud_storage::models::create; /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-5".to_string(), /// # ..Default::default() @@ -246,8 +246,7 @@ impl Bucket { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Bucket; - /// + /// # use cloud_storage::Bucket; /// let bucket = Bucket::read("my_bucket").await?; /// bucket.test_iam_permission("storage.buckets.get").await?; /// # Ok(()) diff --git a/src/global_client/bucket_access_control.rs b/src/global_client/bucket_access_control.rs index 8eeab7f..d1046a2 100644 --- a/src/global_client/bucket_access_control.rs +++ b/src/global_client/bucket_access_control.rs @@ -12,8 +12,8 @@ impl BucketAccessControl { /// ```rust,no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::bucket_access_control::{BucketAccessControl, create::BucketAccessControl}; - /// use cloud_storage::bucket_access_control::{Role, Entity}; + /// # use cloud_storage::models::{BucketAccessControl, create}; + /// # use cloud_storage::models::{Role, Entity}; /// /// let new_bucket_access_control = create::BucketAccessControl { /// entity: Entity::AllUsers, @@ -55,7 +55,7 @@ impl BucketAccessControl { /// ```rust,no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> 
{ - /// use cloud_storage::bucket_access_control::BucketAccessControl; + /// # use cloud_storage::models::BucketAccessControl; /// /// let acls = BucketAccessControl::list("my_bucket").await?; /// # Ok(()) @@ -87,7 +87,7 @@ impl BucketAccessControl { /// ```rust,no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; + /// # use cloud_storage::models::{BucketAccessControl, Entity}; /// /// let controls = BucketAccessControl::read("my_bucket", &Entity::AllUsers).await?; /// # Ok(()) @@ -119,7 +119,7 @@ impl BucketAccessControl { /// ```rust,no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; + /// # use cloud_storage::models::{BucketAccessControl, Entity}; /// /// let mut acl = BucketAccessControl::read("my_bucket", &Entity::AllUsers).await?; /// acl.entity = Entity::AllAuthenticatedUsers; @@ -153,7 +153,7 @@ impl BucketAccessControl { /// ```rust,no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; + /// # use cloud_storage::models::{BucketAccessControl, Entity}; /// /// let controls = BucketAccessControl::read("my_bucket", &Entity::AllUsers).await?; /// controls.delete().await?; diff --git a/src/global_client/default_object_access_control.rs b/src/global_client/default_object_access_control.rs index 1d138d0..49eff3d 100644 --- a/src/global_client/default_object_access_control.rs +++ b/src/global_client/default_object_access_control.rs @@ -10,10 +10,9 @@ impl DefaultObjectAccessControl { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::default_object_access_control::{ - /// DefaultObjectAccessControl, create::DefaultObjectAccessControl, Role, Entity, - /// }; - /// + /// # use cloud_storage::models::{ + /// # 
DefaultObjectAccessControl, create, Role, Entity, + /// # }; /// let new_acl = create::DefaultObjectAccessControl { /// entity: Entity::AllAuthenticatedUsers, /// role: Role::Reader, @@ -51,8 +50,7 @@ impl DefaultObjectAccessControl { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::default_object_access_control::DefaultObjectAccessControl; - /// + /// # use cloud_storage::models::DefaultObjectAccessControl; /// let default_acls = DefaultObjectAccessControl::list("my_bucket").await?; /// # Ok(()) /// # } @@ -83,8 +81,7 @@ impl DefaultObjectAccessControl { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::default_object_access_control::{DefaultObjectAccessControl, Entity}; - /// + /// # use cloud_storage::models::{DefaultObjectAccessControl, Entity}; /// let default_acl = DefaultObjectAccessControl::read("my_bucket", &Entity::AllUsers).await?; /// # Ok(()) /// # } @@ -112,8 +109,7 @@ impl DefaultObjectAccessControl { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::default_object_access_control::{DefaultObjectAccessControl, Entity}; - /// + /// # use cloud_storage::models::{DefaultObjectAccessControl, Entity}; /// let mut default_acl = DefaultObjectAccessControl::read("my_bucket", &Entity::AllUsers).await?; /// default_acl.entity = Entity::AllAuthenticatedUsers; /// default_acl.update().await?; @@ -142,7 +138,7 @@ impl DefaultObjectAccessControl { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::default_object_access_control::{DefaultObjectAccessControl, Entity}; + /// # use cloud_storage::models::{DefaultObjectAccessControl, Entity}; /// /// let mut default_acl = DefaultObjectAccessControl::read("my_bucket", &Entity::AllUsers).await?; /// default_acl.delete().await?; diff --git a/src/global_client/hmac_key.rs b/src/global_client/hmac_key.rs index 
2b3722e..652bbc7 100644 --- a/src/global_client/hmac_key.rs +++ b/src/global_client/hmac_key.rs @@ -12,10 +12,10 @@ impl HmacKey { /// ``` /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::hmac_key::HmacKey; + /// # use cloud_storage::models::HmacKey; /// /// let hmac_key = HmacKey::create().await?; - /// # use cloud_storage::hmac_key::HmacState; + /// # use cloud_storage::models::HmacState; /// # HmacKey::update(&hmac_key.metadata.access_id, HmacState::Inactive).await?; /// # HmacKey::delete(&hmac_key.metadata.access_id).await?; /// # Ok(()) @@ -47,7 +47,7 @@ impl HmacKey { /// ``` /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::hmac_key::HmacKey; + /// # use cloud_storage::models::HmacKey; /// /// let all_hmac_keys = HmacKey::list().await?; /// # Ok(()) @@ -79,7 +79,7 @@ impl HmacKey { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::hmac_key::HmacKey; + /// # use cloud_storage::models::HmacKey; /// /// let key = HmacKey::read("some identifier").await?; /// # Ok(()) @@ -110,7 +110,7 @@ impl HmacKey { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::hmac_key::{HmacKey, HmacState}; + /// # use cloud_storage::models::{HmacKey, HmacState}; /// /// let key = HmacKey::update("your key", HmacState::Active).await?; /// # Ok(()) @@ -142,7 +142,7 @@ impl HmacKey { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::hmac_key::{HmacKey, HmacState}; + /// # use cloud_storage::models::{HmacKey, HmacState}; /// /// let key = HmacKey::update("your key", HmacState::Inactive).await?; // this is required. 
/// HmacKey::delete(&key.access_id).await?; diff --git a/src/global_client/mod.rs b/src/global_client/mod.rs index 0ddd237..d898c1c 100644 --- a/src/global_client/mod.rs +++ b/src/global_client/mod.rs @@ -6,7 +6,7 @@ mod object_access_control; mod object; use once_cell::sync::Lazy; -pub(crate) static CLOUD_CLIENT: Lazy = Lazy::new(crate::client::Client::default); +pub(crate) static CLOUD_CLIENT: Lazy = Lazy::new(crate::client::CloudStorageClient::default); #[cfg(test)] pub(crate) use self::test_helpers::*; diff --git a/src/global_client/object.rs b/src/global_client/object.rs index fc45135..ffd49b7 100644 --- a/src/global_client/object.rs +++ b/src/global_client/object.rs @@ -11,7 +11,7 @@ impl Object { /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { /// # fn read_cute_cat(_in: &str) -> Vec { vec![0, 1] } - /// use cloud_storage::Object; + /// # use cloud_storage::Object; /// /// let file: Vec = read_cute_cat("cat.png"); /// Object::create("cat-photos", file, "recently read cat.png", "image/png", None).await?; @@ -54,7 +54,7 @@ impl Object { /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { /// # fn read_cute_cat(_in: &str) -> Vec { vec![0, 1] } - /// use cloud_storage::Object; + /// # use cloud_storage::Object; /// /// let file: Vec = read_cute_cat("cat.png"); /// let metadata = serde_json::json!({ @@ -62,7 +62,7 @@ impl Object { /// "custom_id": "1234" /// } /// }); - /// Object::create("cat-photos", file, "recently read cat.png", "image/png", &metadata).await?; + /// Object::create_with("cat-photos", file, "recently read cat.png", "image/png", &metadata).await?; /// # Ok(()) /// # } /// ``` @@ -102,7 +102,7 @@ impl Object { /// ```rust,no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Object; + /// # use cloud_storage::Object; /// /// let file = reqwest::Client::new() /// .get("https://my_domain.rs/nice_cat_photo.png") @@ -164,7 +164,7 @@ impl Object { /// ```no_run /// # #[tokio::main] 
/// # async fn main() -> Result<(), Box> { - /// use cloud_storage::{Object, ListRequest}; + /// # use cloud_storage::{Object, ListRequest}; /// /// let all_objects = Object::list("my_bucket", ListRequest::default()).await?; /// # Ok(()) @@ -196,7 +196,7 @@ impl Object { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Object; + /// # use cloud_storage::Object; /// /// let object = Object::read("my_bucket", "path/to/my/file.png", None).await?; /// # Ok(()) @@ -231,7 +231,7 @@ impl Object { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Object; + /// # use cloud_storage::Object; /// /// let bytes = Object::download("my_bucket", "path/to/my/file.png", None).await?; /// # Ok(()) @@ -267,15 +267,16 @@ impl Object { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Object; - /// use futures_util::stream::StreamExt; - /// use std::fs::File; - /// use std::io::{BufWriter, Write}; + /// # use cloud_storage::Object; + /// # use futures_util::stream::StreamExt; + /// # use std::fs::File; + /// # use std::io::{BufWriter, Write}; + /// # use bytes::Buf; /// /// let mut stream = Object::download_streamed("my_bucket", "path/to/my/file.png", None).await?; /// let mut file = BufWriter::new(File::create("file.png").unwrap()); - /// while let Some(byte) = stream.next().await { - /// file.write_all(&[byte.unwrap()]).unwrap(); + /// while let Some(bytes) = stream.next().await { + /// file.write_all(bytes.unwrap().chunk()).unwrap(); /// } /// # Ok(()) /// # } @@ -296,7 +297,7 @@ impl Object { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Object; + /// # use cloud_storage::Object; /// /// let mut object = Object::read("my_bucket", "path/to/my/file.png", None).await?; /// object.content_type = Some("application/xml".to_string()); @@ -322,7 +323,7 @@ impl Object { /// 
```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::Object; + /// # use cloud_storage::Object; /// /// Object::delete("my_bucket", "path/to/my/file.png", None).await?; /// # Ok(()) @@ -357,7 +358,7 @@ impl Object { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::object::{Object, ComposeRequest, SourceObject}; + /// # use cloud_storage::models::{Object, ComposeRequest, SourceObject}; /// /// let obj1 = Object::read("my_bucket", "file1", None).await?; /// let obj2 = Object::read("my_bucket", "file2", None).await?; @@ -414,7 +415,7 @@ impl Object { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::object::{Object, ComposeRequest}; + /// # use cloud_storage::models::{Object, ComposeRequest}; /// /// let obj1 = Object::read("my_bucket", "file1", None).await?; /// let obj2 = obj1.copy("my_other_bucket", "file2", None).await?; @@ -460,7 +461,7 @@ impl Object { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::object::Object; + /// # use cloud_storage::models::Object; /// /// let obj1 = Object::read("my_bucket", "file1", None).await?; /// let obj2 = obj1.rewrite("my_other_bucket", "file2", None).await?; diff --git a/src/lib.rs b/src/lib.rs index 018ffc2..d96db04 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -19,7 +19,7 @@ //! Add the following line to your `Cargo.toml` //! ```toml //! [dependencies] -//! cloud-storage = "0.10" +//! cloud-storage = "1.0.0" //! ``` //! The two most important concepts are [Buckets](bucket/struct.Bucket.html), which represent //! file systems, and [Objects](object/struct.Object.html), which represent files. @@ -27,10 +27,10 @@ //! ## Examples: //! Creating a new Bucket in Google Cloud Storage: //! ```rust -//! # use cloud_storage::{Client, Bucket, create::Bucket}; +//! # use cloud_storage::{CloudStorageClient, Bucket, create}; //! 
# #[tokio::main] //! # async fn main() -> Result<(), Box> { -//! let client = Client::default(); +//! let client = CloudStorageClient::default(); //! let bucket = client.bucket().create(&create::Bucket { //! name: "doctest-bucket".to_string(), //! ..Default::default() @@ -41,17 +41,17 @@ //! ``` //! Connecting to an existing Bucket in Google Cloud Storage: //! ```no_run -//! # use cloud_storage::{Client, Bucket}; +//! # use cloud_storage::{CloudStorageClient, Bucket}; //! # #[tokio::main] //! # async fn main() -> Result<(), Box> { -//! let client = Client::default(); +//! let client = CloudStorageClient::default(); //! let bucket = client.bucket().read("my_bucket").await?; //! # Ok(()) //! # } //! ``` //! Read a file from disk and store it on googles server: //! ```rust,no_run -//! # use cloud_storage::{Client, Object}; +//! # use cloud_storage::{CloudStorageClient, Object}; //! # use std::fs::File; //! # use std::io::Read; //! # #[tokio::main] @@ -60,30 +60,31 @@ //! for byte in File::open("myfile.txt")?.bytes() { //! bytes.push(byte?) //! } -//! let client = Client::default(); -//! client.object("my_bucket").create(bytes, "myfile.txt", "text/plain").await?; +//! let client = CloudStorageClient::default(); +//! client.object("my_bucket").create(bytes, "myfile.txt", "text/plain", None).await?; //! # Ok(()) //! # } //! ``` //! Renaming/moving a file //! ```rust,no_run -//! # use cloud_storage::{Client, Object}; +//! # use cloud_storage::{CloudStorageClient, Object}; //! # #[tokio::main] //! # async fn main() -> Result<(), Box> { -//! let client = Client::default(); -//! let mut object = client.object("my_bucket").read("myfile").await?; +//! let cloud_storage_client = CloudStorageClient::default(); +//! let client = cloud_storage_client.object("my_bucket"); +//! let mut object = client.read("myfile", None).await?; //! object.content_type = Some("application/xml".to_string()); -//! client.object().update(&object).await?; +//! client.update(&object, None).await?; //! 
# Ok(()) //! # } //! ``` //! Removing a file //! ```rust,no_run -//! # use cloud_storage::{Client, Object}; +//! # use cloud_storage::{CloudStorageClient, Object}; //! # #[tokio::main] //! # async fn main() -> Result<(), Box> { -//! let client = Client::default(); -//! client.object("my_bucket").delete("myfile").await?; +//! let client = CloudStorageClient::default(); +//! client.object("my_bucket").delete("myfile", None).await?; //! # Ok(()) //! # } //! ``` @@ -109,10 +110,10 @@ mod sized_byte_stream; use crate::global_client::CLOUD_CLIENT; pub use crate::{ - client::Client, + client::{CloudStorageClient, CloudStorageClientBuilder}, download_options::DownloadOptions, error::Error, - models::{Bucket, ListRequest, Object}, + models::{Bucket, ListRequest, Object, create}, configuration::ServiceAccount, token::{Token, TokenCache}, }; @@ -161,5 +162,5 @@ pub(crate) fn percent_encode(input: &str) -> String { #[cfg(feature = "sync")] fn runtime() -> Result { - Ok(tokio::runtime::Builder::new_current_thread().thread_name("cloud-storage-worker").enable_time().enable_io().build()?) + Ok(tokio::runtime::Builder::new_multi_thread().thread_name("cloud-storage-worker").enable_time().enable_io().build()?) } \ No newline at end of file diff --git a/src/models/cors.rs b/src/models/cors.rs index e02f71a..d31aee7 100644 --- a/src/models/cors.rs +++ b/src/models/cors.rs @@ -11,7 +11,7 @@ pub struct Cors { #[serde(default)] pub method: Vec, /// The list of HTTP headers other than the simple response headers to give permission for the - /// user-agent to share across domains. + /// user-agent to share across domains. #[serde(default)] pub response_header: Vec, /// The value, in seconds, to return in the Access-Control-Max-Age header used in preflight diff --git a/src/models/create/mod.rs b/src/models/create/mod.rs index 9dce89f..a812ea3 100644 --- a/src/models/create/mod.rs +++ b/src/models/create/mod.rs @@ -1,3 +1,4 @@ +//! 
Creation models used for communication with Google Cloud Platform mod bucket; mod bucket_access_control; mod default_object_access_control; @@ -5,7 +6,7 @@ mod default_object_access_control; //mod payload_format; mod object_access_control; -pub(crate) use self::{ +pub use self::{ bucket::Bucket, bucket_access_control::BucketAccessControl, default_object_access_control::DefaultObjectAccessControl, diff --git a/src/models/create/object_access_control.rs b/src/models/create/object_access_control.rs index e29fef8..d93e291 100644 --- a/src/models/create/object_access_control.rs +++ b/src/models/create/object_access_control.rs @@ -6,8 +6,8 @@ use crate::models::{Entity, Role}; pub struct ObjectAccessControl { /// The entity holding the permission, in one of the following forms: /// - /// user-userId - /// user-email + /// user-userId + /// user-email /// group-groupId /// group-email /// domain-domain diff --git a/src/models/mod.rs b/src/models/mod.rs index 68aad98..57ba6f3 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -1,5 +1,5 @@ //! 
Models used for communication with Google Cloud Platform -pub(crate)mod create; +pub mod create; mod legacy_iam_role; mod test_iam_permission; diff --git a/src/models/object.rs b/src/models/object.rs index b57620f..f5ec6e4 100644 --- a/src/models/object.rs +++ b/src/models/object.rs @@ -111,9 +111,9 @@ impl Object { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::{Client, object::{Object, ComposeRequest}}; + /// # use cloud_storage::{CloudStorageClient, models::{Object, ComposeRequest}}; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let obj1 = client.object("my_bucket").read("file1", None).await?; /// let url = obj1.download_url(50)?; /// // url is now a url to which an unauthenticated user can make a request to download a file @@ -132,9 +132,9 @@ impl Object { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::{Client, object::{Object, ComposeRequest}}; + /// # use cloud_storage::{CloudStorageClient, models::{Object, ComposeRequest}}; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let obj1 = client.object("my_bucket").read("file1", None).await?; /// let url = obj1.download_url(50)?; /// // url is now a url to which an unauthenticated user can make a request to download a file @@ -163,9 +163,9 @@ impl Object { /// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::{Client, object::{Object, ComposeRequest}}; + /// # use cloud_storage::{CloudStorageClient, models::{Object, ComposeRequest}}; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let obj1 = client.object("my_bucket").read("file1", None).await?; /// let url = obj1.upload_url(50)?; /// // url is now a url to which an unauthenticated user can make a PUT request to upload a file @@ -184,10 +184,10 @@ impl Object { 
/// ```no_run /// # #[tokio::main] /// # async fn main() -> Result<(), Box> { - /// use cloud_storage::{Client, object::{Object, ComposeRequest}}; - /// use std::collections::HashMap; + /// # use cloud_storage::{CloudStorageClient, models::{Object, ComposeRequest}}; + /// # use std::collections::HashMap; /// - /// let client = Client::default(); + /// let client = CloudStorageClient::default(); /// let obj1 = client.object("my_bucket").read("file1", None).await?; /// let mut custom_metadata = HashMap::new(); /// custom_metadata.insert(String::from("field"), String::from("value")); @@ -210,10 +210,18 @@ impl Object { /// which is valid for `duration` seconds, and lets the possessor upload new file contents. /// without any authentication. /// ### Example - /// ```no_run - /// pub fn upload_url(&self, duration: u32) -> Result { - /// self.sign(&self.name, duration, "POST", None, &HashMap::new()) - /// } + /// ```ignore + /// # #[tokio::main] + /// # async fn main() -> Result<(), Box> { + /// # use cloud_storage::{CloudStorageClient, models::{Object, ComposeRequest}}; + /// # use std::collections::HashMap; + /// let client = CloudStorageClient::default(); + /// let obj1 = client.object("my_bucket").read("file1", None).await?; + /// let mut custom_metadata = HashMap::new(); + /// custom_metadata.insert(String::from("field"), String::from("value")); + /// let url = obj1.sign(obj1.name, 50, "POST", None, custom_metadata)?; + /// # Ok(()) + /// # } /// ``` #[inline(always)] fn sign( diff --git a/src/models/object_access_control.rs b/src/models/object_access_control.rs index c395973..59c66e5 100644 --- a/src/models/object_access_control.rs +++ b/src/models/object_access_control.rs @@ -35,8 +35,8 @@ pub struct ObjectAccessControl { pub generation: Option, /// The entity holding the permission, in one of the following forms: /// - /// user-userId - /// user-email + /// # user-userId + /// # user-email /// group-groupId /// group-email /// domain-domain diff --git 
a/src/models/response.rs b/src/models/response.rs index 2ff79dd..0efb035 100644 --- a/src/models/response.rs +++ b/src/models/response.rs @@ -12,14 +12,14 @@ pub(crate) enum Response { } /// Enable desugaring for `Response`, e.g. the use of the `?` on an object of type `Response` -/// ```no_run +/// ```ignore,no_run /// if let Response::Error(error) = my_response { /// return error; /// } /// let my_response = my_response.unwrap(); /// ``` /// becomes: -/// ```no_run +/// ```ignore,no_run /// my_response?; /// ``` impl std::ops::Try for Response { diff --git a/src/models/test_iam_permission.rs b/src/models/test_iam_permission.rs index 26ed9ba..e359ffa 100644 --- a/src/models/test_iam_permission.rs +++ b/src/models/test_iam_permission.rs @@ -7,7 +7,7 @@ pub struct TestIamPermission { /// The permissions held by the caller. Permissions are always of the format /// `storage.resource.capability`, where resource is one of buckets or objects. See /// [Cloud Storage IAM Permissions] - /// (https://cloud.google.com/storage/docs/access-control/iam-permissions) for a list of + /// for a list of /// supported permissions. permissions: Vec, } \ No newline at end of file diff --git a/src/sync/bucket.rs b/src/sync/bucket.rs index c8bce6f..02877b3 100644 --- a/src/sync/bucket.rs +++ b/src/sync/bucket.rs @@ -9,17 +9,17 @@ pub struct BucketClient<'a> { impl<'a> BucketClient<'a> { /// Creates a new `Bucket`. There are many options that you can provide for creating a new - /// bucket, so the `NewBucket` resource contains all of them. Note that `NewBucket` implements + /// bucket, so the `create::Bucket` resource contains all of them. Note that `create::Bucket` implements /// `Default`, so you don't have to specify the fields you're not using. An error is returned /// if that bucket name is already taken. 
/// ### Example /// ``` /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::bucket::{Bucket, create::Bucket}; - /// use cloud_storage::bucket::{Location, MultiRegion}; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::{Bucket, create}; + /// # use cloud_storage::models::{Location, MultiRegion}; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let new_bucket = create::Bucket { /// name: "cloud-storage-rs-doc-1".to_string(), // this is the only mandatory field /// location: Location::Multi(MultiRegion::Eu), @@ -43,10 +43,10 @@ impl<'a> BucketClient<'a> { /// ### Example /// ``` /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::Bucket; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::Bucket; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let buckets = client.bucket().list()?; /// # Ok(()) /// # } @@ -59,11 +59,11 @@ impl<'a> BucketClient<'a> { /// ### Example /// ``` /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::Bucket; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::Bucket; /// - /// let client = Client::new()?; - /// # use cloud_storage::bucket::NewBucket; + /// let client = CloudStorageClient::new()?; + /// # use cloud_storage::models::create; /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-2".to_string(), /// # ..Default::default() @@ -84,11 +84,11 @@ impl<'a> BucketClient<'a> { /// ### Example /// ``` /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::bucket::{Bucket, RetentionPolicy}; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::{Bucket, RetentionPolicy}; /// - /// let client = Client::new()?; - /// # use 
cloud_storage::bucket::NewBucket; + /// let client = CloudStorageClient::new()?; + /// # use cloud_storage::models::create; /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-3".to_string(), /// # ..Default::default() @@ -117,11 +117,11 @@ impl<'a> BucketClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::Bucket; - /// - /// let client = Client::new()?; - /// # use cloud_storage::bucket::NewBucket; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::Bucket; + /// # + /// let client = CloudStorageClient::new()?; + /// # use cloud_storage::models::create; /// # let new_bucket = create::Bucket { /// # name: "unnecessary-bucket".to_string(), /// # ..Default::default() @@ -142,20 +142,21 @@ impl<'a> BucketClient<'a> { /// ### Example /// ``` /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::Bucket; - /// - /// let client = Client::new()?; - /// # use cloud_storage::bucket::NewBucket; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::Bucket; + /// # use cloud_storage::models::create; + /// # + /// let cloud_storage_client = CloudStorageClient::new()?; + /// let client = cloud_storage_client.bucket(); /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-4".to_string(), /// # ..Default::default() /// # }; - /// # let _ = client.bucket().create(&new_bucket)?; + /// # let _ = client.create(&new_bucket)?; /// - /// let bucket = client.bucket().read("cloud-storage-rs-doc-4")?; - /// let policy = client.bucket().get_iam_policy(&bucket)?; - /// # client.bucket().delete(bucket)?; + /// let bucket = client.read("cloud-storage-rs-doc-4")?; + /// let policy = client.get_iam_policy(&bucket)?; + /// # client.delete(bucket)?; /// # Ok(()) /// # } /// ``` @@ -168,12 +169,12 @@ impl<'a> BucketClient<'a> { /// ### Example /// ``` /// # fn main() -> 
Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::Bucket; - /// use cloud_storage::bucket::{IamPolicy, Binding, IamRole, StandardIamRole, Entity}; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::Bucket; + /// # use cloud_storage::models::{IamPolicy, Binding, IamRole, StandardIamRole, Entity}; /// - /// let client = Client::new()?; - /// # use cloud_storage::bucket::NewBucket; + /// let client = CloudStorageClient::new()?; + /// # use cloud_storage::models::create; /// # let new_bucket = create::Bucket { /// # name: "cloud-storage-rs-doc-5".to_string(), /// # ..Default::default() @@ -206,11 +207,11 @@ impl<'a> BucketClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::Bucket; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::Bucket; /// - /// let client = Client::new()?; - /// let bucket = client.bucket("my_bucket").read()?; + /// let client = CloudStorageClient::new()?; + /// let bucket = client.bucket().read("my_bucket")?; /// client.bucket().test_iam_permission(&bucket, "storage.buckets.get")?; /// # Ok(()) /// # } diff --git a/src/sync/bucket_access_control.rs b/src/sync/bucket_access_control.rs index 76e61e7..5f9bcce 100644 --- a/src/sync/bucket_access_control.rs +++ b/src/sync/bucket_access_control.rs @@ -19,16 +19,16 @@ impl<'a> BucketAccessControlClient<'a> { /// ### Example /// ```rust,no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::bucket_access_control::{BucketAccessControl, create::BucketAccessControl}; - /// use cloud_storage::bucket_access_control::{Role, Entity}; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::{BucketAccessControl, create}; + /// # use cloud_storage::models::{Role, Entity}; /// - /// let client = Client::new()?; + /// let client = 
CloudStorageClient::new()?; /// let new_bucket_access_control = create::BucketAccessControl { /// entity: Entity::AllUsers, /// role: Role::Reader, /// }; - /// client.bucket_access_control("my_bucket").create_using(&new_bucket_access_control)?; + /// client.bucket_access_control("my_bucket").create(&new_bucket_access_control)?; /// # Ok(()) /// # } /// ``` @@ -48,10 +48,10 @@ impl<'a> BucketAccessControlClient<'a> { /// ### Example /// ```rust,no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::bucket_access_control::BucketAccessControl; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::BucketAccessControl; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let acls = client.bucket_access_control("my_bucket").list()?; /// # Ok(()) /// # } @@ -69,10 +69,10 @@ impl<'a> BucketAccessControlClient<'a> { /// ### Example /// ```rust,no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::{BucketAccessControl, Entity}; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let controls = client.bucket_access_control("my_bucket").read(&Entity::AllUsers)?; /// # Ok(()) /// # } @@ -90,10 +90,10 @@ impl<'a> BucketAccessControlClient<'a> { /// ### Example /// ```rust,no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::{BucketAccessControl, Entity}; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let my_bucket = client.bucket_access_control("my_bucket"); /// let mut acl = 
my_bucket.read(&Entity::AllUsers)?; /// acl.entity = Entity::AllAuthenticatedUsers; @@ -117,10 +117,10 @@ impl<'a> BucketAccessControlClient<'a> { /// ### Example /// ```rust,no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::bucket_access_control::{BucketAccessControl, Entity}; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::{BucketAccessControl, Entity}; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let my_bucket = client.bucket_access_control("my_bucket"); /// let controls = my_bucket.read(&Entity::AllUsers)?; /// my_bucket.delete(controls)?; diff --git a/src/sync/client.rs b/src/sync/client.rs index cede64c..4250340 100644 --- a/src/sync/client.rs +++ b/src/sync/client.rs @@ -4,17 +4,17 @@ use super::{BucketClient, BucketAccessControlClient, DefaultObjectAccessControlC /// The primary synchronous entrypoint to perform operations with Google Cloud Storage. #[derive(Debug)] -pub struct Client { +pub struct CloudStorageClient { runtime: tokio::runtime::Runtime, - client: crate::client::Client, + client: crate::client::CloudStorageClient, } -impl Client { +impl CloudStorageClient { /// Constructs a client with the default token provider, where it attempts to obtain the credentials from the following locations: pub fn new() -> Result { Ok(Self { runtime: crate::runtime()?, - client: crate::Client::default(), + client: crate::CloudStorageClient::default(), }) } @@ -22,11 +22,11 @@ impl Client { pub fn with_cache(token_cache: impl crate::TokenCache + 'static) -> Result { Ok(Self { runtime: crate::runtime()?, - client: crate::Client::with_cache(token_cache), + client: crate::CloudStorageClient::with_cache(token_cache), }) } - /// Synchronous operations on [`Bucket`](crate::bucket::Bucket)s. + /// Synchronous operations on [`Bucket`](crate::Bucket)s. 
pub fn bucket(&self) -> BucketClient { BucketClient { client: self.client.bucket(), @@ -34,7 +34,7 @@ impl Client { } } - /// Synchronous operations on [`BucketAccessControl`](crate::bucket_access_control::BucketAccessControl)s. + /// Synchronous operations on [`BucketAccessControl`](crate::models::BucketAccessControl)s. pub fn bucket_access_control(&self, bucket: &str) -> BucketAccessControlClient { BucketAccessControlClient { client: self.client.bucket_access_control(bucket), @@ -42,7 +42,7 @@ impl Client { } } - /// Synchronous operations on [`DefaultObjectAccessControl`](crate::default_object_access_control::DefaultObjectAccessControl)s. + /// Synchronous operations on [`DefaultObjectAccessControl`](crate::models::DefaultObjectAccessControl)s. pub fn default_object_access_control(&self, bucket: &str) -> DefaultObjectAccessControlClient { DefaultObjectAccessControlClient { client: self.client.default_object_access_control(bucket), @@ -50,7 +50,7 @@ impl Client { } } - /// Synchronous operations on [`HmacKey`](crate::hmac_key::HmacKey)s. + /// Synchronous operations on [`HmacKey`](crate::models::HmacKey)s. pub fn hmac_key(&self) -> HmacKeyClient { HmacKeyClient { client: self.client.hmac_key(), @@ -58,7 +58,7 @@ impl Client { } } - /// Synchronous operations on [`Object`](crate::object::Object)s. + /// Synchronous operations on [`Object`](crate::models::Object)s. pub fn object(&self, bucket: &str) -> ObjectClient { ObjectClient { client: self.client.object(bucket), @@ -66,7 +66,7 @@ impl Client { } } - /// Synchronous operations on [`ObjectAccessControl`](crate::object_access_control::ObjectAccessControl)s. + /// Synchronous operations on [`ObjectAccessControl`](crate::models::ObjectAccessControl)s. 
pub fn object_access_control(&self, bucket: &str, object: &str) -> ObjectAccessControlClient { ObjectAccessControlClient { client: self.client.object_access_control(bucket, object), diff --git a/src/sync/default_object_access_control.rs b/src/sync/default_object_access_control.rs index ce23cbf..c2a54fd 100644 --- a/src/sync/default_object_access_control.rs +++ b/src/sync/default_object_access_control.rs @@ -16,18 +16,19 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::default_object_access_control::{ - /// DefaultObjectAccessControl, create::DefaultObjectAccessControl, Role, Entity, - /// }; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::{ + /// # DefaultObjectAccessControl, create, Role, Entity, + /// # }; /// - /// let client = Client::new()?; + /// let cloud_storage_client = CloudStorageClient::new()?; + /// let client = cloud_storage_client.default_object_access_control("my_bucket"); /// let new_acl = create::DefaultObjectAccessControl { /// entity: Entity::AllAuthenticatedUsers, /// role: Role::Reader, /// }; - /// let default_acl = client.default_object_access_control("my_bucket").create(&new_acl)?; - /// # client.default_object_access_control().delete(default_acl)?; + /// let default_acl = client.create(&new_acl)?; + /// # client.delete(default_acl)?; /// # Ok(()) /// # } /// ``` @@ -49,11 +50,11 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::default_object_access_control::DefaultObjectAccessControl; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::DefaultObjectAccessControl; /// - /// let client = Client::new()?; - /// let default_acls = client.default_object_access_control().list("my_bucket")?; + /// let client = 
CloudStorageClient::new()?; + /// let default_acls = client.default_object_access_control("my_bucket").list()?; /// # Ok(()) /// # } /// ``` @@ -74,10 +75,10 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::default_object_access_control::{DefaultObjectAccessControl, Entity}; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::{DefaultObjectAccessControl, Entity}; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let default_acl = client.default_object_access_control("my_bucket").read(&Entity::AllUsers)?; /// # Ok(()) /// # } @@ -96,13 +97,14 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::default_object_access_control::{DefaultObjectAccessControl, Entity}; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::{DefaultObjectAccessControl, Entity}; /// - /// let client = Client::new()?; - /// let mut default_acl = client.default_object_access_control("my_bucket").read(&Entity::AllUsers)?; + /// let cloud_storage_client = CloudStorageClient::new()?; + /// let client = cloud_storage_client.default_object_access_control("my_bucket"); + /// let mut default_acl = client.read(&Entity::AllUsers)?; /// default_acl.entity = Entity::AllAuthenticatedUsers; - /// client.default_object_access_control().update(&default_acl)?; + /// client.update(&default_acl)?; /// # Ok(()) /// # } /// ``` @@ -124,12 +126,13 @@ impl<'a> DefaultObjectAccessControlClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::default_object_access_control::{DefaultObjectAccessControl, Entity}; + /// # use cloud_storage::sync::CloudStorageClient; + 
/// # use cloud_storage::models::{DefaultObjectAccessControl, Entity}; /// - /// let client = Client::new()?; - /// let mut default_acl = client.default_object_access_control("my_bucket").read(&Entity::AllUsers)?; - /// client.default_object_access_control().delete(default_acl)?; + /// let cloud_storage_client = CloudStorageClient::new()?; + /// let client = cloud_storage_client.default_object_access_control("my_bucket"); + /// let mut default_acl = client.read(&Entity::AllUsers)?; + /// client.delete(default_acl)?; /// # Ok(()) /// # } /// ``` diff --git a/src/sync/hmac_key.rs b/src/sync/hmac_key.rs index d5a8166..244410b 100644 --- a/src/sync/hmac_key.rs +++ b/src/sync/hmac_key.rs @@ -18,12 +18,12 @@ impl<'a> HmacKeyClient<'a> { /// ### Example /// ``` /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::hmac_key::HmacKey; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::HmacKey; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let hmac_key = client.hmac_key().create()?; - /// # use cloud_storage::hmac_key::HmacState; + /// # use cloud_storage::models::HmacState; /// # client.hmac_key().update(&hmac_key.metadata.access_id, HmacState::Inactive)?; /// # client.hmac_key().delete(&hmac_key.metadata.access_id)?; /// # Ok(()) @@ -45,10 +45,10 @@ impl<'a> HmacKeyClient<'a> { /// ### Example /// ``` /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::hmac_key::HmacKey; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::HmacKey; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let all_hmac_keys = client.hmac_key().list()?; /// # Ok(()) /// # } @@ -69,10 +69,10 @@ impl<'a> HmacKeyClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use 
cloud_storage::hmac_key::HmacKey; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::HmacKey; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let key = client.hmac_key().read("some identifier")?; /// # Ok(()) /// # } @@ -93,10 +93,10 @@ impl<'a> HmacKeyClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::hmac_key::{HmacKey, HmacState}; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::{HmacKey, HmacState}; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let key = client.hmac_key().update("your key", HmacState::Active)?; /// # Ok(()) /// # } @@ -115,10 +115,10 @@ impl<'a> HmacKeyClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::hmac_key::{HmacKey, HmacState}; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::{HmacKey, HmacState}; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let key = client.hmac_key().update("your key", HmacState::Inactive)?; // this is required. 
/// client.hmac_key().delete(&key.access_id)?; /// # Ok(()) diff --git a/src/sync/mod.rs b/src/sync/mod.rs index fc26791..f250d9a 100644 --- a/src/sync/mod.rs +++ b/src/sync/mod.rs @@ -10,7 +10,7 @@ mod object_access_control; mod helpers; // for internal use only -pub use client::Client; +pub use client::CloudStorageClient; pub use bucket::BucketClient; pub use bucket_access_control::BucketAccessControlClient; pub use default_object_access_control::DefaultObjectAccessControlClient; diff --git a/src/sync/object.rs b/src/sync/object.rs index e9bed87..3209926 100644 --- a/src/sync/object.rs +++ b/src/sync/object.rs @@ -20,11 +20,11 @@ impl<'a> ObjectClient<'a> { /// ```rust,no_run /// # fn main() -> Result<(), Box> { /// # fn read_cute_cat(_in: &str) -> Vec { vec![0, 1] } - /// use cloud_storage::sync::Client; - /// use cloud_storage::Object; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::Object; /// /// let file: Vec = read_cute_cat("cat.png"); - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// client.object("cat-photos").create(file, "recently read cat.png", "image/png", None)?; /// # Ok(()) /// # } @@ -49,11 +49,11 @@ impl<'a> ObjectClient<'a> { /// ```rust,no_run /// # fn main() -> Result<(), Box> { /// # fn read_cute_cat(_in: &str) -> Vec { vec![0, 1] } - /// use cloud_storage::sync::Client; - /// use cloud_storage::Object; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::Object; /// /// let file: Vec = read_cute_cat("cat.png"); - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let metadata = serde_json::json!({ /// "metadata": { /// "custom_id": "1234" @@ -121,10 +121,10 @@ impl<'a> ObjectClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::{Object, ListRequest}; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use 
cloud_storage::{Object, ListRequest}; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let all_objects = client.object("my_bucket").list(ListRequest::default())?; /// # Ok(()) /// # } @@ -142,10 +142,10 @@ impl<'a> ObjectClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::Object; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::Object; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let object = client.object("my_bucket").read("path/to/my/file.png", None)?; /// # Ok(()) /// # } @@ -163,10 +163,10 @@ impl<'a> ObjectClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::Object; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::Object; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let bytes = client.object("my_bucket").download("path/to/my/file.png", None)?; /// # Ok(()) /// # } @@ -189,12 +189,13 @@ impl<'a> ObjectClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::Object; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::Object; + /// # use std::fs::File; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let file = File::create("somefile")?; - /// let bytes = client.object("my_bucket").download("path/to/my/file.png", file)?; + /// let bytes = client.object("my_bucket").download_streamed("path/to/my/file.png", file)?; /// # Ok(()) /// # } /// ``` @@ -220,13 +221,13 @@ impl<'a> ObjectClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::Object; - /// - /// 
let client = Client::new()?; - /// let mut object = client.object("my_bucket").read("path/to/my/file.png", None)?; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::Object; + /// let client = CloudStorageClient::new()?; + /// let my_bucket = client.object("my_bucket"); + /// let mut object = my_bucket.read("path/to/my/file.png", None)?; /// object.content_type = Some("application/xml".to_string()); - /// client.object().update(&object, None)?; + /// my_bucket.update(&object, None)?; /// # Ok(()) /// # } /// ``` @@ -243,10 +244,10 @@ impl<'a> ObjectClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::Object; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::Object; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// client.object("my_bucket").delete("path/to/my/file.png", None)?; /// # Ok(()) /// # } @@ -264,10 +265,10 @@ impl<'a> ObjectClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::object::{Object, ComposeRequest, SourceObject}; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::{Object, ComposeRequest, SourceObject}; /// - /// let client = Client::new()?; + /// let client = CloudStorageClient::new()?; /// let obj1 = client.object("my_bucket").read("file1", None)?; /// let obj2 = client.object("my_bucket").read("file2", None)?; /// let compose_request = ComposeRequest { @@ -308,12 +309,13 @@ impl<'a> ObjectClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::object::{Object, ComposeRequest}; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::{Object, ComposeRequest}; /// - /// let client = Client::new()?; - /// let obj1 = 
client.object("my_bucket").read("file1", None)?; - /// let obj2 = client.object().copy(&obj1, "my_other_bucket", "file2", None)?; + /// let cloud_storage_client = CloudStorageClient::new()?; + /// let client = cloud_storage_client.object("my_bucket"); + /// let obj1 = client.read("file1", None)?; + /// let obj2 = client.copy(&obj1, "my_other_bucket", "file2", None)?; /// // obj2 is now a copy of obj1. /// # Ok(()) /// # } @@ -344,12 +346,13 @@ impl<'a> ObjectClient<'a> { /// ### Example /// ```no_run /// # fn main() -> Result<(), Box> { - /// use cloud_storage::sync::Client; - /// use cloud_storage::object::Object; + /// # use cloud_storage::sync::CloudStorageClient; + /// # use cloud_storage::models::Object; /// - /// let client = Client::new()?; - /// let obj1 = client.object("my_bucket").read("file1", None)?; - /// let obj2 = client.object().rewrite(&obj1, "my_other_bucket", "file2", None)?; + /// let cloud_storage_client = CloudStorageClient::new()?; + /// let client = cloud_storage_client.object("my_bucket"); + /// let obj1 = client.read("file1", None)?; + /// let obj2 = client.rewrite(&obj1, "my_other_bucket", "file2", None)?; /// // obj2 is now a copy of obj1. /// # Ok(()) /// # } From 3637c98713f42f024ad058e4a5180192479403ff Mon Sep 17 00:00:00 2001 From: SonnyX Date: Thu, 4 May 2023 16:27:39 +0200 Subject: [PATCH 22/26] minor updates --- Cargo.toml | 4 ++-- LICENSE | 2 +- README.md | 6 +++++- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 199e4f9..dce5807 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,11 +1,11 @@ [package] name = "cloud-storage" version = "1.0.0" -authors = ["Luuk Wester ", "Randy von der Weide "] +authors = ["Luuk Wester ", "SonnyX "] edition = "2021" description = "A crate for uploading files to Google cloud storage, and for generating download urls." 
license = "MIT" -repository = "https://github.com/SonnyX/cloud-storage-rs" +repository = "https://github.com/ThouCheese/cloud-storage-rs" documentation = "https://docs.rs/cloud-storage" keywords = ["google", "cloud", "storage"] readme = "README.md" diff --git a/LICENSE b/LICENSE index 9eaa67f..c8ca8f8 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2023 Randy von der Weide +Copyright (c) 2023 Luuk Wester Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index 4f11ac5..bd74a9b 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,11 @@ println!("{}", object.download_url(1000)?); // download link that expires after Object::delete(&bucket.name, "folder/filename.txt", None).await?; ``` -The service account should have the roles `Service Account Token Creator` (for generating access tokens) and `Storage Object Admin` (for generating sign urls to download the files). +When using `CloudStorageClient::default()`, `sync::CloudStorageClient::new()` or the global client, a ServiceAccount will be created based on either of the environment variables: + * `SERVICE_ACCOUNT` or `GOOGLE_APPLICATION_CREDENTIALS` which should contain the path to the `service-account-*******.json` + * `SERVICE_ACCOUNT_JSON` or `GOOGLE_APPLICATION_CREDENTIALS_JSON` containing the contents of `service-account-*******.json` + +The service account requires the roles `Service Account Token Creator` (for generating access tokens) and `Storage Object Admin` (for generating signed urls to download the files). ### Sync If you're not (yet) interested in running an async executor, then `cloud_storage` exposes a sync api. To use it, enable the feature flag `sync`, and then call instead of calling `function().await`, call `function_sync()`. 
From 86f65a7cc1c4c71dbdf172713dda6ed33d433327 Mon Sep 17 00:00:00 2001 From: SonnyX Date: Fri, 19 May 2023 19:58:29 +0200 Subject: [PATCH 23/26] Add missing locations --- src/models/location.rs | 63 +++++++++++++++++++++++++++++++++++++----- src/models/mod.rs | 2 +- src/models/object.rs | 8 ++---- 3 files changed, 60 insertions(+), 13 deletions(-) diff --git a/src/models/location.rs b/src/models/location.rs index fb1c598..25f4b64 100644 --- a/src/models/location.rs +++ b/src/models/location.rs @@ -29,7 +29,9 @@ pub enum SingleRegion { /// All options in Asia. Asia(AsiaLocation), /// All options in Australia. - Australia(AusLocation), + Australia(AustraliaLocation), + /// All options in the Middle East. + MiddleEast(MiddleEastLocation), } /// All options in North America. @@ -39,6 +41,9 @@ pub enum NALocation { /// Store the files in Montréal. #[serde(rename = "NORTHAMERICA-NORTHEAST1")] Montreal, + /// Store the files in Toronto. + #[serde(rename = "NORTHAMERICA-NORTHEAST2")] + Toronto, /// Store the files in Iowa. #[serde(rename = "US-CENTRAL1")] Iowa, @@ -48,12 +53,24 @@ pub enum NALocation { /// Store the files in Northern Virginia. #[serde(rename = "US-EAST4")] NorthernVirginia, + /// Store the files in Columbus. + #[serde(rename = "US-EAST5")] + Columbus, + /// Store the files in Dallas. + #[serde(rename = "US-SOUTH1")] + Dallas, /// Store the files in Oregon. #[serde(rename = "US-WEST1")] Oregon, /// Store the files in Los Angeles. #[serde(rename = "US-WEST2")] LosAngeles, + /// Store the files in Salt Lake City. + #[serde(rename = "US-WEST3")] + SaltLakeCity, + /// Store the files in Las Vegas. + #[serde(rename = "US-WEST4")] + LasVegas, } /// All options in South America. @@ -63,6 +80,21 @@ pub enum SALocation { /// Store the files in Soa Paulo. #[serde(rename = "SOUTHAMERICA-EAST1")] SaoPaulo, + /// Store the files in Santiago. + #[serde(rename = "SOUTHAMERICA-EAST2")] + Santiago, +} + +/// All options in the Middle East. 
+#[allow(clippy::upper_case_acronyms)] +#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] +pub enum MiddleEastLocation { + /// Store the files in Doha. + #[serde(rename = "ME-CENTRAL1")] + Doha, + /// Store the files in Tel Aviv. + #[serde(rename = "ME-WEST1")] + TelAviv, } /// All options in Europe. @@ -92,6 +124,9 @@ pub enum EuropeLocation { /// Store the files in Paris. #[serde(rename = "EUROPE-WEST9")] Paris, + /// Store the files in Turin. + #[serde(rename = "EUROPE-WEST12")] + Turin, /// Store the files in Warsaw. #[serde(rename = "EUROPE-CENTRAL2")] Warsaw, @@ -115,20 +150,32 @@ pub enum AsiaLocation { /// Store the files in Osaka. #[serde(rename = "ASIA-NORTHEAST2")] Osaka, + /// Store the files in Seoul. + #[serde(rename = "ASIA-NORTHEAST3")] + Seoul, /// Store the files in Mumbai. #[serde(rename = "ASIA-SOUTH1")] Mumbai, + /// Store the files in Delhi. + #[serde(rename = "ASIA-SOUTH2")] + Delhi, /// Store the files in Singapore. #[serde(rename = "ASIA-SOUTHEAST1")] Singapore, + /// Store the files in Jakarta. + #[serde(rename = "ASIA-SOUTHEAST2")] + Jakarta, } /// All options in Australia. #[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] -pub enum AusLocation { +pub enum AustraliaLocation { /// Store the files in Sydney. #[serde(rename = "AUSTRALIA-SOUTHEAST1")] Sydney, + /// Store the files in Melbourne. + #[serde(rename = "AUSTRALIA-SOUTHEAST2")] + Melbourne, } /// The possible options for multi-region storage. @@ -137,10 +184,10 @@ pub enum AusLocation { pub enum MultiRegion { /// Data centers in Asia Asia, - /// Data centers in the European Union + /// Data centers within member states of the European Union: /// - /// Object data added to a bucket in the EU multi-region is not stored in the EUROPE-WEST2 or - /// EUROPE-WEST6 data center. + /// Object data added to a bucket in the `EU` multi-region is not stored in the EUROPE-WEST2 (London) or + /// EUROPE-WEST6 (Zurich) data centers. 
Eu, /// Data centers in the United States Us, @@ -150,8 +197,10 @@ pub enum MultiRegion { #[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] #[serde(rename_all = "UPPERCASE")] pub enum DualRegion { - /// EUROPE-NORTH1 and EUROPE-WEST4. Additionally, object metadata may be stored in EUROPE-WEST1. + /// Tokyo and Osaka. + Asia1, + /// Finland and Netherlands. Eur4, - /// US-CENTRAL1 and US-EAST1. Additionally, object metadata may be stored in Tulsa, Oklahoma. + /// Iowa and South Carolina. Nam4, } diff --git a/src/models/mod.rs b/src/models/mod.rs index 57ba6f3..60ca75b 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -100,7 +100,7 @@ pub use self::{ hmac_key::HmacKey, hmac_metadata::HmacMeta, hmac_state::HmacState, - location::{Location, AusLocation, AsiaLocation, EuropeLocation, NALocation, SALocation, DualRegion, MultiRegion, SingleRegion}, + location::{Location, AustraliaLocation, AsiaLocation, EuropeLocation, NALocation, SALocation, DualRegion, MultiRegion, SingleRegion}, customer_encryption::CustomerEncrypton, compose_request::ComposeRequest, source_object::SourceObject, diff --git a/src/models/object.rs b/src/models/object.rs index f5ec6e4..7522790 100644 --- a/src/models/object.rs +++ b/src/models/object.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use crate::Error; -use super::{CustomerEncrypton, Owner, ObjectAccessControl}; +use super::{CustomerEncrypton, Owner, ObjectAccessControl, Location}; /// A resource representing a file in Google Cloud Storage. 
#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] @@ -223,7 +223,6 @@ impl Object { /// # Ok(()) /// # } /// ``` - #[inline(always)] fn sign( &self, file_path: &str, @@ -380,9 +379,8 @@ impl Object { #[inline(always)] fn get_credential_scope(date: &time::OffsetDateTime) -> String { format!( - "{}/henk/storage/goog4_request", - date.format(time::macros::format_description!("[year][month][day]")) - .unwrap() + "{}/auto/storage/goog4_request", + date.format(time::macros::format_description!("[year][month][day]")).unwrap(), ) } } \ No newline at end of file From 0e7a16b008026fa0f45a27008199d873ca4b2e0c Mon Sep 17 00:00:00 2001 From: SonnyX Date: Thu, 25 May 2023 12:51:36 +0200 Subject: [PATCH 24/26] Don't unwrap on dotenv(), convert to option instead --- src/configuration/service_account.rs | 2 +- src/global_client/bucket.rs | 4 ++-- src/global_client/mod.rs | 4 ++-- src/models/notification.rs | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/configuration/service_account.rs b/src/configuration/service_account.rs index aa29cf3..0207d4b 100644 --- a/src/configuration/service_account.rs +++ b/src/configuration/service_account.rs @@ -31,7 +31,7 @@ pub struct ServiceAccount { impl Default for ServiceAccount { fn default() -> Self { #[cfg(feature = "dotenv")] - dotenv::dotenv().unwrap(); + dotenv::dotenv().ok(); let credentials_json = std::env::var("SERVICE_ACCOUNT") .or_else(|_| std::env::var("GOOGLE_APPLICATION_CREDENTIALS")) .map(|path| std::fs::read_to_string(path).expect("SERVICE_ACCOUNT file not found")) diff --git a/src/global_client/bucket.rs b/src/global_client/bucket.rs index e3b671e..c19908b 100644 --- a/src/global_client/bucket.rs +++ b/src/global_client/bucket.rs @@ -277,7 +277,7 @@ mod tests { #[tokio::test] async fn create() -> Result<(), Box> { #[cfg(feature = "dotenv")] - dotenv::dotenv().unwrap(); + dotenv::dotenv().ok(); let base_name = std::env::var("TEST_BUCKET")?; // use a more complex bucket in this test. 
let new_bucket = create::Bucket { @@ -373,7 +373,7 @@ mod tests { #[test] fn create() -> Result<(), Box> { #[cfg(feature = "dotenv")] - dotenv::dotenv().unwrap(); + dotenv::dotenv().ok(); let base_name = std::env::var("TEST_BUCKET")?; // use a more complex bucket in this test. let new_bucket = create::Bucket { diff --git a/src/global_client/mod.rs b/src/global_client/mod.rs index d898c1c..b586b3c 100644 --- a/src/global_client/mod.rs +++ b/src/global_client/mod.rs @@ -17,7 +17,7 @@ mod test_helpers { pub(crate) async fn read_test_bucket() -> Bucket { #[cfg(feature = "dotenv")] - dotenv::dotenv().unwrap(); + dotenv::dotenv().ok(); let name = std::env::var("TEST_BUCKET").unwrap(); match Bucket::read(&name).await { Ok(bucket) => bucket, @@ -50,7 +50,7 @@ mod test_helpers { std::thread::sleep(std::time::Duration::from_millis(1500)); // avoid getting rate limited #[cfg(feature = "dotenv")] - dotenv::dotenv().unwrap(); + dotenv::dotenv().ok(); let base_name = std::env::var("TEST_BUCKET").unwrap(); let name = format!("{}-{}", base_name, name); let new_bucket = create::Bucket { diff --git a/src/models/notification.rs b/src/models/notification.rs index 428c646..0d7b5e9 100644 --- a/src/models/notification.rs +++ b/src/models/notification.rs @@ -98,7 +98,7 @@ mod tests { fn create() { let bucket = crate::global_client::read_test_bucket(); #[cfg(feature = "dotenv")] - dotenv::dotenv().unwrap(); + dotenv::dotenv().ok(); let service_account = crate::ServiceAccount::default(); let topic = format!( "//pubsub.googleapis.com/projects/{}/topics/{}", @@ -129,7 +129,7 @@ mod tests { fn delete() { let bucket = crate::global_client::read_test_bucket(); #[cfg(feature = "dotenv")] - dotenv::dotenv().unwrap(); + dotenv::dotenv().ok(); let service_account = crate::ServiceAccount::default(); let topic = format!( "//pubsub.googleapis.com/projects/{}/topics/{}", From 4652ad16537ccbb625acc39c6533a27a756717ea Mon Sep 17 00:00:00 2001 From: SonnyX Date: Wed, 14 Jun 2023 13:27:55 +0200 Subject: 
[PATCH 25/26] Remove unstable feature try_trait2 --- .cargo/config.toml | 4 +- Cargo.toml | 2 +- src/client/bucket.rs | 14 ++-- src/client/bucket_access_control.rs | 8 +-- src/client/default_object_access_control.rs | 8 +-- src/client/hmac_key.rs | 14 ++-- src/client/object.rs | 22 +++--- src/client/object_access_control.rs | 8 +-- src/error.rs | 6 +- src/lib.rs | 1 - src/models/object.rs | 2 +- src/models/response.rs | 79 ++++++++------------- 12 files changed, 71 insertions(+), 97 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 4eb4ae5..e040630 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,5 +1,3 @@ [env] # Required for linking openssl on windows, has no effect on other platforms. -VCPKGRS_DYNAMIC="1" -# Remove this when https://github.com/rust-lang/rust/issues/84277 becomes stable -RUSTC_BOOTSTRAP="1" \ No newline at end of file +VCPKGRS_DYNAMIC="1" \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index dce5807..146ab92 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,7 +13,7 @@ categories = ["api-bindings", "web-programming"] # maintenance = { status = "actively-developed" } [features] -default = ["native-tls", "ring", "pem", "global-client", "sync", "dotenv"] +default = ["rustls-tls", "global-client", "sync", "dotenv"] global-client = [] sync = ["reqwest/blocking"] diff --git a/src/client/bucket.rs b/src/client/bucket.rs index fbbc819..94e95c8 100644 --- a/src/client/bucket.rs +++ b/src/client/bucket.rs @@ -39,7 +39,7 @@ impl<'a> BucketClient<'a> { let project = &self.project_id; let query = [("project", project)]; let result: crate::models::Response = self.client.reqwest.post(&url).headers(headers).query(&query).json(new_bucket).send().await?.json().await?; - Ok(result?) + Ok(result.ok()?) } /// Returns all `Bucket`s within this project. 
@@ -65,7 +65,7 @@ impl<'a> BucketClient<'a> { let project = &self.project_id; let query = [("project", project)]; let result: crate::models::Response> = self.client.reqwest.get(&url).headers(headers).query(&query).send().await?.json().await?; - Ok(result?.items) + Ok(result.ok()?.items) } /// Returns a single `Bucket` by its name. If the Bucket does not exist, an error is returned. @@ -93,7 +93,7 @@ impl<'a> BucketClient<'a> { let headers = self.client.get_headers().await?; let url = format!("{}/{}", self.bucket_url, crate::percent_encode(name)); let result: crate::models::Response = self.client.reqwest.get(&url).headers(headers).send().await?.json().await?; - Ok(result?) + Ok(result.ok()?) } /// Update an existing `Bucket`. If you declare you bucket as mutable, you can edit its fields. @@ -128,7 +128,7 @@ impl<'a> BucketClient<'a> { let headers = self.client.get_headers().await?; let url = format!("{}/{}", self.bucket_url, crate::percent_encode(&bucket.name),); let result: crate::models::Response = self.client.reqwest.put(&url).headers(headers).json(bucket).send().await?.json().await?; - Ok(result?) + Ok(result.ok()?) } /// Delete an existing `Bucket`. This permanently removes a bucket from Google Cloud Storage. @@ -191,7 +191,7 @@ impl<'a> BucketClient<'a> { let headers = self.client.get_headers().await?; let url = format!("{}/{}/iam", self.bucket_url, crate::percent_encode(&bucket.name)); let result: crate::models::Response = self.client.reqwest.get(&url).headers(headers).send().await?.json().await?; - Ok(result?) + Ok(result.ok()?) } /// Updates the [IAM Policy](https://cloud.google.com/iam/docs/) for this bucket. @@ -236,7 +236,7 @@ impl<'a> BucketClient<'a> { let headers = self.client.get_headers().await?; let url = format!("{}/{}/iam", self.bucket_url, crate::percent_encode(&bucket.name)); let result: crate::models::Response = self.client.reqwest.put(&url).headers(headers).json(iam).send().await?.json().await?; - Ok(result?) + Ok(result.ok()?) 
} /// Checks whether the user provided in the service account has this permission. @@ -269,6 +269,6 @@ impl<'a> BucketClient<'a> { ); let headers = self.client.get_headers().await?; let result: crate::models::Response = self.client.reqwest.get(&url).headers(headers).query(&[("permissions", permission)]).send().await?.json().await?; - Ok(result?) + Ok(result.ok()?) } } diff --git a/src/client/bucket_access_control.rs b/src/client/bucket_access_control.rs index 7ccc305..6542acf 100644 --- a/src/client/bucket_access_control.rs +++ b/src/client/bucket_access_control.rs @@ -37,7 +37,7 @@ impl<'a> BucketAccessControlClient<'a> { ) -> Result { let headers = self.client.get_headers().await?; let result: crate::models::Response = self.client.reqwest.post(&self.bucket_acl_url).headers(headers).json(new_bucket_access_control).send().await?.json().await?; - Ok(result?) + Ok(result.ok()?) } /// Returns all `BucketAccessControl`s related to this bucket. @@ -62,7 +62,7 @@ impl<'a> BucketAccessControlClient<'a> { let headers = self.client.get_headers().await?; let response = self.client.reqwest.get(&self.bucket_acl_url).headers(headers).send().await?; - let object = response.json::>>().await??.items; + let object = response.json::>>().await?.ok()?.items; Ok(object) } @@ -92,7 +92,7 @@ impl<'a> BucketAccessControlClient<'a> { ); let headers = self.client.get_headers().await?; let result: crate::models::Response = self.client.reqwest.get(&url).headers(headers).send().await?.json().await?; - Ok(result?) + Ok(result.ok()?) } /// Update this `BucketAccessControl`. @@ -126,7 +126,7 @@ impl<'a> BucketAccessControlClient<'a> { ); let headers = self.client.get_headers().await?; let result: crate::models::Response = self.client.reqwest.put(&url).headers(headers).json(bucket_access_control).send().await?.json().await?; - Ok(result?) + Ok(result.ok()?) } /// Permanently deletes the ACL entry for the specified entity on the specified bucket. 
diff --git a/src/client/default_object_access_control.rs b/src/client/default_object_access_control.rs index f81328d..5f62898 100644 --- a/src/client/default_object_access_control.rs +++ b/src/client/default_object_access_control.rs @@ -48,7 +48,7 @@ impl<'a> DefaultObjectAccessControlClient<'a> { .send() .await?; - let mut object = response.json::>().await??; + let mut object = response.json::>().await?.ok()?; object.bucket = self.bucket.clone(); Ok(object) } @@ -74,7 +74,7 @@ impl<'a> DefaultObjectAccessControlClient<'a> { let headers = self.client.get_headers().await?; let response = self.client.reqwest.get(&self.base_url).headers(headers).send().await?; - let mut object = response.json::>>().await??.items; + let mut object = response.json::>>().await?.ok()?.items; object = object.into_iter().map(|item| DefaultObjectAccessControl { bucket: self.bucket.to_string(), ..item @@ -119,7 +119,7 @@ impl<'a> DefaultObjectAccessControlClient<'a> { .send() .await?; - let mut object = response.json::>().await??; + let mut object = response.json::>().await?.ok()?; object.bucket = self.bucket.clone(); Ok(object) } @@ -156,7 +156,7 @@ impl<'a> DefaultObjectAccessControlClient<'a> { ); let response = self.client.reqwest.put(&url).headers(headers).json(default_object_access_control).send().await?; - let mut object = response.json::>().await??; + let mut object = response.json::>().await?.ok()?; object.bucket = self.bucket.clone(); Ok(object) } diff --git a/src/client/hmac_key.rs b/src/client/hmac_key.rs index 784e339..c4e3953 100644 --- a/src/client/hmac_key.rs +++ b/src/client/hmac_key.rs @@ -45,7 +45,7 @@ impl<'a> HmacKeyClient<'a> { .await? .json() .await?; - Ok(result?) + Ok(result.ok()?) } /// Retrieves a list of HMAC keys matching the criteria. Since the HmacKey is secret, this does @@ -85,11 +85,11 @@ impl<'a> HmacKeyClient<'a> { // entry, Google will return the response `{ "kind": "storage#hmacKeysMetadata" }` instead // of a list with one element. 
This breaks the parser. match result { - Ok(parsed) => match parsed { - crate::models::Response::Success(s) => Ok(s.items), - crate::models::Response::Error(e) => Err(e.into()), + Ok(parsed) => match parsed.ok() { + Ok(s) => Ok(s.items), + Err(e) => Err(e.into()), }, - Err(_) => Ok(vec![single_result??]), + Err(_) => Ok(vec![single_result?.ok()?]), } } @@ -122,7 +122,7 @@ impl<'a> HmacKeyClient<'a> { .await? .json() .await?; - Ok(result?) + Ok(result.ok()?) } /// Updates the state of an HMAC key. See the HMAC Key resource descriptor for valid states. @@ -160,7 +160,7 @@ impl<'a> HmacKeyClient<'a> { .await? .json() .await?; - Ok(result?) + Ok(result.ok()?) } /// Deletes an HMAC key. Note that a key must be set to `Inactive` first. diff --git a/src/client/object.rs b/src/client/object.rs index 094ceb8..a5bb3fe 100644 --- a/src/client/object.rs +++ b/src/client/object.rs @@ -48,7 +48,7 @@ impl<'a> ObjectClient<'a> { .send() .await?; - let mut object = response.json::>().await??; + let mut object = response.json::>().await?.ok()?; object.private_key = Some(self.client.service_account.private_key.clone()); object.client_email = Some(self.client.service_account.client_email.clone()); Ok(object) @@ -100,7 +100,7 @@ impl<'a> ObjectClient<'a> { .multipart(form) .send() .await?; - let mut object = response.json::>().await??; + let mut object = response.json::>().await?.ok()?; object.private_key = Some(self.client.service_account.private_key.clone()); object.client_email = Some(self.client.service_account.client_email.clone()); Ok(object) @@ -161,7 +161,7 @@ impl<'a> ObjectClient<'a> { .multipart(form) .send() .await?; - let mut object = response.json::>().await??; + let mut object = response.json::>().await?.ok()?; object.private_key = Some(self.client.service_account.private_key.clone()); object.client_email = Some(self.client.service_account.client_email.clone()); Ok(object) @@ -216,7 +216,7 @@ impl<'a> ObjectClient<'a> { .body(body) .send() .await?; - let mut object = 
response.json::>().await??; + let mut object = response.json::>().await?.ok()?; object.private_key = Some(self.client.service_account.private_key.clone()); object.client_email = Some(self.client.service_account.client_email.clone()); Ok(object) @@ -300,9 +300,9 @@ impl<'a> ObjectClient<'a> { Err(e) => return Some((Err(e.into()), state)), }; - let response_body = match result { - crate::models::Response::Success(success) => success, - crate::models::Response::Error(e) => return Some((Err(e.into()), state)), + let response_body = match result.ok() { + Ok(success) => success, + Err(e) => return Some((Err(e.into()), state)), }; let next_state = if let Some(ref page_token) = response_body.next_page_token { @@ -352,7 +352,7 @@ impl<'a> ObjectClient<'a> { .send() .await?; - let mut object = response.json::>().await??; + let mut object = response.json::>().await?.ok()?; object.private_key = Some(self.client.service_account.private_key.clone()); object.client_email = Some(self.client.service_account.client_email.clone()); Ok(object) @@ -479,7 +479,7 @@ impl<'a> ObjectClient<'a> { .send() .await?; - let mut object = response.json::>().await??; + let mut object = response.json::>().await?.ok()?; object.private_key = Some(self.client.service_account.private_key.clone()); object.client_email = Some(self.client.service_account.client_email.clone()); Ok(object) @@ -573,7 +573,7 @@ impl<'a> ObjectClient<'a> { .send() .await?; - let mut object = response.json::>().await??; + let mut object = response.json::>().await?.ok()?; object.private_key = Some(self.client.service_account.private_key.clone()); object.client_email = Some(self.client.service_account.client_email.clone()); Ok(object) @@ -619,7 +619,7 @@ impl<'a> ObjectClient<'a> { .send() .await?; - let mut object = response.json::>().await??; + let mut object = response.json::>().await?.ok()?; object.private_key = Some(self.client.service_account.private_key.clone()); object.client_email = 
Some(self.client.service_account.client_email.clone()); Ok(object) diff --git a/src/client/object_access_control.rs b/src/client/object_access_control.rs index 3aed41d..1f0ecaf 100644 --- a/src/client/object_access_control.rs +++ b/src/client/object_access_control.rs @@ -27,7 +27,7 @@ impl<'a> ObjectAccessControlClient<'a> { .await? .json() .await?; - Ok(result?) + Ok(result.ok()?) } /// Retrieves `ACL` entries on the specified object. @@ -45,7 +45,7 @@ impl<'a> ObjectAccessControlClient<'a> { .send() .await? .json::>>() - .await??; + .await?.ok()?; Ok(result.items) } @@ -71,7 +71,7 @@ impl<'a> ObjectAccessControlClient<'a> { .await? .json() .await?; - Ok(result?) + Ok(result.ok()?) } /// Updates an ACL entry on the specified object. @@ -97,7 +97,7 @@ impl<'a> ObjectAccessControlClient<'a> { .await? .json() .await?; - Ok(result?) + Ok(result.ok()?) } /// Permanently deletes the ACL entry for the specified entity on the specified object. diff --git a/src/error.rs b/src/error.rs index 7a62d18..9d7861f 100644 --- a/src/error.rs +++ b/src/error.rs @@ -8,7 +8,7 @@ pub enum Error { /// If another network error causes something to fail, this variant is used. Reqwest(reqwest::Error), /// If we encounter a problem decoding the private key, this variant is used. - #[cfg(feature = "ring")] + #[cfg(feature = "pem")] Pem(pem::PemError), /// If we encounter a problem parsing the private key, this variant is used. 
#[cfg(feature = "ring")] @@ -46,7 +46,7 @@ impl std::error::Error for Error { Self::Reqwest(e) => Some(e), #[cfg(feature = "openssl")] Self::Ssl(e) => Some(e), - #[cfg(feature = "ring")] + #[cfg(feature = "pem")] Self::Pem(e) => Some(e), #[cfg(feature = "ring")] Self::KeyRejected(e) => Some(e), @@ -72,7 +72,7 @@ impl From for Error { } } -#[cfg(feature = "ring")] +#[cfg(feature = "pem")] impl From for Error { fn from(err: pem::PemError) -> Self { Self::Pem(err) diff --git a/src/lib.rs b/src/lib.rs index d96db04..a3e7973 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,3 @@ -#![feature(try_trait_v2)] //! This crate aims to simplify interacting with the Google Cloud Storage JSON API. Use it until //! Google releases a Cloud Storage Client Library for Rust. Shoutout to //! [MyEmma](https://myemma.io/) for funding this free and open source project. diff --git a/src/models/object.rs b/src/models/object.rs index 7522790..3556f2a 100644 --- a/src/models/object.rs +++ b/src/models/object.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use crate::Error; -use super::{CustomerEncrypton, Owner, ObjectAccessControl, Location}; +use super::{CustomerEncrypton, Owner, ObjectAccessControl}; /// A resource representing a file in Google Cloud Storage. 
#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)] diff --git a/src/models/response.rs b/src/models/response.rs index 0efb035..ab4b153 100644 --- a/src/models/response.rs +++ b/src/models/response.rs @@ -1,59 +1,38 @@ -use std::ops::ControlFlow; +use serde::Deserialize; +use super::ErrorResponse; -use super::{ErrorResponse}; -use crate::Error; +#[derive(Debug)] +pub(crate) struct Response(Result); -#[derive(Debug, serde::Deserialize)] -#[serde(rename = "camelCase")] +#[derive(serde::Deserialize)] #[serde(untagged)] -pub(crate) enum Response { +/// Private Response that will be transformed into Response in the Deserialize trait of Response +enum EnumResponse { Success(T), Error(ErrorResponse), } -/// Enable desugaring for `Response`, e.g. the use of the `?` on an object of type `Response` -/// ```ignore,no_run -/// if let Response::Error(error) = my_response { -/// return error; -/// } -/// let my_response = my_response.unwrap(); -/// ``` -/// becomes: -/// ```ignore,no_run -/// my_response?; -/// ``` -impl std::ops::Try for Response { - type Output = T; - type Residual = Result; - #[inline] - fn from_output(output: Self::Output) -> Self { - Response::Success(output) - } - #[inline] - fn branch(self) -> ControlFlow { - match self { - Response::Success(t) => ControlFlow::Continue(t), - Response::Error(error) => ControlFlow::Break(Err(Error::Google(error))), - } +impl<'de, T: Deserialize<'de>> Deserialize<'de> for Response { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de> { + match EnumResponse::::deserialize(deserializer)? 
{ + EnumResponse::Success(value) => Ok(Response(Ok(value))), + EnumResponse::Error(value) => Ok(Response(Err(value))), + } } } - -impl std::ops::FromResidual> for Response { - #[inline] - #[track_caller] - fn from_residual(residual: ::Residual) -> Self { - if let Err(Error::Google(err)) = residual { - Response::Error(err) - } else { - panic!("Non expected residual type encountered") - } +impl Response { + /// Transform the output into an result + pub fn ok(self) -> Result { + self.0 } } #[cfg(test)] mod tests { - use crate::{models::{ErrorResponse, ErrorList}, Error, Bucket}; + use crate::{Error, Bucket}; use super::Response; @@ -63,7 +42,7 @@ mod tests { let response = serde_json::from_slice::>(response.as_bytes()); let response = response.expect("failed to map response as a response"); - let output = response?; + let output = response.ok()?; assert_eq!(output.kind, "storage#bucket"); Ok(()) } @@ -71,17 +50,15 @@ mod tests { #[test] fn test_try_impl_error() -> Result<(), Error> { let function = || { - let response = Response::Error::<()>(ErrorResponse { - error: ErrorList { - errors: Vec::new(), - code: 250, - message: "Some error occurred".to_string(), - }, - }); - response?; + let response = r#"{"error":{"errors":[{"domain":"global","reason":"required","message":"Login Required","locationType":"header","location":"Authorization"}],"code":401,"message":"Login Required"}}"#; + let response = serde_json::from_slice::>(response.as_bytes()); + response?.ok()?; Ok::<(), Error>(()) }; - assert_eq!(function().is_err(), true); + let result = function(); + let value = format!("{:?}", result); + println!("{}", value); + assert_eq!(result.is_err(), true); Ok(()) } } \ No newline at end of file From e51adbef1618c7399eadae3a6b5d53a5f09b4c38 Mon Sep 17 00:00:00 2001 From: SonnyX Date: Wed, 14 Jun 2023 13:37:01 +0200 Subject: [PATCH 26/26] fix clippy --- src/crypto/ring.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/crypto/ring.rs b/src/crypto/ring.rs 
index fab8e53..bf9dd7f 100644 --- a/src/crypto/ring.rs +++ b/src/crypto/ring.rs @@ -6,7 +6,7 @@ pub fn rsa_pkcs1_sha256(message: &str, private_pem: &[u8]) -> Result, Er }; let key_pem = pem::parse(private_pem)?; - let key = RsaKeyPair::from_pkcs8(&key_pem.contents())?; + let key = RsaKeyPair::from_pkcs8(key_pem.contents())?; let rng = SystemRandom::new(); let mut signature = vec![0; key.public_modulus_len()]; key.sign(&RSA_PKCS1_SHA256, &rng, message.as_bytes(), &mut signature)?;