Add accessor

This commit is contained in:
photino 2023-01-22 01:10:35 +08:00
parent 9ece859580
commit df5cd2ad81
17 changed files with 446 additions and 44 deletions

View File

@ -1,6 +1,6 @@
# zino
`zino` is a full featured web application framework for Rust which focuses on
`zino` is a full-featured web application framework for Rust which focuses on
productivity and performance.
[![Crates.io](https://img.shields.io/crates/v/zino)][zino]

View File

@ -1,6 +1,6 @@
[package]
name = "axum-app"
version = "0.4.3"
version = "0.4.4"
rust-version = "1.68"
edition = "2021"
publish = false
@ -12,9 +12,9 @@ tracing = "0.1.37"
[dependencies.zino]
path = "../../zino"
version = "0.4.3"
version = "0.4.4"
features = ["axum"]
[dependencies.zino-model]
path = "../../zino-model"
version = "0.3.5"
version = "0.3.6"

View File

@ -1,6 +1,6 @@
name = "data-cube"
version = "0.4.3"
version = "0.4.4"
[main]
host = "127.0.0.1"

View File

@ -1,6 +1,6 @@
name = "data-cube"
version = "0.4.3"
version = "0.4.4"
[main]
host = "127.0.0.1"

View File

@ -1,7 +1,7 @@
[package]
name = "zino-core"
description = "Core types and traits for zino."
version = "0.4.3"
version = "0.4.4"
rust-version = "1.68"
edition = "2021"
license = "MIT"
@ -12,12 +12,10 @@ repository = "https://github.com/photino/zino"
documentation = "https://docs.rs/zino-core"
readme = "README.md"
[package.metadata.docs.rs]
features = ["axum"]
[dependencies]
aes-gcm-siv = "0.11.1"
async-trait = "0.1.60"
backon = "0.2.0"
base64 = "0.21.0"
bytes = "1.3.0"
cron = "0.12.0"
@ -48,17 +46,46 @@ url = "2.3.1"
version = "0.4.23"
features = ["serde"]
[dependencies.opendal]
version = "0.25.0"
features = [
"layers-all",
"compress",
"native-tls",
"services-ftp",
"services-ipfs",
"services-memcached",
"services-moka",
"services-redis",
]
[dependencies.serde]
version = "1.0.152"
features = ["derive"]
[dependencies.reqwest]
version = "0.11.13"
features = ["cookies", "gzip", "brotli", "deflate", "json", "multipart", "stream"]
features = [
"cookies",
"gzip",
"brotli",
"deflate",
"json",
"multipart",
"stream",
"native-tls",
"socks",
]
[dependencies.sqlx]
version = "0.6.2"
features = ["runtime-tokio-native-tls", "postgres", "uuid", "chrono", "json"]
features = [
"runtime-tokio-native-tls",
"postgres",
"uuid",
"chrono",
"json",
]
[dependencies.tracing-subscriber]
version = "0.3.16"
@ -66,4 +93,9 @@ features = ["env-filter", "json", "local-time"]
[dependencies.uuid]
version = "1.2.2"
features = ["fast-rng", "serde", "v4", "v7"]
features = [
"fast-rng",
"serde",
"v4",
"v7",
]

View File

@ -0,0 +1,352 @@
//! Unified data access to databases and storage backends.
use crate::state::State;
use backon::ExponentialBackoff;
use opendal::{
layers::{MetricsLayer, RetryLayer, TracingLayer},
services::{
azblob, azdfs, fs, ftp, gcs, ipfs, ipmfs, memcached, memory, moka, obs, oss, redis, s3,
},
Error,
ErrorKind::{Unexpected, Unsupported},
Operator, Result,
};
use std::time::Duration;
/// Storage accessor built on the top of [`opendal`](https://crates.io/crates/opendal).
#[derive(Debug)]
pub struct StorageAccessor {}

impl StorageAccessor {
    /// Creates a new operator for the specific storage backend.
    ///
    /// Resolution order:
    /// 1. The `memory` scheme needs no configuration and is constructed directly.
    /// 2. Otherwise, the `accessor` array in the shared application config is
    ///    searched for a table whose `scheme` and (optional) `name` entries match.
    /// 3. When no `accessor` config exists and `name` is `None`, the operator is
    ///    built from environment variables via [`Operator::from_env`].
    ///
    /// Every returned operator is wrapped with tracing, metrics, and retry
    /// (exponential backoff) layers.
    ///
    /// # Errors
    ///
    /// Fails if the scheme is unsupported, no matching backend is configured,
    /// or the backend builder rejects its configuration.
    pub fn new_operator(scheme: &'static str, name: Option<&'static str>) -> Result<Operator> {
        let config = State::shared().config();
        let operator = if scheme == "memory" {
            let mut builder = memory::Builder::default();
            Ok(Operator::new(builder.build()?))
        } else if let Some(accessors) = config.get("accessor").and_then(|v| v.as_array()) {
            if let Some(accessor) = accessors.iter().filter_map(|v| v.as_table()).find(|t| {
                t.get("scheme").and_then(|v| v.as_str()) == Some(scheme)
                    && t.get("name").and_then(|v| v.as_str()) == name
            }) {
                // Typed readers for the matched accessor table; config keys are
                // kebab-case throughout.
                let get_str = |key: &str| accessor.get(key).and_then(|v| v.as_str());
                let get_u64 = |key: &str| {
                    accessor
                        .get(key)
                        .and_then(|v| v.as_integer())
                        .and_then(|i| u64::try_from(i).ok())
                };
                match scheme {
                    "azblob" => {
                        let mut builder = azblob::Builder::default();
                        if let Some(root) = get_str("root") {
                            builder.root(root);
                        }
                        if let Some(container) = get_str("container") {
                            builder.container(container);
                        }
                        if let Some(endpoint) = get_str("endpoint") {
                            builder.endpoint(endpoint);
                        }
                        if let Some(account_name) = get_str("account-name") {
                            builder.account_name(account_name);
                        }
                        if let Some(account_key) = get_str("account-key") {
                            builder.account_key(account_key);
                        }
                        if let Some(sas_token) = get_str("sas-token") {
                            builder.sas_token(sas_token);
                        }
                        Ok(Operator::new(builder.build()?))
                    }
                    "azdfs" => {
                        let mut builder = azdfs::Builder::default();
                        if let Some(root) = get_str("root") {
                            builder.root(root);
                        }
                        if let Some(filesystem) = get_str("filesystem") {
                            builder.filesystem(filesystem);
                        }
                        if let Some(endpoint) = get_str("endpoint") {
                            builder.endpoint(endpoint);
                        }
                        if let Some(account_name) = get_str("account-name") {
                            builder.account_name(account_name);
                        }
                        if let Some(account_key) = get_str("account-key") {
                            builder.account_key(account_key);
                        }
                        Ok(Operator::new(builder.build()?))
                    }
                    "fs" => {
                        let mut builder = fs::Builder::default();
                        if let Some(root) = get_str("root") {
                            builder.root(root);
                        }
                        if let Some(atomic_write_dir) = get_str("atomic-write-dir") {
                            builder.atomic_write_dir(atomic_write_dir);
                        }
                        Ok(Operator::new(builder.build()?))
                    }
                    "ftp" => {
                        let mut builder = ftp::Builder::default();
                        if let Some(root) = get_str("root") {
                            builder.root(root);
                        }
                        if let Some(endpoint) = get_str("endpoint") {
                            builder.endpoint(endpoint);
                        }
                        if let Some(user) = get_str("user") {
                            builder.user(user);
                        }
                        if let Some(password) = get_str("password") {
                            builder.password(password);
                        }
                        Ok(Operator::new(builder.build()?))
                    }
                    "gcs" => {
                        let mut builder = gcs::Builder::default();
                        if let Some(root) = get_str("root") {
                            builder.root(root);
                        }
                        if let Some(bucket) = get_str("bucket") {
                            builder.bucket(bucket);
                        }
                        if let Some(endpoint) = get_str("endpoint") {
                            builder.endpoint(endpoint);
                        }
                        if let Some(service_account) = get_str("service-account") {
                            builder.service_account(service_account);
                        }
                        if let Some(credential) = get_str("credential") {
                            builder.credential(credential);
                        }
                        if let Some(credential_path) = get_str("credential-path") {
                            builder.credential_path(credential_path);
                        }
                        Ok(Operator::new(builder.build()?))
                    }
                    "ipfs" => {
                        let mut builder = ipfs::Builder::default();
                        if let Some(root) = get_str("root") {
                            builder.root(root);
                        }
                        if let Some(endpoint) = get_str("endpoint") {
                            builder.endpoint(endpoint);
                        }
                        Ok(Operator::new(builder.build()?))
                    }
                    "ipmfs" => {
                        let mut builder = ipmfs::Builder::default();
                        if let Some(root) = get_str("root") {
                            builder.root(root);
                        }
                        if let Some(endpoint) = get_str("endpoint") {
                            builder.endpoint(endpoint);
                        }
                        Ok(Operator::new(builder.build()?))
                    }
                    "memcached" => {
                        let mut builder = memcached::Builder::default();
                        if let Some(root) = get_str("root") {
                            builder.root(root);
                        }
                        if let Some(endpoint) = get_str("endpoint") {
                            builder.endpoint(endpoint);
                        }
                        // TTL values are configured in whole seconds.
                        if let Some(default_ttl) = get_u64("default-ttl") {
                            builder.default_ttl(Duration::from_secs(default_ttl));
                        }
                        Ok(Operator::new(builder.build()?))
                    }
                    "moka" => {
                        let mut builder = moka::Builder::default();
                        if let Some(name) = get_str("name") {
                            builder.name(name);
                        }
                        if let Some(max_capacity) = get_u64("max-capacity") {
                            builder.max_capacity(max_capacity);
                        }
                        if let Some(time_to_live) = get_u64("time-to-live") {
                            builder.time_to_live(Duration::from_secs(time_to_live));
                        }
                        if let Some(time_to_idle) = get_u64("time-to-idle") {
                            builder.time_to_idle(Duration::from_secs(time_to_idle));
                        }
                        if let Some(segments) = accessor
                            .get("segments")
                            .and_then(|v| v.as_integer())
                            .and_then(|i| usize::try_from(i).ok())
                        {
                            builder.segments(segments);
                        }
                        if let Some(thread_pool_enabled) = accessor
                            .get("thread-pool-enabled")
                            .and_then(|v| v.as_bool())
                        {
                            builder.thread_pool_enabled(thread_pool_enabled);
                        }
                        Ok(Operator::new(builder.build()?))
                    }
                    "obs" => {
                        let mut builder = obs::Builder::default();
                        if let Some(root) = get_str("root") {
                            builder.root(root);
                        }
                        if let Some(bucket) = get_str("bucket") {
                            builder.bucket(bucket);
                        }
                        if let Some(endpoint) = get_str("endpoint") {
                            builder.endpoint(endpoint);
                        }
                        if let Some(access_key_id) = get_str("access-key-id") {
                            builder.access_key_id(access_key_id);
                        }
                        // Fixed: the key was `secret_access_key` (snake_case),
                        // inconsistent with every other kebab-case config key.
                        if let Some(secret_access_key) = get_str("secret-access-key") {
                            builder.secret_access_key(secret_access_key);
                        }
                        Ok(Operator::new(builder.build()?))
                    }
                    "oss" => {
                        let mut builder = oss::Builder::default();
                        if let Some(root) = get_str("root") {
                            builder.root(root);
                        }
                        if let Some(bucket) = get_str("bucket") {
                            builder.bucket(bucket);
                        }
                        if let Some(endpoint) = get_str("endpoint") {
                            builder.endpoint(endpoint);
                        }
                        if let Some(presign_endpoint) = get_str("presign-endpoint") {
                            builder.presign_endpoint(presign_endpoint);
                        }
                        if let Some(access_key_id) = get_str("access-key-id") {
                            builder.access_key_id(access_key_id);
                        }
                        if let Some(access_key_secret) = get_str("access-key-secret") {
                            builder.access_key_secret(access_key_secret);
                        }
                        Ok(Operator::new(builder.build()?))
                    }
                    "redis" => {
                        let mut builder = redis::Builder::default();
                        if let Some(root) = get_str("root") {
                            builder.root(root);
                        }
                        if let Some(endpoint) = get_str("endpoint") {
                            builder.endpoint(endpoint);
                        }
                        if let Some(username) = get_str("username") {
                            builder.username(username);
                        }
                        if let Some(password) = get_str("password") {
                            builder.password(password);
                        }
                        if let Some(db) = accessor.get("db").and_then(|v| v.as_integer()) {
                            builder.db(db);
                        }
                        if let Some(default_ttl) = get_u64("default-ttl") {
                            builder.default_ttl(Duration::from_secs(default_ttl));
                        }
                        Ok(Operator::new(builder.build()?))
                    }
                    "s3" => {
                        let mut builder = s3::Builder::default();
                        if let Some(root) = get_str("root") {
                            builder.root(root);
                        }
                        if let Some(bucket) = get_str("bucket") {
                            builder.bucket(bucket);
                        }
                        if let Some(endpoint) = get_str("endpoint") {
                            builder.endpoint(endpoint);
                        }
                        if let Some(region) = get_str("region") {
                            builder.region(region);
                        }
                        if let Some(access_key_id) = get_str("access-key-id") {
                            builder.access_key_id(access_key_id);
                        }
                        if let Some(secret_access_key) = get_str("secret-access-key") {
                            builder.secret_access_key(secret_access_key);
                        }
                        if let Some(role_arn) = get_str("role-arn") {
                            builder.role_arn(role_arn);
                        }
                        if let Some(external_id) = get_str("external-id") {
                            builder.external_id(external_id);
                        }
                        Ok(Operator::new(builder.build()?))
                    }
                    _ => Err(Error::new(Unsupported, "scheme is unsupported")),
                }
            } else {
                Err(Error::new(Unexpected, "failed to find the storage backend"))
            }
        } else if name.is_none() {
            // No accessor config at all: fall back to environment variables.
            scheme.parse().and_then(Operator::from_env)
        } else {
            Err(Error::new(Unexpected, "failed to create the operator"))
        };
        // Uniformly instrument every operator before handing it out.
        operator.map(|op| {
            op.layer(TracingLayer)
                .layer(MetricsLayer)
                .layer(RetryLayer::new(ExponentialBackoff::default()))
        })
    }
}

View File

@ -35,7 +35,7 @@ pub struct Authentication {
/// Content-Type header value.
content_type: Option<String>,
/// Date header.
date_header: (String, DateTime),
date_header: (&'static str, DateTime),
/// Expires.
expires: Option<DateTime>,
/// Canonicalized headers.
@ -56,7 +56,7 @@ impl Authentication {
accept: None,
content_md5: None,
content_type: None,
date_header: ("date".to_owned(), DateTime::now()),
date_header: ("date", DateTime::now()),
expires: None,
headers: Vec::new(),
resource: String::new(),
@ -99,9 +99,9 @@ impl Authentication {
self.content_type = content_type.into();
}
/// Sets the `date` header value.
/// Sets the header value for the date.
#[inline]
pub fn set_date_header(&mut self, header_name: String, date: DateTime) {
pub fn set_date_header(&mut self, header_name: &'static str, date: DateTime) {
self.date_header = (header_name, date);
}

View File

@ -1,5 +1,6 @@
//! Global cache for the application.
use crate::state::State;
use lru::LruCache;
use parking_lot::RwLock;
use serde_json::Value;
@ -129,7 +130,7 @@ impl GlobalCache {
/// Global cache.
static GLOBAL_CACHE: LazyLock<RwLock<LruCache<String, Value>>> = LazyLock::new(|| {
let config = crate::state::SHARED_STATE.config();
let config = State::shared().config();
let capacity = match config.get("cache") {
Some(cache) => cache
.as_table()

View File

@ -1,6 +1,6 @@
//! Connection pool and ORM.
use crate::{crypto, state::SHARED_STATE};
use crate::{crypto, state::State, SharedString};
use base64::{engine::general_purpose::STANDARD_NO_PAD, Engine};
use sqlx::{
postgres::{PgConnectOptions, PgPoolOptions},
@ -25,9 +25,9 @@ pub use schema::Schema;
#[derive(Debug, Clone)]
pub struct ConnectionPool {
/// Name.
name: String,
name: &'static str,
/// Database.
database: String,
database: SharedString,
/// Pool.
pool: PgPool,
}
@ -57,7 +57,7 @@ impl ConnectionPool {
}
/// Connects lazily to the database according to the config.
pub fn connect_lazy(application_name: &str, config: &Table) -> Self {
pub fn connect_lazy(application_name: &str, config: &'static Table) -> Self {
// Connect options.
let statement_cache_capacity = config
.get("statement-cache-capacity")
@ -105,7 +105,8 @@ impl ConnectionPool {
let database = connect_options
.get_database()
.unwrap_or_default()
.to_owned();
.to_owned()
.into();
// Pool options.
let max_connections = config
@ -141,8 +142,7 @@ impl ConnectionPool {
let name = config
.get("name")
.and_then(|v| v.as_str())
.unwrap_or("main")
.to_owned();
.unwrap_or("main");
Self {
name,
database,
@ -150,16 +150,16 @@ impl ConnectionPool {
}
}
/// Returns the name as a str.
/// Returns the name.
#[inline]
pub fn name(&self) -> &str {
&self.name
pub fn name(&self) -> &'static str {
self.name
}
/// Returns the database as a str.
/// Returns the database.
#[inline]
pub fn database(&self) -> &str {
&self.database
self.database.as_ref()
}
/// Returns a reference to the pool.
@ -170,7 +170,7 @@ impl ConnectionPool {
}
/// A list of database connection pools.
#[derive(Debug, Clone)]
#[derive(Debug)]
struct ConnectionPools(Vec<ConnectionPool>);
impl ConnectionPools {
@ -183,7 +183,7 @@ impl ConnectionPools {
/// Shared connection pools.
static SHARED_CONNECTION_POOLS: LazyLock<ConnectionPools> = LazyLock::new(|| {
let config = SHARED_STATE.config();
let config = State::shared().config();
// Application name.
let application_name = config
@ -212,7 +212,7 @@ static SHARED_CONNECTION_POOLS: LazyLock<ConnectionPools> = LazyLock::new(|| {
/// Database namespace prefix.
static NAMESPACE_PREFIX: LazyLock<&'static str> = LazyLock::new(|| {
SHARED_STATE
State::shared()
.config()
.get("database")
.expect("the `database` field should be specified")

View File

@ -8,12 +8,14 @@
#![feature(let_chains)]
#![feature(nonzero_min_max)]
#![feature(once_cell)]
#![feature(option_result_contains)]
#![feature(string_leak)]
#![feature(type_alias_impl_trait)]
#![forbid(unsafe_code)]
mod crypto;
pub mod accessor;
pub mod application;
pub mod authentication;
pub mod cache;

View File

@ -277,7 +277,7 @@ pub trait RequestContext {
let current = DateTime::now();
let max_tolerance = Duration::from_secs(900);
if date >= current - max_tolerance && date <= current + max_tolerance {
authentication.set_date_header("date".to_owned(), date);
authentication.set_date_header("date", date);
} else {
validation.record_fail("date", "untrusted date");
}

View File

@ -134,6 +134,12 @@ impl State {
listeners
}
/// Returns a reference to the shared state.
#[inline]
pub(crate) fn shared() -> &'static State {
LazyLock::force(&SHARED_STATE)
}
}
impl Default for State {

View File

@ -1,7 +1,7 @@
[package]
name = "zino-derive"
description = "Derived traits for zino."
version = "0.3.5"
version = "0.3.6"
rust-version = "1.68"
edition = "2021"
license = "MIT"
@ -23,4 +23,4 @@ features = ["full", "extra-traits"]
[dependencies.zino-core]
path = "../zino-core"
version = "0.4.3"
version = "0.4.4"

View File

@ -1,7 +1,7 @@
[package]
name = "zino-model"
description = "Model types for zino."
version = "0.3.5"
version = "0.3.6"
rust-version = "1.68"
edition = "2021"
license = "MIT"
@ -16,8 +16,8 @@ features = ["derive"]
[dependencies.zino-core]
path = "../zino-core"
version = "0.4.3"
version = "0.4.4"
[dependencies.zino-derive]
path = "../zino-derive"
version = "0.3.5"
version = "0.3.6"

View File

@ -1,7 +1,7 @@
[package]
name = "zino"
description = "Full featured web application framework for Rust."
version = "0.4.3"
version = "0.4.4"
rust-version = "1.68"
edition = "2021"
license = "MIT"
@ -12,8 +12,17 @@ repository = "https://github.com/photino/zino"
documentation = "https://docs.rs/zino"
readme = "README.md"
[package.metadata.docs.rs]
features = ["axum"]
[features]
axum = ["dep:axum", "dep:tokio", "dep:tokio-stream", "dep:tower", "dep:tower-http"]
axum = [
"dep:axum",
"dep:tokio",
"dep:tokio-stream",
"dep:tower",
"dep:tower-http",
]
[dependencies]
async-trait = "0.1.60"
@ -57,4 +66,4 @@ optional = true
[dependencies.zino-core]
path = "../zino-core"
version = "0.4.3"
version = "0.4.4"

View File

@ -1,6 +1,6 @@
# zino
`zino` is a full featured web application framework for Rust which focuses on
`zino` is a full-featured web application framework for Rust which focuses on
productivity and performance.
## Highlights

View File

@ -1,4 +1,4 @@
//! [`zino`] is a full featured web application framework for Rust
//! [`zino`] is a full-featured web application framework for Rust
//! which focuses on productivity and performance.
//!
//! ## Highlights