This is page 52 of 74. Use http://codebase.md/apache/opendal?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .asf.yaml
├── .config
│ └── nextest.toml
├── .devcontainer
│ ├── devcontainer.json
│ └── post_create.sh
├── .editorconfig
├── .env.example
├── .gitattributes
├── .github
│ ├── actions
│ │ ├── fuzz_test
│ │ │ └── action.yaml
│ │ ├── setup
│ │ │ └── action.yaml
│ │ ├── setup-hadoop
│ │ │ └── action.yaml
│ │ ├── setup-ocaml
│ │ │ └── action.yaml
│ │ ├── test_behavior_binding_c
│ │ │ └── action.yaml
│ │ ├── test_behavior_binding_cpp
│ │ │ └── action.yaml
│ │ ├── test_behavior_binding_go
│ │ │ └── action.yaml
│ │ ├── test_behavior_binding_java
│ │ │ └── action.yaml
│ │ ├── test_behavior_binding_nodejs
│ │ │ └── action.yaml
│ │ ├── test_behavior_binding_python
│ │ │ └── action.yaml
│ │ ├── test_behavior_core
│ │ │ └── action.yaml
│ │ └── test_behavior_integration_object_store
│ │ └── action.yml
│ ├── CODEOWNERS
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── 1-bug-report.yml
│ │ ├── 2-feature-request.yml
│ │ ├── 3-new-release.md
│ │ └── config.yml
│ ├── pull_request_template.md
│ ├── release.yml
│ ├── scripts
│ │ ├── test_behavior
│ │ │ ├── __init__.py
│ │ │ ├── plan.py
│ │ │ └── test_plan.py
│ │ ├── test_go_binding
│ │ │ ├── generate_test_scheme.py
│ │ │ └── matrix.yaml
│ │ └── weekly_update
│ │ ├── .gitignore
│ │ ├── .python-version
│ │ ├── main.py
│ │ ├── pyproject.toml
│ │ ├── README.md
│ │ └── uv.lock
│ ├── services
│ │ ├── aliyun_drive
│ │ │ └── aliyun_drive
│ │ │ └── disable_action.yml
│ │ ├── alluxio
│ │ │ └── alluxio
│ │ │ └── action.yml
│ │ ├── azblob
│ │ │ ├── azure_azblob
│ │ │ │ └── action.yml
│ │ │ └── azurite_azblob
│ │ │ └── action.yml
│ │ ├── azdls
│ │ │ └── azdls
│ │ │ └── action.yml
│ │ ├── azfile
│ │ │ └── azfile
│ │ │ └── action.yml
│ │ ├── b2
│ │ │ └── b2
│ │ │ └── action.yml
│ │ ├── cacache
│ │ │ └── cacache
│ │ │ └── action.yml
│ │ ├── compfs
│ │ │ └── compfs
│ │ │ └── action.yml
│ │ ├── cos
│ │ │ └── cos
│ │ │ └── action.yml
│ │ ├── dashmap
│ │ │ └── dashmap
│ │ │ └── action.yml
│ │ ├── dropbox
│ │ │ └── dropbox
│ │ │ └── disable_action.yml
│ │ ├── etcd
│ │ │ ├── etcd
│ │ │ │ └── action.yml
│ │ │ ├── etcd-cluster
│ │ │ │ └── action.yml
│ │ │ └── etcd-tls
│ │ │ └── action.yml
│ │ ├── fs
│ │ │ └── local_fs
│ │ │ └── action.yml
│ │ ├── ftp
│ │ │ └── vsftpd
│ │ │ └── disable_action.yml
│ │ ├── gcs
│ │ │ ├── gcs
│ │ │ │ └── action.yml
│ │ │ └── gcs_with_default_storage_class
│ │ │ └── action.yml
│ │ ├── gdrive
│ │ │ └── gdrive
│ │ │ └── action.yml
│ │ ├── gridfs
│ │ │ ├── gridfs
│ │ │ │ └── action.yml
│ │ │ └── gridfs_with_basic_auth
│ │ │ └── action.yml
│ │ ├── hdfs
│ │ │ ├── hdfs_cluster
│ │ │ │ └── action.yml
│ │ │ ├── hdfs_cluster_with_atomic_write_dir
│ │ │ │ └── action.yml
│ │ │ ├── hdfs_default
│ │ │ │ └── action.yml
│ │ │ ├── hdfs_default_gcs
│ │ │ │ └── action.yml
│ │ │ ├── hdfs_default_on_azurite_azblob
│ │ │ │ └── action.yml
│ │ │ ├── hdfs_default_on_minio_s3
│ │ │ │ └── action.yml
│ │ │ └── hdfs_default_with_atomic_write_dir
│ │ │ └── action.yml
│ │ ├── hdfs_native
│ │ │ └── hdfs_native_cluster
│ │ │ └── action.yml
│ │ ├── http
│ │ │ ├── caddy
│ │ │ │ └── action.yml
│ │ │ └── nginx
│ │ │ └── action.yml
│ │ ├── huggingface
│ │ │ └── huggingface
│ │ │ └── action.yml
│ │ ├── koofr
│ │ │ └── koofr
│ │ │ └── disable_action.yml
│ │ ├── memcached
│ │ │ ├── memcached
│ │ │ │ └── action.yml
│ │ │ └── memcached_with_auth
│ │ │ └── action.yml
│ │ ├── memory
│ │ │ └── memory
│ │ │ └── action.yml
│ │ ├── mini_moka
│ │ │ └── mini_moka
│ │ │ └── action.yml
│ │ ├── moka
│ │ │ └── moka
│ │ │ └── action.yml
│ │ ├── mongodb
│ │ │ ├── mongodb_with_basic_auth
│ │ │ │ └── action.yml
│ │ │ └── mongodb_with_no_auth
│ │ │ └── action.yml
│ │ ├── monoiofs
│ │ │ └── monoiofs
│ │ │ └── action.yml
│ │ ├── mysql
│ │ │ └── mysql
│ │ │ └── action.yml
│ │ ├── oss
│ │ │ ├── oss
│ │ │ │ └── action.yml
│ │ │ └── oss_with_versioning
│ │ │ └── action.yml
│ │ ├── persy
│ │ │ └── persy
│ │ │ └── action.yml
│ │ ├── postgresql
│ │ │ └── postgresql
│ │ │ └── action.yml
│ │ ├── redb
│ │ │ └── redb
│ │ │ └── action.yml
│ │ ├── redis
│ │ │ ├── dragonfly
│ │ │ │ └── action.yml
│ │ │ ├── kvrocks
│ │ │ │ └── action.yml
│ │ │ ├── redis
│ │ │ │ └── action.yml
│ │ │ ├── redis_tls
│ │ │ │ └── action.yml
│ │ │ ├── redis_with_cluster
│ │ │ │ └── action.yml
│ │ │ └── redis_with_cluster_tls
│ │ │ └── action.yml
│ │ ├── rocksdb
│ │ │ └── rocksdb
│ │ │ └── action.yml
│ │ ├── s3
│ │ │ ├── 0_minio_s3
│ │ │ │ └── action.yml
│ │ │ ├── aws_s3
│ │ │ │ └── action.yml
│ │ │ ├── aws_s3_with_list_objects_v1
│ │ │ │ └── action.yml
│ │ │ ├── aws_s3_with_sse_c
│ │ │ │ └── action.yml
│ │ │ ├── aws_s3_with_versioning
│ │ │ │ └── action.yml
│ │ │ ├── aws_s3_with_virtual_host
│ │ │ │ └── action.yml
│ │ │ ├── ceph_radios_s3_with_versioning
│ │ │ │ └── disable_action.yml
│ │ │ ├── ceph_rados_s3
│ │ │ │ └── disable_action.yml
│ │ │ ├── minio_s3_with_anonymous
│ │ │ │ └── action.yml
│ │ │ ├── minio_s3_with_list_objects_v1
│ │ │ │ └── action.yml
│ │ │ ├── minio_s3_with_versioning
│ │ │ │ └── action.yml
│ │ │ └── r2
│ │ │ └── disabled_action.yml
│ │ ├── seafile
│ │ │ └── seafile
│ │ │ └── action.yml
│ │ ├── sftp
│ │ │ ├── sftp
│ │ │ │ └── action.yml
│ │ │ └── sftp_with_default_root
│ │ │ └── action.yml
│ │ ├── sled
│ │ │ ├── sled
│ │ │ │ └── action.yml
│ │ │ └── sled_with_tree
│ │ │ └── action.yml
│ │ ├── sqlite
│ │ │ └── sqlite
│ │ │ └── action.yml
│ │ ├── swift
│ │ │ ├── ceph_rados_swift
│ │ │ │ └── action.yml
│ │ │ └── swift
│ │ │ └── action.yml
│ │ ├── tikv
│ │ │ └── tikv
│ │ │ └── disable_action.yml
│ │ ├── webdav
│ │ │ ├── 0_nginx
│ │ │ │ └── action.yml
│ │ │ ├── jfrog
│ │ │ │ └── disabled_action.yml
│ │ │ ├── nextcloud
│ │ │ │ └── action.yml
│ │ │ ├── nginx_with_empty_password
│ │ │ │ └── action.yml
│ │ │ ├── nginx_with_password
│ │ │ │ └── action.yml
│ │ │ ├── nginx_with_redirect
│ │ │ │ └── action.yml
│ │ │ └── owncloud
│ │ │ └── action.yml
│ │ └── webhdfs
│ │ ├── webhdfs
│ │ │ └── action.yml
│ │ ├── webhdfs_with_list_batch_disabled
│ │ │ └── action.yml
│ │ └── webhdfs_with_user_name
│ │ └── action.yml
│ └── workflows
│ ├── ci_bindings_c.yml
│ ├── ci_bindings_cpp.yml
│ ├── ci_bindings_d.yml
│ ├── ci_bindings_dart.yml
│ ├── ci_bindings_dotnet.yml
│ ├── ci_bindings_go.yml
│ ├── ci_bindings_haskell.yml
│ ├── ci_bindings_java.yml
│ ├── ci_bindings_lua.yml
│ ├── ci_bindings_nodejs.yml
│ ├── ci_bindings_ocaml.yml
│ ├── ci_bindings_php.yml
│ ├── ci_bindings_python.yml
│ ├── ci_bindings_ruby.yml
│ ├── ci_bindings_swift.yml
│ ├── ci_bindings_zig.yml
│ ├── ci_check.yml
│ ├── ci_core.yml
│ ├── ci_integration_dav_server.yml
│ ├── ci_integration_object_store.yml
│ ├── ci_integration_parquet.yml
│ ├── ci_integration_spring.yml
│ ├── ci_integration_unftp_sbe.yml
│ ├── ci_odev.yml
│ ├── ci_weekly_update.yml
│ ├── discussion-thread-link.yml
│ ├── docs.yml
│ ├── full-ci-promote.yml
│ ├── release_dart.yml
│ ├── release_java.yml
│ ├── release_nodejs.yml
│ ├── release_python.yml
│ ├── release_ruby.yml
│ ├── release_rust.yml
│ ├── service_test_ghac.yml
│ ├── test_behavior_binding_c.yml
│ ├── test_behavior_binding_cpp.yml
│ ├── test_behavior_binding_go.yml
│ ├── test_behavior_binding_java.yml
│ ├── test_behavior_binding_nodejs.yml
│ ├── test_behavior_binding_python.yml
│ ├── test_behavior_core.yml
│ ├── test_behavior_integration_object_store.yml
│ ├── test_behavior.yml
│ ├── test_edge.yml
│ └── test_fuzz.yml
├── .gitignore
├── .taplo.toml
├── .typos.toml
├── .vscode
│ └── settings.json
├── .yamlfmt
├── AGENTS.md
├── bindings
│ ├── java
│ │ ├── .cargo
│ │ │ └── config.toml
│ │ ├── .gitignore
│ │ ├── .mvn
│ │ │ └── wrapper
│ │ │ └── maven-wrapper.properties
│ │ ├── Cargo.toml
│ │ ├── DEPENDENCIES.md
│ │ ├── DEPENDENCIES.rust.tsv
│ │ ├── mvnw
│ │ ├── mvnw.cmd
│ │ ├── pom.xml
│ │ ├── README.md
│ │ ├── src
│ │ │ ├── async_operator.rs
│ │ │ ├── convert.rs
│ │ │ ├── error.rs
│ │ │ ├── executor.rs
│ │ │ ├── layer.rs
│ │ │ ├── lib.rs
│ │ │ ├── main
│ │ │ │ ├── java
│ │ │ │ │ └── org
│ │ │ │ │ └── apache
│ │ │ │ │ └── opendal
│ │ │ │ │ ├── AsyncExecutor.java
│ │ │ │ │ ├── AsyncOperator.java
│ │ │ │ │ ├── Capability.java
│ │ │ │ │ ├── Entry.java
│ │ │ │ │ ├── Environment.java
│ │ │ │ │ ├── layer
│ │ │ │ │ │ ├── ConcurrentLimitLayer.java
│ │ │ │ │ │ ├── package-info.java
│ │ │ │ │ │ └── RetryLayer.java
│ │ │ │ │ ├── Layer.java
│ │ │ │ │ ├── ListOptions.java
│ │ │ │ │ ├── Metadata.java
│ │ │ │ │ ├── NativeLibrary.java
│ │ │ │ │ ├── NativeObject.java
│ │ │ │ │ ├── OpenDAL.java
│ │ │ │ │ ├── OpenDALException.java
│ │ │ │ │ ├── Operator.java
│ │ │ │ │ ├── OperatorInfo.java
│ │ │ │ │ ├── OperatorInputStream.java
│ │ │ │ │ ├── OperatorOutputStream.java
│ │ │ │ │ ├── package-info.java
│ │ │ │ │ ├── PresignedRequest.java
│ │ │ │ │ ├── ReadOptions.java
│ │ │ │ │ ├── ServiceConfig.java
│ │ │ │ │ ├── StatOptions.java
│ │ │ │ │ └── WriteOptions.java
│ │ │ │ └── resources
│ │ │ │ ├── bindings.properties
│ │ │ │ └── META-INF
│ │ │ │ └── NOTICE
│ │ │ ├── operator_input_stream.rs
│ │ │ ├── operator_output_stream.rs
│ │ │ ├── operator.rs
│ │ │ ├── test
│ │ │ │ └── java
│ │ │ │ └── org
│ │ │ │ └── apache
│ │ │ │ └── opendal
│ │ │ │ └── test
│ │ │ │ ├── AsyncExecutorTest.java
│ │ │ │ ├── behavior
│ │ │ │ │ ├── AsyncCopyTest.java
│ │ │ │ │ ├── AsyncCreateDirTest.java
│ │ │ │ │ ├── AsyncListTest.java
│ │ │ │ │ ├── AsyncPresignTest.java
│ │ │ │ │ ├── AsyncReadOnlyTest.java
│ │ │ │ │ ├── AsyncRenameTest.java
│ │ │ │ │ ├── AsyncStatOptionsTest.java
│ │ │ │ │ ├── AsyncWriteOptionsTest.java
│ │ │ │ │ ├── AsyncWriteTest.java
│ │ │ │ │ ├── BehaviorExtension.java
│ │ │ │ │ ├── BehaviorTestBase.java
│ │ │ │ │ ├── BlockingCopyTest.java
│ │ │ │ │ ├── BlockingCreateDirTest.java
│ │ │ │ │ ├── BlockingListTest.java
│ │ │ │ │ ├── BlockingReadOnlyTest.java
│ │ │ │ │ ├── BlockingRenameTest.java
│ │ │ │ │ ├── BlockingStatOptionsTest.java
│ │ │ │ │ ├── BlockingWriteOptionTest.java
│ │ │ │ │ ├── BlockingWriteTest.java
│ │ │ │ │ └── RegressionTest.java
│ │ │ │ ├── condition
│ │ │ │ │ └── OpenDALExceptionCondition.java
│ │ │ │ ├── LayerTest.java
│ │ │ │ ├── MetadataTest.java
│ │ │ │ ├── OperatorDuplicateTest.java
│ │ │ │ ├── OperatorInfoTest.java
│ │ │ │ ├── OperatorInputOutputStreamTest.java
│ │ │ │ ├── OperatorUtf8DecodeTest.java
│ │ │ │ └── UtilityTest.java
│ │ │ └── utility.rs
│ │ ├── tools
│ │ │ └── build.py
│ │ ├── upgrade.md
│ │ └── users.md
│ ├── nodejs
│ │ ├── .cargo
│ │ │ └── config.toml
│ │ ├── .gitignore
│ │ ├── .node-version
│ │ ├── .npmignore
│ │ ├── .npmrc
│ │ ├── .prettierignore
│ │ ├── benchmark
│ │ │ ├── deno.ts
│ │ │ ├── node.js
│ │ │ └── README.md
│ │ ├── build.rs
│ │ ├── Cargo.toml
│ │ ├── CONTRIBUTING.md
│ │ ├── DEPENDENCIES.md
│ │ ├── DEPENDENCIES.rust.tsv
│ │ ├── devbox.json
│ │ ├── devbox.lock
│ │ ├── generated.d.ts
│ │ ├── generated.js
│ │ ├── index.cjs
│ │ ├── index.d.ts
│ │ ├── index.mjs
│ │ ├── npm
│ │ │ ├── darwin-arm64
│ │ │ │ ├── package.json
│ │ │ │ └── README.md
│ │ │ ├── darwin-x64
│ │ │ │ ├── package.json
│ │ │ │ └── README.md
│ │ │ ├── linux-arm64-gnu
│ │ │ │ ├── package.json
│ │ │ │ └── README.md
│ │ │ ├── linux-arm64-musl
│ │ │ │ ├── package.json
│ │ │ │ └── README.md
│ │ │ ├── linux-x64-gnu
│ │ │ │ ├── package.json
│ │ │ │ └── README.md
│ │ │ ├── linux-x64-musl
│ │ │ │ ├── package.json
│ │ │ │ └── README.md
│ │ │ ├── win32-arm64-msvc
│ │ │ │ ├── package.json
│ │ │ │ └── README.md
│ │ │ └── win32-x64-msvc
│ │ │ ├── package.json
│ │ │ └── README.md
│ │ ├── package.json
│ │ ├── pnpm-lock.yaml
│ │ ├── README.md
│ │ ├── scripts
│ │ │ └── header.mjs
│ │ ├── src
│ │ │ ├── capability.rs
│ │ │ ├── layer.rs
│ │ │ ├── lib.rs
│ │ │ └── options.rs
│ │ ├── tests
│ │ │ ├── service.test.mjs
│ │ │ ├── suites
│ │ │ │ ├── async.suite.mjs
│ │ │ │ ├── asyncDeleteOptions.suite.mjs
│ │ │ │ ├── asyncLister.suite.mjs
│ │ │ │ ├── asyncListOptions.suite.mjs
│ │ │ │ ├── asyncReadOptions.suite.mjs
│ │ │ │ ├── asyncStatOptions.suite.mjs
│ │ │ │ ├── asyncWriteOptions.suite.mjs
│ │ │ │ ├── index.mjs
│ │ │ │ ├── layer.suite.mjs
│ │ │ │ ├── services.suite.mjs
│ │ │ │ ├── sync.suite.mjs
│ │ │ │ ├── syncDeleteOptions.suite.mjs
│ │ │ │ ├── syncLister.suite.mjs
│ │ │ │ ├── syncListOptions.suite.mjs
│ │ │ │ ├── syncReadOptions.suite.mjs
│ │ │ │ ├── syncStatOptions.suite.mjs
│ │ │ │ └── syncWriteOptions.suite.mjs
│ │ │ └── utils.mjs
│ │ ├── theme
│ │ │ ├── index.tsx
│ │ │ └── package.json
│ │ ├── tsconfig.json
│ │ ├── tsconfig.theme.json
│ │ ├── typedoc.json
│ │ ├── upgrade.md
│ │ └── vitest.config.mjs
│ ├── python
│ │ ├── .gitignore
│ │ ├── benchmark
│ │ │ ├── async_opendal_benchmark.py
│ │ │ ├── async_origin_s3_benchmark_with_gevent.py
│ │ │ └── README.md
│ │ ├── Cargo.toml
│ │ ├── CONTRIBUTING.md
│ │ ├── DEPENDENCIES.md
│ │ ├── DEPENDENCIES.rust.tsv
│ │ ├── docs
│ │ │ ├── api
│ │ │ │ ├── async_file.md
│ │ │ │ ├── async_operator.md
│ │ │ │ ├── capability.md
│ │ │ │ ├── exceptions.md
│ │ │ │ ├── file.md
│ │ │ │ ├── layers.md
│ │ │ │ ├── operator.md
│ │ │ │ └── types.md
│ │ │ └── index.md
│ │ ├── justfile
│ │ ├── mkdocs.yml
│ │ ├── pyproject.toml
│ │ ├── pyrightconfig.json
│ │ ├── python
│ │ │ └── opendal
│ │ │ ├── __init__.py
│ │ │ ├── capability.pyi
│ │ │ ├── exceptions.pyi
│ │ │ ├── file.pyi
│ │ │ ├── layers.pyi
│ │ │ ├── operator.pyi
│ │ │ ├── py.typed
│ │ │ ├── services.pyi
│ │ │ └── types.pyi
│ │ ├── README.md
│ │ ├── ruff.toml
│ │ ├── src
│ │ │ ├── capability.rs
│ │ │ ├── errors.rs
│ │ │ ├── file.rs
│ │ │ ├── layers.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ ├── metadata.rs
│ │ │ ├── operator.rs
│ │ │ ├── options.rs
│ │ │ ├── services.rs
│ │ │ └── utils.rs
│ │ ├── template
│ │ │ └── module.html.jinja2
│ │ ├── tests
│ │ │ ├── conftest.py
│ │ │ ├── test_async_check.py
│ │ │ ├── test_async_copy.py
│ │ │ ├── test_async_delete.py
│ │ │ ├── test_async_exists.py
│ │ │ ├── test_async_list.py
│ │ │ ├── test_async_pickle_types.py
│ │ │ ├── test_async_rename.py
│ │ │ ├── test_capability.py
│ │ │ ├── test_exceptions.py
│ │ │ ├── test_pickle_rw.py
│ │ │ ├── test_read.py
│ │ │ ├── test_sync_check.py
│ │ │ ├── test_sync_copy.py
│ │ │ ├── test_sync_delete.py
│ │ │ ├── test_sync_exists.py
│ │ │ ├── test_sync_list.py
│ │ │ ├── test_sync_pickle_types.py
│ │ │ ├── test_sync_rename.py
│ │ │ └── test_write.py
│ │ ├── upgrade.md
│ │ ├── users.md
│ │ └── uv.lock
│ └── README.md
├── CHANGELOG.md
├── CITATION.cff
├── CLAUDE.md
├── CONTRIBUTING.md
├── core
│ ├── benches
│ │ ├── ops
│ │ │ ├── main.rs
│ │ │ ├── read.rs
│ │ │ ├── README.md
│ │ │ ├── utils.rs
│ │ │ └── write.rs
│ │ ├── README.md
│ │ ├── types
│ │ │ ├── buffer.rs
│ │ │ ├── main.rs
│ │ │ ├── README.md
│ │ │ └── tasks.rs
│ │ ├── vs_fs
│ │ │ ├── Cargo.toml
│ │ │ ├── README.md
│ │ │ └── src
│ │ │ └── main.rs
│ │ └── vs_s3
│ │ ├── Cargo.toml
│ │ ├── README.md
│ │ └── src
│ │ └── main.rs
│ ├── Cargo.lock
│ ├── Cargo.toml
│ ├── CHANGELOG.md
│ ├── CONTRIBUTING.md
│ ├── core
│ │ ├── Cargo.toml
│ │ └── src
│ │ ├── blocking
│ │ │ ├── delete.rs
│ │ │ ├── list.rs
│ │ │ ├── mod.rs
│ │ │ ├── operator.rs
│ │ │ ├── read
│ │ │ │ ├── buffer_iterator.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── reader.rs
│ │ │ │ ├── std_bytes_iterator.rs
│ │ │ │ └── std_reader.rs
│ │ │ └── write
│ │ │ ├── mod.rs
│ │ │ ├── std_writer.rs
│ │ │ └── writer.rs
│ │ ├── docs
│ │ │ ├── comparisons
│ │ │ │ ├── mod.rs
│ │ │ │ └── vs_object_store.md
│ │ │ ├── concepts.rs
│ │ │ ├── internals
│ │ │ │ ├── accessor.rs
│ │ │ │ ├── layer.rs
│ │ │ │ └── mod.rs
│ │ │ ├── mod.rs
│ │ │ ├── performance
│ │ │ │ ├── concurrent_write.md
│ │ │ │ ├── http_optimization.md
│ │ │ │ └── mod.rs
│ │ │ ├── rfcs
│ │ │ │ ├── 0000_example.md
│ │ │ │ ├── 0041_object_native_api.md
│ │ │ │ ├── 0044_error_handle.md
│ │ │ │ ├── 0057_auto_region.md
│ │ │ │ ├── 0069_object_stream.md
│ │ │ │ ├── 0090_limited_reader.md
│ │ │ │ ├── 0112_path_normalization.md
│ │ │ │ ├── 0191_async_streaming_io.md
│ │ │ │ ├── 0203_remove_credential.md
│ │ │ │ ├── 0221_create_dir.md
│ │ │ │ ├── 0247_retryable_error.md
│ │ │ │ ├── 0293_object_id.md
│ │ │ │ ├── 0337_dir_entry.md
│ │ │ │ ├── 0409_accessor_capabilities.md
│ │ │ │ ├── 0413_presign.md
│ │ │ │ ├── 0423_command_line_interface.md
│ │ │ │ ├── 0429_init_from_iter.md
│ │ │ │ ├── 0438_multipart.md
│ │ │ │ ├── 0443_gateway.md
│ │ │ │ ├── 0501_new_builder.md
│ │ │ │ ├── 0554_write_refactor.md
│ │ │ │ ├── 0561_list_metadata_reuse.md
│ │ │ │ ├── 0599_blocking_api.md
│ │ │ │ ├── 0623_redis_service.md
│ │ │ │ ├── 0627_split_capabilities.md
│ │ │ │ ├── 0661_path_in_accessor.md
│ │ │ │ ├── 0793_generic_kv_services.md
│ │ │ │ ├── 0926_object_reader.md
│ │ │ │ ├── 0977_refactor_error.md
│ │ │ │ ├── 1085_object_handler.md
│ │ │ │ ├── 1391_object_metadataer.md
│ │ │ │ ├── 1398_query_based_metadata.md
│ │ │ │ ├── 1420_object_writer.md
│ │ │ │ ├── 1477_remove_object_concept.md
│ │ │ │ ├── 1735_operation_extension.md
│ │ │ │ ├── 2083_writer_sink_api.md
│ │ │ │ ├── 2133_append_api.md
│ │ │ │ ├── 2299_chain_based_operator_api.md
│ │ │ │ ├── 2602_object_versioning.md
│ │ │ │ ├── 2758_merge_append_into_write.md
│ │ │ │ ├── 2774_lister_api.md
│ │ │ │ ├── 2779_list_with_metakey.md
│ │ │ │ ├── 2852_native_capability.md
│ │ │ │ ├── 2884_merge_range_read_into_read.md
│ │ │ │ ├── 3017_remove_write_copy_from.md
│ │ │ │ ├── 3197_config.md
│ │ │ │ ├── 3232_align_list_api.md
│ │ │ │ ├── 3243_list_prefix.md
│ │ │ │ ├── 3356_lazy_reader.md
│ │ │ │ ├── 3526_list_recursive.md
│ │ │ │ ├── 3574_concurrent_stat_in_list.md
│ │ │ │ ├── 3734_buffered_reader.md
│ │ │ │ ├── 3898_concurrent_writer.md
│ │ │ │ ├── 3911_deleter_api.md
│ │ │ │ ├── 4382_range_based_read.md
│ │ │ │ ├── 4638_executor.md
│ │ │ │ ├── 5314_remove_metakey.md
│ │ │ │ ├── 5444_operator_from_uri.md
│ │ │ │ ├── 5479_context.md
│ │ │ │ ├── 5485_conditional_reader.md
│ │ │ │ ├── 5495_list_with_deleted.md
│ │ │ │ ├── 5556_write_returns_metadata.md
│ │ │ │ ├── 5871_read_returns_metadata.md
│ │ │ │ ├── 6189_remove_native_blocking.md
│ │ │ │ ├── 6209_glob_support.md
│ │ │ │ ├── 6213_options_api.md
│ │ │ │ ├── 6370_foyer_integration.md
│ │ │ │ ├── 6678_simulate_layer.md
│ │ │ │ ├── 6707_capability_override_layer.md
│ │ │ │ ├── 6817_checksum.md
│ │ │ │ ├── 6828_core.md
│ │ │ │ ├── 7130_route_layer.md
│ │ │ │ ├── mod.rs
│ │ │ │ └── README.md
│ │ │ └── upgrade.md
│ │ ├── layers
│ │ │ ├── complete.rs
│ │ │ ├── correctness_check.rs
│ │ │ ├── error_context.rs
│ │ │ ├── http_client.rs
│ │ │ ├── mod.rs
│ │ │ ├── simulate.rs
│ │ │ └── type_eraser.rs
│ │ ├── lib.rs
│ │ ├── raw
│ │ │ ├── accessor.rs
│ │ │ ├── atomic_util.rs
│ │ │ ├── enum_utils.rs
│ │ │ ├── futures_util.rs
│ │ │ ├── http_util
│ │ │ │ ├── body.rs
│ │ │ │ ├── bytes_content_range.rs
│ │ │ │ ├── bytes_range.rs
│ │ │ │ ├── client.rs
│ │ │ │ ├── error.rs
│ │ │ │ ├── header.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── multipart.rs
│ │ │ │ └── uri.rs
│ │ │ ├── layer.rs
│ │ │ ├── mod.rs
│ │ │ ├── oio
│ │ │ │ ├── buf
│ │ │ │ │ ├── flex_buf.rs
│ │ │ │ │ ├── mod.rs
│ │ │ │ │ ├── pooled_buf.rs
│ │ │ │ │ └── queue_buf.rs
│ │ │ │ ├── delete
│ │ │ │ │ ├── api.rs
│ │ │ │ │ ├── batch_delete.rs
│ │ │ │ │ ├── mod.rs
│ │ │ │ │ └── one_shot_delete.rs
│ │ │ │ ├── entry.rs
│ │ │ │ ├── list
│ │ │ │ │ ├── api.rs
│ │ │ │ │ ├── flat_list.rs
│ │ │ │ │ ├── hierarchy_list.rs
│ │ │ │ │ ├── mod.rs
│ │ │ │ │ ├── page_list.rs
│ │ │ │ │ └── prefix_list.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── read
│ │ │ │ │ ├── api.rs
│ │ │ │ │ └── mod.rs
│ │ │ │ └── write
│ │ │ │ ├── api.rs
│ │ │ │ ├── append_write.rs
│ │ │ │ ├── block_write.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── multipart_write.rs
│ │ │ │ ├── one_shot_write.rs
│ │ │ │ └── position_write.rs
│ │ │ ├── operation.rs
│ │ │ ├── ops.rs
│ │ │ ├── path_cache.rs
│ │ │ ├── path.rs
│ │ │ ├── rps.rs
│ │ │ ├── serde_util.rs
│ │ │ ├── std_io_util.rs
│ │ │ ├── time.rs
│ │ │ ├── tokio_util.rs
│ │ │ └── version.rs
│ │ ├── services
│ │ │ ├── memory
│ │ │ │ ├── backend.rs
│ │ │ │ ├── config.rs
│ │ │ │ ├── core.rs
│ │ │ │ ├── deleter.rs
│ │ │ │ ├── docs.md
│ │ │ │ ├── lister.rs
│ │ │ │ ├── mod.rs
│ │ │ │ └── writer.rs
│ │ │ └── mod.rs
│ │ └── types
│ │ ├── buffer.rs
│ │ ├── builder.rs
│ │ ├── capability.rs
│ │ ├── context
│ │ │ ├── mod.rs
│ │ │ ├── read.rs
│ │ │ └── write.rs
│ │ ├── delete
│ │ │ ├── deleter.rs
│ │ │ ├── futures_delete_sink.rs
│ │ │ ├── input.rs
│ │ │ └── mod.rs
│ │ ├── entry.rs
│ │ ├── error.rs
│ │ ├── execute
│ │ │ ├── api.rs
│ │ │ ├── executor.rs
│ │ │ ├── executors
│ │ │ │ ├── mod.rs
│ │ │ │ └── tokio_executor.rs
│ │ │ └── mod.rs
│ │ ├── list.rs
│ │ ├── metadata.rs
│ │ ├── mod.rs
│ │ ├── mode.rs
│ │ ├── operator
│ │ │ ├── builder.rs
│ │ │ ├── info.rs
│ │ │ ├── mod.rs
│ │ │ ├── operator_futures.rs
│ │ │ ├── operator.rs
│ │ │ ├── registry.rs
│ │ │ └── uri.rs
│ │ ├── options.rs
│ │ ├── read
│ │ │ ├── buffer_stream.rs
│ │ │ ├── futures_async_reader.rs
│ │ │ ├── futures_bytes_stream.rs
│ │ │ ├── mod.rs
│ │ │ └── reader.rs
│ │ └── write
│ │ ├── buffer_sink.rs
│ │ ├── futures_async_writer.rs
│ │ ├── futures_bytes_sink.rs
│ │ ├── mod.rs
│ │ └── writer.rs
│ ├── DEPENDENCIES.md
│ ├── DEPENDENCIES.rust.tsv
│ ├── edge
│ │ ├── file_write_on_full_disk
│ │ │ ├── Cargo.toml
│ │ │ ├── README.md
│ │ │ └── src
│ │ │ └── main.rs
│ │ ├── README.md
│ │ ├── s3_aws_assume_role_with_web_identity
│ │ │ ├── Cargo.toml
│ │ │ ├── README.md
│ │ │ └── src
│ │ │ └── main.rs
│ │ └── s3_read_on_wasm
│ │ ├── .gitignore
│ │ ├── Cargo.toml
│ │ ├── README.md
│ │ ├── src
│ │ │ └── lib.rs
│ │ └── webdriver.json
│ ├── fuzz
│ │ ├── .gitignore
│ │ ├── Cargo.toml
│ │ ├── fuzz_reader.rs
│ │ ├── fuzz_writer.rs
│ │ └── README.md
│ ├── layers
│ │ ├── async-backtrace
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── await-tree
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── capability-check
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── chaos
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── concurrent-limit
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── dtrace
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── fastmetrics
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── fastrace
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── foyer
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── hotpath
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── immutable-index
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── logging
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── metrics
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── mime-guess
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── observe-metrics-common
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── otelmetrics
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── oteltrace
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── prometheus
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── prometheus-client
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── retry
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── route
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── tail-cut
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── throttle
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── timeout
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ └── tracing
│ │ ├── Cargo.toml
│ │ └── src
│ │ └── lib.rs
│ ├── LICENSE
│ ├── README.md
│ ├── services
│ │ ├── aliyun-drive
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── alluxio
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── azblob
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── azdls
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── azfile
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── azure-common
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ └── lib.rs
│ │ ├── b2
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── cacache
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ └── writer.rs
│ │ ├── cloudflare-kv
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ ├── model.rs
│ │ │ └── writer.rs
│ │ ├── compfs
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ ├── reader.rs
│ │ │ └── writer.rs
│ │ ├── cos
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── d1
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── model.rs
│ │ │ └── writer.rs
│ │ ├── dashmap
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── dbfs
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── dropbox
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── builder.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── etcd
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── foundationdb
│ │ │ ├── build.rs
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ └── writer.rs
│ │ ├── fs
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ ├── reader.rs
│ │ │ └── writer.rs
│ │ ├── ftp
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── err.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ ├── reader.rs
│ │ │ └── writer.rs
│ │ ├── gcs
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ ├── uri.rs
│ │ │ └── writer.rs
│ │ ├── gdrive
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── builder.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── ghac
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ └── writer.rs
│ │ ├── github
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ ├── mod.rs
│ │ │ └── writer.rs
│ │ ├── gridfs
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ └── writer.rs
│ │ ├── hdfs
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ ├── reader.rs
│ │ │ └── writer.rs
│ │ ├── hdfs-native
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ ├── reader.rs
│ │ │ └── writer.rs
│ │ ├── http
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ └── lib.rs
│ │ ├── huggingface
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ └── lister.rs
│ │ ├── ipfs
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── ipld.rs
│ │ │ └── lib.rs
│ │ ├── ipmfs
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── builder.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── koofr
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── lakefs
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── memcached
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── binary.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ └── writer.rs
│ │ ├── mini_moka
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── moka
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── mongodb
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ └── writer.rs
│ │ ├── monoiofs
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ ├── reader.rs
│ │ │ └── writer.rs
│ │ ├── mysql
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ └── writer.rs
│ │ ├── obs
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── onedrive
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── builder.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── graph_model.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── opfs
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ └── utils.rs
│ │ ├── oss
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── pcloud
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── persy
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ └── writer.rs
│ │ ├── postgresql
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ └── writer.rs
│ │ ├── redb
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ └── writer.rs
│ │ ├── redis
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── delete.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ └── writer.rs
│ │ ├── rocksdb
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── s3
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── compatible_services.md
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ ├── mod.rs
│ │ │ └── writer.rs
│ │ ├── seafile
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── sftp
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ ├── reader.rs
│ │ │ ├── utils.rs
│ │ │ └── writer.rs
│ │ ├── sled
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── sqlite
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ └── writer.rs
│ │ ├── surrealdb
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ └── writer.rs
│ │ ├── swift
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── compatible_services.md
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── tikv
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── lib.rs
│ │ │ └── writer.rs
│ │ ├── upyun
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── vercel-artifacts
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── builder.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ └── writer.rs
│ │ ├── vercel-blob
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── webdav
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ └── writer.rs
│ │ ├── webhdfs
│ │ │ ├── Cargo.toml
│ │ │ └── src
│ │ │ ├── backend.rs
│ │ │ ├── config.rs
│ │ │ ├── core.rs
│ │ │ ├── deleter.rs
│ │ │ ├── docs.md
│ │ │ ├── error.rs
│ │ │ ├── lib.rs
│ │ │ ├── lister.rs
│ │ │ ├── message.rs
│ │ │ └── writer.rs
│ │ └── yandex-disk
│ │ ├── Cargo.toml
│ │ └── src
│ │ ├── backend.rs
│ │ ├── config.rs
│ │ ├── core.rs
│ │ ├── deleter.rs
│ │ ├── docs.md
│ │ ├── error.rs
│ │ ├── lib.rs
│ │ ├── lister.rs
│ │ └── writer.rs
│ ├── src
│ │ └── lib.rs
│ ├── testkit
│ │ ├── Cargo.toml
│ │ └── src
│ │ ├── lib.rs
│ │ ├── read.rs
│ │ ├── utils.rs
│ │ └── write.rs
│ ├── tests
│ │ ├── behavior
│ │ │ ├── async_copy.rs
│ │ │ ├── async_create_dir.rs
│ │ │ ├── async_delete.rs
│ │ │ ├── async_list.rs
│ │ │ ├── async_presign.rs
│ │ │ ├── async_read.rs
│ │ │ ├── async_rename.rs
│ │ │ ├── async_stat.rs
│ │ │ ├── async_write.rs
│ │ │ ├── main.rs
│ │ │ ├── README.md
│ │ │ └── utils.rs
│ │ └── data
│ │ ├── normal_dir
│ │ │ └── .gitkeep
│ │ ├── normal_file.txt
│ │ ├── special_dir !@#$%^&()_+-=;',
│ │ │ └── .gitkeep
│ │ └── special_file !@#$%^&()_+-=;',.txt
│ ├── upgrade.md
│ └── users.md
├── deny.toml
├── DEPENDENCIES.md
├── dev
│ ├── Cargo.lock
│ ├── Cargo.toml
│ ├── README.md
│ └── src
│ ├── generate
│ │ ├── java.j2
│ │ ├── java.rs
│ │ ├── mod.rs
│ │ ├── parser.rs
│ │ ├── python.j2
│ │ └── python.rs
│ ├── main.rs
│ └── release
│ ├── mod.rs
│ └── package.rs
├── doap.rdf
├── fixtures
│ ├── alluxio
│ │ └── docker-compose-alluxio.yml
│ ├── azblob
│ │ └── docker-compose-azurite.yml
│ ├── data
│ │ ├── normal_dir
│ │ │ └── .gitkeep
│ │ ├── normal_file.txt
│ │ ├── special_dir !@#$%^&()_+-=;',
│ │ │ └── .gitkeep
│ │ └── special_file !@#$%^&()_+-=;',.txt
│ ├── etcd
│ │ ├── ca-key.pem
│ │ ├── ca.pem
│ │ ├── client-key.pem
│ │ ├── client.pem
│ │ ├── docker-compose-cluster.yml
│ │ ├── docker-compose-standalone-tls.yml
│ │ ├── docker-compose-standalone.yml
│ │ ├── server-key.pem
│ │ └── server.pem
│ ├── ftp
│ │ └── docker-compose-vsftpd.yml
│ ├── hdfs
│ │ ├── azurite-azblob-core-site.xml
│ │ ├── docker-compose-hdfs-cluster.yml
│ │ ├── gcs-core-site.xml
│ │ ├── hdfs-site.xml
│ │ └── minio-s3-core-site.xml
│ ├── http
│ │ ├── Caddyfile
│ │ ├── docker-compose-caddy.yml
│ │ ├── docker-compose-nginx.yml
│ │ └── nginx.conf
│ ├── libsql
│ │ ├── docker-compose-auth.yml
│ │ └── docker-compose.yml
│ ├── memcached
│ │ ├── docker-compose-memcached-with-auth.yml
│ │ └── docker-compose-memcached.yml
│ ├── mongodb
│ │ ├── docker-compose-basic-auth.yml
│ │ └── docker-compose-no-auth.yml
│ ├── mysql
│ │ ├── docker-compose.yml
│ │ └── init.sql
│ ├── postgresql
│ │ ├── docker-compose.yml
│ │ └── init.sql
│ ├── redis
│ │ ├── docker-compose-dragonfly.yml
│ │ ├── docker-compose-kvrocks.yml
│ │ ├── docker-compose-redis-cluster-tls.yml
│ │ ├── docker-compose-redis-cluster.yml
│ │ ├── docker-compose-redis-tls.yml
│ │ ├── docker-compose-redis.yml
│ │ └── ssl
│ │ ├── .gitignore
│ │ ├── ca.crt
│ │ ├── ca.key
│ │ ├── ca.srl
│ │ ├── README.md
│ │ ├── redis.crt
│ │ ├── redis.key
│ │ └── req.conf
│ ├── s3
│ │ ├── docker-compose-ceph-rados.yml
│ │ └── docker-compose-minio.yml
│ ├── seafile
│ │ └── docker-compose-seafile.yml
│ ├── sftp
│ │ ├── change_root_dir.sh
│ │ ├── docker-compose-sftp-with-default-root.yml
│ │ ├── docker-compose-sftp.yml
│ │ ├── health-check.sh
│ │ ├── test_ssh_key
│ │ └── test_ssh_key.pub
│ ├── sqlite
│ │ └── data.sql
│ ├── swift
│ │ ├── docker-compose-ceph-rados.yml
│ │ └── docker-compose-swift.yml
│ ├── tikv
│ │ ├── gen_cert.sh
│ │ ├── pd-tls.toml
│ │ ├── pd.toml
│ │ ├── ssl
│ │ │ ├── ca-key.pem
│ │ │ ├── ca.pem
│ │ │ ├── client-key.pem
│ │ │ ├── client.pem
│ │ │ ├── pd-server-key.pem
│ │ │ ├── pd-server.pem
│ │ │ ├── tikv-server-key.pem
│ │ │ └── tikv-server.pem
│ │ ├── tikv-tls.toml
│ │ └── tikv.toml
│ ├── webdav
│ │ ├── config
│ │ │ └── nginx
│ │ │ └── http.conf
│ │ ├── docker-compose-webdav-jfrog.yml
│ │ ├── docker-compose-webdav-nextcloud.yml
│ │ ├── docker-compose-webdav-owncloud.yml
│ │ ├── docker-compose-webdav-with-auth.yml
│ │ ├── docker-compose-webdav-with-empty-passwd.yml
│ │ ├── docker-compose-webdav.yml
│ │ └── health-check-nextcloud.sh
│ └── webhdfs
│ └── docker-compose-webhdfs.yml
├── justfile
├── LICENSE
├── licenserc.toml
├── NOTICE
├── README.md
├── rust-toolchain.toml
├── rustfmt.toml
└── scripts
├── constants.py
├── dependencies.py
├── merge_local_staging.py
├── README.md
├── verify.py
└── workspace.py
```
# Files
--------------------------------------------------------------------------------
/core/services/azblob/src/backend.rs:
--------------------------------------------------------------------------------
```rust
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | use std::fmt::Debug;
19 | use std::sync::Arc;
20 |
21 | use base64::Engine;
22 | use base64::prelude::BASE64_STANDARD;
23 | use http::Response;
24 | use http::StatusCode;
25 | use log::debug;
26 | use reqsign::AzureStorageConfig;
27 | use reqsign::AzureStorageLoader;
28 | use reqsign::AzureStorageSigner;
29 | use sha2::Digest;
30 | use sha2::Sha256;
31 |
32 | use super::AZBLOB_SCHEME;
33 | use super::config::AzblobConfig;
34 | use super::core::AzblobCore;
35 | use super::core::constants::X_MS_META_PREFIX;
36 | use super::core::constants::X_MS_VERSION_ID;
37 | use super::deleter::AzblobDeleter;
38 | use super::error::parse_error;
39 | use super::lister::AzblobLister;
40 | use super::writer::AzblobWriter;
41 | use super::writer::AzblobWriters;
42 | use opendal_core::raw::*;
43 | use opendal_core::*;
44 | use opendal_service_azure_common::{
45 | AzureStorageService, azure_account_name_from_endpoint, azure_config_from_connection_string,
46 | };
47 |
48 | const AZBLOB_BATCH_LIMIT: usize = 256;
49 |
50 | impl From<AzureStorageConfig> for AzblobConfig {
51 | fn from(value: AzureStorageConfig) -> Self {
52 | Self {
53 | endpoint: value.endpoint,
54 | account_name: value.account_name,
55 | account_key: value.account_key,
56 | sas_token: value.sas_token,
57 | ..Default::default()
58 | }
59 | }
60 | }
61 |
#[doc = include_str!("docs.md")]
#[derive(Default)]
pub struct AzblobBuilder {
    // Accumulated configuration; consumed by `Builder::build`.
    pub(super) config: AzblobConfig,
}
67 |
68 | impl Debug for AzblobBuilder {
69 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
70 | f.debug_struct("AzblobBuilder")
71 | .field("config", &self.config)
72 | .finish_non_exhaustive()
73 | }
74 | }
75 |
impl AzblobBuilder {
    /// Set root of this backend.
    ///
    /// All operations will happen under this root.
    pub fn root(mut self, root: &str) -> Self {
        self.config.root = if root.is_empty() {
            None
        } else {
            Some(root.to_string())
        };

        self
    }

    /// Set container name of this backend.
    pub fn container(mut self, container: &str) -> Self {
        self.config.container = container.to_string();

        self
    }

    /// Set endpoint of this backend
    ///
    /// Endpoint must be full uri, e.g.
    ///
    /// - Azblob: `https://accountname.blob.core.windows.net`
    /// - Azurite: `http://127.0.0.1:10000/devstoreaccount1`
    pub fn endpoint(mut self, endpoint: &str) -> Self {
        if !endpoint.is_empty() {
            // Trim trailing `/` so that we can accept `http://127.0.0.1:9000/`
            self.config.endpoint = Some(endpoint.trim_end_matches('/').to_string());
        }

        self
    }

    /// Set account_name of this backend.
    ///
    /// - If account_name is set, we will take user's input first.
    /// - If not, we will try to load it from environment.
    pub fn account_name(mut self, account_name: &str) -> Self {
        if !account_name.is_empty() {
            self.config.account_name = Some(account_name.to_string());
        }

        self
    }

    /// Set account_key of this backend.
    ///
    /// - If account_key is set, we will take user's input first.
    /// - If not, we will try to load it from environment.
    pub fn account_key(mut self, account_key: &str) -> Self {
        if !account_key.is_empty() {
            self.config.account_key = Some(account_key.to_string());
        }

        self
    }

    /// Set encryption_key of this backend.
    ///
    /// # Args
    ///
    /// `v`: Base64-encoded key that matches algorithm specified in `encryption_algorithm`.
    ///
    /// # Note
    ///
    /// This function is the low-level setting for SSE related features.
    ///
    /// SSE related options should be set carefully to make them work.
    /// Please use `server_side_encryption_with_*` helpers whenever possible.
    pub fn encryption_key(mut self, v: &str) -> Self {
        if !v.is_empty() {
            self.config.encryption_key = Some(v.to_string());
        }

        self
    }

    /// Set encryption_key_sha256 of this backend.
    ///
    /// # Args
    ///
    /// `v`: Base64-encoded SHA256 digest of the key specified in encryption_key.
    ///
    /// # Note
    ///
    /// This function is the low-level setting for SSE related features.
    ///
    /// SSE related options should be set carefully to make them work.
    /// Please use `server_side_encryption_with_*` helpers whenever possible.
    pub fn encryption_key_sha256(mut self, v: &str) -> Self {
        if !v.is_empty() {
            self.config.encryption_key_sha256 = Some(v.to_string());
        }

        self
    }

    /// Set encryption_algorithm of this backend.
    ///
    /// # Args
    ///
    /// `v`: server-side encryption algorithm. (Available values: `AES256`)
    ///
    /// # Note
    ///
    /// This function is the low-level setting for SSE related features.
    ///
    /// SSE related options should be set carefully to make them work.
    /// Please use `server_side_encryption_with_*` helpers whenever possible.
    pub fn encryption_algorithm(mut self, v: &str) -> Self {
        if !v.is_empty() {
            self.config.encryption_algorithm = Some(v.to_string());
        }

        self
    }

    /// Enable server side encryption with customer key.
    ///
    /// Also known as: CPK
    ///
    /// # Args
    ///
    /// `key`: the raw customer-provided key bytes. This helper base64-encodes
    /// the key, computes its SHA256 digest, and fixes the algorithm to `AES256`.
    ///
    /// # Note
    ///
    /// Function that helps the user to set the server-side customer-provided encryption key, the key's SHA256, and the algorithm.
    /// See [Server-side encryption with customer-provided keys (CPK)](https://learn.microsoft.com/en-us/azure/storage/blobs/encryption-customer-provided-keys)
    /// for more info.
    pub fn server_side_encryption_with_customer_key(mut self, key: &[u8]) -> Self {
        // Only AES256 is supported for now
        self.config.encryption_algorithm = Some("AES256".to_string());
        self.config.encryption_key = Some(BASE64_STANDARD.encode(key));
        let key_sha256 = Sha256::digest(key);
        self.config.encryption_key_sha256 = Some(BASE64_STANDARD.encode(key_sha256));
        self
    }

    /// Set sas_token of this backend.
    ///
    /// - If sas_token is set, we will take user's input first.
    /// - If not, we will try to load it from environment.
    ///
    /// See [Grant limited access to Azure Storage resources using shared access signatures (SAS)](https://learn.microsoft.com/en-us/azure/storage/common/storage-sas-overview)
    /// for more info.
    pub fn sas_token(mut self, sas_token: &str) -> Self {
        if !sas_token.is_empty() {
            self.config.sas_token = Some(sas_token.to_string());
        }

        self
    }

    /// Set maximum batch operations of this backend.
    pub fn batch_max_operations(mut self, batch_max_operations: usize) -> Self {
        self.config.batch_max_operations = Some(batch_max_operations);

        self
    }

    /// from_connection_string will make a builder from connection string
    ///
    /// connection string looks like:
    ///
    /// ```txt
    /// DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;
    /// AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;
    /// BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;
    /// QueueEndpoint=http://127.0.0.1:10001/devstoreaccount1;
    /// TableEndpoint=http://127.0.0.1:10002/devstoreaccount1;
    /// ```
    ///
    /// Or
    ///
    /// ```txt
    /// DefaultEndpointsProtocol=https;
    /// AccountName=storagesample;
    /// AccountKey=<account-key>;
    /// EndpointSuffix=core.chinacloudapi.cn;
    /// ```
    ///
    /// For reference: [Configure Azure Storage connection strings](https://learn.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string)
    ///
    /// # Note
    ///
    /// Connection strings can only configure the endpoint, account name and
    /// authentication information. Users still need to configure container name.
    pub fn from_connection_string(conn: &str) -> Result<Self> {
        let config = azure_config_from_connection_string(conn, AzureStorageService::Blob)?;

        Ok(AzblobConfig::from(config).into_builder())
    }
}
273 |
274 | impl Builder for AzblobBuilder {
275 | type Config = AzblobConfig;
276 |
277 | fn build(self) -> Result<impl Access> {
278 | debug!("backend build started: {:?}", &self);
279 |
280 | let root = normalize_root(&self.config.root.unwrap_or_default());
281 | debug!("backend use root {root}");
282 |
283 | // Handle endpoint, region and container name.
284 | let container = match self.config.container.is_empty() {
285 | false => Ok(&self.config.container),
286 | true => Err(Error::new(ErrorKind::ConfigInvalid, "container is empty")
287 | .with_operation("Builder::build")
288 | .with_context("service", AZBLOB_SCHEME)),
289 | }?;
290 | debug!("backend use container {}", &container);
291 |
292 | let endpoint = match &self.config.endpoint {
293 | Some(endpoint) => Ok(endpoint.clone()),
294 | None => Err(Error::new(ErrorKind::ConfigInvalid, "endpoint is empty")
295 | .with_operation("Builder::build")
296 | .with_context("service", AZBLOB_SCHEME)),
297 | }?;
298 | debug!("backend use endpoint {}", &container);
299 |
300 | #[cfg(target_arch = "wasm32")]
301 | let mut config_loader = AzureStorageConfig::default();
302 | #[cfg(not(target_arch = "wasm32"))]
303 | let mut config_loader = AzureStorageConfig::default().from_env();
304 |
305 | if let Some(v) = self
306 | .config
307 | .account_name
308 | .clone()
309 | .or_else(|| azure_account_name_from_endpoint(endpoint.as_str()))
310 | {
311 | config_loader.account_name = Some(v);
312 | }
313 |
314 | if let Some(v) = self.config.account_key.clone() {
315 | // Validate that account_key can be decoded as base64
316 | if let Err(e) = BASE64_STANDARD.decode(&v) {
317 | return Err(Error::new(
318 | ErrorKind::ConfigInvalid,
319 | format!("invalid account_key: cannot decode as base64: {e}"),
320 | )
321 | .with_operation("Builder::build")
322 | .with_context("service", AZBLOB_SCHEME)
323 | .with_context("key", "account_key"));
324 | }
325 | config_loader.account_key = Some(v);
326 | }
327 |
328 | if let Some(v) = self.config.sas_token.clone() {
329 | config_loader.sas_token = Some(v);
330 | }
331 |
332 | let encryption_key =
333 | match &self.config.encryption_key {
334 | None => None,
335 | Some(v) => Some(build_header_value(v).map_err(|err| {
336 | err.with_context("key", "server_side_encryption_customer_key")
337 | })?),
338 | };
339 |
340 | let encryption_key_sha256 = match &self.config.encryption_key_sha256 {
341 | None => None,
342 | Some(v) => Some(build_header_value(v).map_err(|err| {
343 | err.with_context("key", "server_side_encryption_customer_key_sha256")
344 | })?),
345 | };
346 |
347 | let encryption_algorithm = match &self.config.encryption_algorithm {
348 | None => None,
349 | Some(v) => {
350 | if v == "AES256" {
351 | Some(build_header_value(v).map_err(|err| {
352 | err.with_context("key", "server_side_encryption_customer_algorithm")
353 | })?)
354 | } else {
355 | return Err(Error::new(
356 | ErrorKind::ConfigInvalid,
357 | "encryption_algorithm value must be AES256",
358 | ));
359 | }
360 | }
361 | };
362 |
363 | let cred_loader = AzureStorageLoader::new(config_loader);
364 |
365 | let signer = AzureStorageSigner::new();
366 |
367 | Ok(AzblobBackend {
368 | core: Arc::new(AzblobCore {
369 | info: {
370 | let am = AccessorInfo::default();
371 | am.set_scheme(AZBLOB_SCHEME)
372 | .set_root(&root)
373 | .set_name(container)
374 | .set_native_capability(Capability {
375 | stat: true,
376 | stat_with_if_match: true,
377 | stat_with_if_none_match: true,
378 |
379 | read: true,
380 |
381 | read_with_if_match: true,
382 | read_with_if_none_match: true,
383 | read_with_override_content_disposition: true,
384 | read_with_if_modified_since: true,
385 | read_with_if_unmodified_since: true,
386 |
387 | write: true,
388 | write_can_append: true,
389 | write_can_empty: true,
390 | write_can_multi: true,
391 | write_with_cache_control: true,
392 | write_with_content_type: true,
393 | write_with_if_not_exists: true,
394 | write_with_if_none_match: true,
395 | write_with_user_metadata: true,
396 |
397 | delete: true,
398 | delete_max_size: Some(AZBLOB_BATCH_LIMIT),
399 |
400 | copy: true,
401 | copy_with_if_not_exists: true,
402 |
403 | list: true,
404 | list_with_recursive: true,
405 |
406 | presign: self.config.sas_token.is_some(),
407 | presign_stat: self.config.sas_token.is_some(),
408 | presign_read: self.config.sas_token.is_some(),
409 | presign_write: self.config.sas_token.is_some(),
410 |
411 | shared: true,
412 |
413 | ..Default::default()
414 | });
415 |
416 | am.into()
417 | },
418 | root,
419 | endpoint,
420 | encryption_key,
421 | encryption_key_sha256,
422 | encryption_algorithm,
423 | container: self.config.container.clone(),
424 |
425 | loader: cred_loader,
426 | signer,
427 | }),
428 | })
429 | }
430 | }
431 |
/// Backend for azblob services.
#[derive(Debug, Clone)]
pub struct AzblobBackend {
    // Shared core holding config, endpoint, credential loader and signer.
    // Wrapped in `Arc` so cloning the backend is cheap.
    core: Arc<AzblobCore>,
}
437 |
impl Access for AzblobBackend {
    type Reader = HttpBody;
    type Writer = AzblobWriters;
    type Lister = oio::PageLister<AzblobLister>;
    type Deleter = oio::BatchDeleter<AzblobDeleter>;

    fn info(&self) -> Arc<AccessorInfo> {
        self.core.info.clone()
    }

    /// Stat a blob via `Get Blob Properties`, mapping response headers into
    /// OpenDAL metadata (including version id and user metadata).
    async fn stat(&self, path: &str, args: OpStat) -> Result<RpStat> {
        let resp = self.core.azblob_get_blob_properties(path, &args).await?;

        let status = resp.status();

        match status {
            StatusCode::OK => {
                let headers = resp.headers();
                let mut meta = parse_into_metadata(path, headers)?;
                // Azure reports the blob version in the `x-ms-version-id` header.
                if let Some(version_id) = parse_header_to_str(headers, X_MS_VERSION_ID)? {
                    meta.set_version(version_id);
                }

                // User metadata is returned as `x-ms-meta-*` headers.
                let user_meta = parse_prefixed_headers(headers, X_MS_META_PREFIX);
                if !user_meta.is_empty() {
                    meta = meta.with_user_metadata(user_meta);
                }

                Ok(RpStat::new(meta))
            }
            _ => Err(parse_error(resp)),
        }
    }

    /// Read a blob, streaming the body back. Both 200 (whole blob) and
    /// 206 (range read) are success cases.
    async fn read(&self, path: &str, args: OpRead) -> Result<(RpRead, Self::Reader)> {
        let resp = self.core.azblob_get_blob(path, args.range(), &args).await?;

        let status = resp.status();
        match status {
            StatusCode::OK | StatusCode::PARTIAL_CONTENT => Ok((RpRead::new(), resp.into_body())),
            _ => {
                // On failure the body must be buffered so the error parser
                // can inspect the service's error payload.
                let (part, mut body) = resp.into_parts();
                let buf = body.to_buffer().await?;
                Err(parse_error(Response::from_parts(part, buf)))
            }
        }
    }

    /// Build a writer: append blob semantics when `append` is requested,
    /// otherwise a (possibly concurrent) block-based writer.
    async fn write(&self, path: &str, args: OpWrite) -> Result<(RpWrite, Self::Writer)> {
        let w = AzblobWriter::new(self.core.clone(), args.clone(), path.to_string());
        let w = if args.append() {
            AzblobWriters::Two(oio::AppendWriter::new(w))
        } else {
            AzblobWriters::One(oio::BlockWriter::new(
                self.core.info.clone(),
                w,
                args.concurrent(),
            ))
        };

        Ok((RpWrite::default(), w))
    }

    /// Build a batch deleter; batch size is capped by the advertised
    /// `delete_max_size` capability (Azure's 256-entry batch limit).
    async fn delete(&self) -> Result<(RpDelete, Self::Deleter)> {
        Ok((
            RpDelete::default(),
            oio::BatchDeleter::new(
                AzblobDeleter::new(self.core.clone()),
                self.core.info.full_capability().delete_max_size,
            ),
        ))
    }

    /// Build a paged lister over the container under `path`.
    async fn list(&self, path: &str, args: OpList) -> Result<(RpList, Self::Lister)> {
        let l = AzblobLister::new(
            self.core.clone(),
            path.to_string(),
            args.recursive(),
            args.limit(),
        );

        Ok((RpList::default(), oio::PageLister::new(l)))
    }

    /// Server-side copy; Azure acknowledges an accepted copy with 202.
    async fn copy(&self, from: &str, to: &str, args: OpCopy) -> Result<RpCopy> {
        let resp = self.core.azblob_copy_blob(from, to, args).await?;

        let status = resp.status();

        match status {
            StatusCode::ACCEPTED => Ok(RpCopy::default()),
            _ => Err(parse_error(resp)),
        }
    }

    /// Build a presigned request by constructing the raw HTTP request for
    /// the operation and signing its query string (requires a SAS token,
    /// see the `presign_*` capabilities set in `build`).
    async fn presign(&self, path: &str, args: OpPresign) -> Result<RpPresign> {
        let req = match args.operation() {
            PresignOperation::Stat(v) => self.core.azblob_head_blob_request(path, v),
            PresignOperation::Read(v) => {
                self.core
                    .azblob_get_blob_request(path, BytesRange::default(), v)
            }
            PresignOperation::Write(_) => {
                self.core
                    .azblob_put_blob_request(path, None, &OpWrite::default(), Buffer::new())
            }
            PresignOperation::Delete(_) => Err(Error::new(
                ErrorKind::Unsupported,
                "operation is not supported",
            )),
            // Catch-all for presign variants added in the future.
            _ => Err(Error::new(
                ErrorKind::Unsupported,
                "presign operation is not supported",
            )),
        };

        let mut req = req?;

        self.core.sign_query(&mut req).await?;

        // Only method, uri and headers survive into the presigned request;
        // the body (if any) is discarded.
        let (parts, _) = req.into_parts();

        Ok(RpPresign::new(PresignedRequest::new(
            parts.method,
            parts.uri,
            parts.headers,
        )))
    }
}
567 |
```
--------------------------------------------------------------------------------
/core/tests/behavior/async_stat.rs:
--------------------------------------------------------------------------------
```rust
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | use std::str::FromStr;
19 | use std::time::Duration;
20 |
21 | use anyhow::Result;
22 | use http::StatusCode;
23 | use reqwest::Url;
24 | use tokio::time::sleep;
25 |
26 | use crate::*;
27 |
28 | pub fn tests(op: &Operator, tests: &mut Vec<Trial>) {
29 | let cap = op.info().full_capability();
30 |
31 | if cap.stat && cap.write {
32 | tests.extend(async_trials!(
33 | op,
34 | test_stat_file,
35 | test_stat_dir,
36 | test_stat_nested_parent_dir,
37 | test_stat_with_special_chars,
38 | test_stat_not_cleaned_path,
39 | test_stat_not_exist,
40 | test_stat_with_if_match,
41 | test_stat_with_if_none_match,
42 | test_stat_with_if_modified_since,
43 | test_stat_with_if_unmodified_since,
44 | test_stat_with_override_cache_control,
45 | test_stat_with_override_content_disposition,
46 | test_stat_with_override_content_type,
47 | test_stat_root,
48 | test_stat_with_version,
49 | stat_with_not_existing_version
50 | ))
51 | }
52 |
53 | if cap.stat && !cap.write {
54 | tests.extend(async_trials!(
55 | op,
56 | test_read_only_stat_file_and_dir,
57 | test_read_only_stat_special_chars,
58 | test_read_only_stat_not_cleaned_path,
59 | test_read_only_stat_not_exist,
60 | test_read_only_stat_with_if_match,
61 | test_read_only_stat_with_if_none_match,
62 | test_read_only_stat_root
63 | ))
64 | }
65 | }
66 |
67 | /// Stat existing file should return metadata
68 | pub async fn test_stat_file(op: Operator) -> Result<()> {
69 | let (path, content, size) = TEST_FIXTURE.new_file(op.clone());
70 |
71 | op.write(&path, content).await.expect("write must succeed");
72 |
73 | let meta = op.stat(&path).await?;
74 | assert_eq!(meta.mode(), EntryMode::FILE);
75 | assert_eq!(meta.content_length(), size as u64);
76 |
77 | // Stat a file with trailing slash should return `NotFound`.
78 | if op.info().full_capability().create_dir {
79 | let result = op.stat(&format!("{path}/")).await;
80 | assert!(result.is_err());
81 | assert_eq!(result.unwrap_err().kind(), ErrorKind::NotFound);
82 | }
83 |
84 | Ok(())
85 | }
86 |
87 | /// Stat existing file should return metadata
88 | pub async fn test_stat_dir(op: Operator) -> Result<()> {
89 | if !op.info().full_capability().create_dir {
90 | return Ok(());
91 | }
92 |
93 | let path = TEST_FIXTURE.new_dir_path();
94 |
95 | op.create_dir(&path).await.expect("write must succeed");
96 |
97 | let meta = op.stat(&path).await?;
98 | assert_eq!(meta.mode(), EntryMode::DIR);
99 |
100 | // Stat a dir without trailing slash could have two behavior.
101 | let result = op.stat(path.trim_end_matches('/')).await;
102 | match result {
103 | Ok(meta) => assert_eq!(meta.mode(), EntryMode::DIR),
104 | Err(err) => assert_eq!(err.kind(), ErrorKind::NotFound),
105 | }
106 |
107 | Ok(())
108 | }
109 |
110 | /// Stat the parent dir of existing dir should return metadata
111 | pub async fn test_stat_nested_parent_dir(op: Operator) -> Result<()> {
112 | if !op.info().full_capability().create_dir {
113 | return Ok(());
114 | }
115 |
116 | let parent = format!("{}", uuid::Uuid::new_v4());
117 | let file = format!("{}", uuid::Uuid::new_v4());
118 | let (path, content, _) =
119 | TEST_FIXTURE.new_file_with_path(op.clone(), &format!("{parent}/{file}"));
120 |
121 | op.write(&path, content.clone())
122 | .await
123 | .expect("write must succeed");
124 |
125 | let meta = op.stat(&format!("{parent}/")).await?;
126 | assert_eq!(meta.mode(), EntryMode::DIR);
127 |
128 | Ok(())
129 | }
130 |
131 | /// Stat existing file with special chars should return metadata
132 | pub async fn test_stat_with_special_chars(op: Operator) -> Result<()> {
133 | let path = format!("{} !@#$%^&()_+-=;',.txt", uuid::Uuid::new_v4());
134 | let (path, content, size) = TEST_FIXTURE.new_file_with_path(op.clone(), &path);
135 |
136 | op.write(&path, content).await.expect("write must succeed");
137 |
138 | let meta = op.stat(&path).await?;
139 | assert_eq!(meta.mode(), EntryMode::FILE);
140 | assert_eq!(meta.content_length(), size as u64);
141 |
142 | Ok(())
143 | }
144 |
145 | /// Stat not cleaned path should also succeed.
146 | pub async fn test_stat_not_cleaned_path(op: Operator) -> Result<()> {
147 | let (path, content, size) = TEST_FIXTURE.new_file(op.clone());
148 |
149 | op.write(&path, content).await.expect("write must succeed");
150 |
151 | let meta = op.stat(&format!("//{}", &path)).await?;
152 | assert_eq!(meta.mode(), EntryMode::FILE);
153 | assert_eq!(meta.content_length(), size as u64);
154 |
155 | Ok(())
156 | }
157 |
158 | /// Stat not exist file should return NotFound
159 | pub async fn test_stat_not_exist(op: Operator) -> Result<()> {
160 | let path = uuid::Uuid::new_v4().to_string();
161 |
162 | // Stat not exist file should return NotFound.
163 | let meta = op.stat(&path).await;
164 | assert!(meta.is_err());
165 | assert_eq!(meta.unwrap_err().kind(), ErrorKind::NotFound);
166 |
167 | // Stat not exist dir should also return NotFound.
168 | if op.info().full_capability().create_dir {
169 | let meta = op.stat(&format!("{path}/")).await;
170 | assert!(meta.is_err());
171 | assert_eq!(meta.unwrap_err().kind(), ErrorKind::NotFound);
172 | }
173 |
174 | Ok(())
175 | }
176 |
/// Stat with if_match should succeed, else get a ConditionNotMatch error.
pub async fn test_stat_with_if_match(op: Operator) -> Result<()> {
    // Skip services that don't support conditional stat on If-Match.
    if !op.info().full_capability().stat_with_if_match {
        return Ok(());
    }

    let (path, content, size) = TEST_FIXTURE.new_file(op.clone());

    op.write(&path, content.clone())
        .await
        .expect("write must succeed");

    let meta = op.stat(&path).await?;
    assert_eq!(meta.mode(), EntryMode::FILE);
    assert_eq!(meta.content_length(), size as u64);

    // A non-matching (but syntactically valid, quoted) etag must be rejected.
    let res = op.stat_with(&path).if_match("\"invalid_etag\"").await;
    assert!(res.is_err());
    assert_eq!(res.unwrap_err().kind(), ErrorKind::ConditionNotMatch);

    // Stat with the object's current etag must succeed.
    let result = op
        .stat_with(&path)
        .if_match(meta.etag().expect("etag must exist"))
        .await;
    assert!(result.is_ok());

    Ok(())
}
205 |
/// Stat with if_none_match should succeed, else get a ConditionNotMatch.
pub async fn test_stat_with_if_none_match(op: Operator) -> Result<()> {
    // Skip services that don't support conditional stat on If-None-Match.
    if !op.info().full_capability().stat_with_if_none_match {
        return Ok(());
    }

    let (path, content, size) = TEST_FIXTURE.new_file(op.clone());

    op.write(&path, content.clone())
        .await
        .expect("write must succeed");

    let meta = op.stat(&path).await?;
    assert_eq!(meta.mode(), EntryMode::FILE);
    assert_eq!(meta.content_length(), size as u64);

    // If-None-Match with the current etag matches, so stat must be rejected.
    let res = op
        .stat_with(&path)
        .if_none_match(meta.etag().expect("etag must exist"))
        .await;
    assert!(res.is_err());
    assert_eq!(res.unwrap_err().kind(), ErrorKind::ConditionNotMatch);

    // A non-matching etag passes through and returns the same metadata.
    let res = op
        .stat_with(&path)
        .if_none_match("\"invalid_etag\"")
        .await?;
    assert_eq!(res.mode(), meta.mode());
    assert_eq!(res.content_length(), meta.content_length());

    Ok(())
}
238 |
/// Stat file with if_modified_since should succeed, otherwise get a ConditionNotMatch error.
pub async fn test_stat_with_if_modified_since(op: Operator) -> Result<()> {
    if !op.info().full_capability().stat_with_if_modified_since {
        return Ok(());
    }

    let (path, content, _) = TEST_FIXTURE.new_file(op.clone());

    op.write(&path, content.clone())
        .await
        .expect("write must succeed");

    let meta = op.stat(&path).await?;
    assert_eq!(meta.mode(), EntryMode::FILE);
    assert_eq!(meta.content_length(), content.len() as u64);

    // One second before the mtime: the file counts as modified since then,
    // so the conditional stat must succeed.
    let since = meta.last_modified().unwrap() - Duration::from_secs(1);
    let res = op.stat_with(&path).if_modified_since(since).await?;
    assert_eq!(res.last_modified(), meta.last_modified());

    // Wait so that "mtime + 1s" lies safely in the past from the
    // service's point of view before issuing the failing request.
    sleep(Duration::from_secs(1)).await;

    // One second after the mtime: nothing changed since, so the
    // condition must fail with ConditionNotMatch.
    let since = meta.last_modified().unwrap() + Duration::from_secs(1);
    let res = op.stat_with(&path).if_modified_since(since).await;
    assert!(res.is_err());
    assert_eq!(res.err().unwrap().kind(), ErrorKind::ConditionNotMatch);

    Ok(())
}
268 |
/// Stat file with if_unmodified_since should succeed, otherwise get a ConditionNotMatch error.
pub async fn test_stat_with_if_unmodified_since(op: Operator) -> Result<()> {
    if !op.info().full_capability().stat_with_if_unmodified_since {
        return Ok(());
    }

    let (path, content, _) = TEST_FIXTURE.new_file(op.clone());

    op.write(&path, content.clone())
        .await
        .expect("write must succeed");

    let meta = op.stat(&path).await?;
    assert_eq!(meta.mode(), EntryMode::FILE);
    assert_eq!(meta.content_length(), content.len() as u64);

    // One second before the mtime: the file *was* modified after that
    // instant, so the condition must fail.
    let since = meta.last_modified().unwrap() - Duration::from_secs(1);
    let res = op.stat_with(&path).if_unmodified_since(since).await;
    assert!(res.is_err());
    assert_eq!(res.err().unwrap().kind(), ErrorKind::ConditionNotMatch);

    // Wait so that "mtime + 1s" lies safely in the past from the
    // service's point of view before issuing the succeeding request.
    sleep(Duration::from_secs(1)).await;

    // One second after the mtime: unmodified since then, so stat succeeds.
    let since = meta.last_modified().unwrap() + Duration::from_secs(1);
    let res = op.stat_with(&path).if_unmodified_since(since).await?;
    assert_eq!(res.last_modified(), meta.last_modified());

    Ok(())
}
298 |
/// Stat file with override-cache-control should succeed.
pub async fn test_stat_with_override_cache_control(op: Operator) -> Result<()> {
    // Needs both the override capability and presigned requests, since the
    // override is verified by replaying the presigned request over HTTP.
    if !(op.info().full_capability().stat_with_override_cache_control
        && op.info().full_capability().presign)
    {
        return Ok(());
    }

    let (path, content, _) = TEST_FIXTURE.new_file(op.clone());

    op.write(&path, content.clone())
        .await
        .expect("write must succeed");

    let target_cache_control = "no-cache, no-store, must-revalidate";
    let signed_req = op
        .presign_stat_with(&path, Duration::from_secs(60))
        .override_cache_control(target_cache_control)
        .await
        .expect("sign must succeed");

    // Replay the presigned request with a plain HTTP client, copying
    // method, URL and every signed header verbatim.
    let client = reqwest::Client::new();
    let mut req = client.request(
        signed_req.method().clone(),
        Url::from_str(&signed_req.uri().to_string()).expect("must be valid url"),
    );
    for (k, v) in signed_req.header() {
        req = req.header(k, v);
    }

    let resp = req.send().await.expect("send must succeed");

    // The response must carry the overridden cache-control value.
    assert_eq!(resp.status(), StatusCode::OK);
    assert_eq!(
        resp.headers()
            .get("cache-control")
            .expect("cache-control header must exist")
            .to_str()
            .expect("cache-control header must be string"),
        target_cache_control
    );

    Ok(())
}
343 |
/// Stat file with override_content_disposition should succeed.
pub async fn test_stat_with_override_content_disposition(op: Operator) -> Result<()> {
    // Needs both the override capability and presigned requests, since the
    // override is verified by replaying the presigned request over HTTP.
    if !(op
        .info()
        .full_capability()
        .stat_with_override_content_disposition
        && op.info().full_capability().presign)
    {
        return Ok(());
    }

    let (path, content, _) = TEST_FIXTURE.new_file(op.clone());

    op.write(&path, content.clone())
        .await
        .expect("write must succeed");

    let target_content_disposition = "attachment; filename=foo.txt";

    let signed_req = op
        .presign_stat_with(&path, Duration::from_secs(60))
        .override_content_disposition(target_content_disposition)
        .await
        .expect("presign must succeed");

    // Replay the presigned request with a plain HTTP client, copying
    // method, URL and every signed header verbatim.
    let client = reqwest::Client::new();
    let mut req = client.request(
        signed_req.method().clone(),
        Url::from_str(&signed_req.uri().to_string()).expect("must be valid url"),
    );
    for (k, v) in signed_req.header() {
        req = req.header(k, v);
    }

    let resp = req.send().await.expect("send must succeed");

    // The response must carry the overridden content-disposition value.
    assert_eq!(resp.status(), StatusCode::OK);
    assert_eq!(
        resp.headers()
            .get(http::header::CONTENT_DISPOSITION)
            .expect("content-disposition header must exist")
            .to_str()
            .expect("content-disposition header must be string"),
        target_content_disposition
    );

    Ok(())
}
392 |
/// Stat file with override_content_type should succeed.
pub async fn test_stat_with_override_content_type(op: Operator) -> Result<()> {
    // Needs both the override capability and presigned requests, since the
    // override is verified by replaying the presigned request over HTTP.
    if !(op.info().full_capability().stat_with_override_content_type
        && op.info().full_capability().presign)
    {
        return Ok(());
    }

    let (path, content, _) = TEST_FIXTURE.new_file(op.clone());

    op.write(&path, content.clone())
        .await
        .expect("write must succeed");

    let target_content_type = "application/opendal";

    let signed_req = op
        .presign_stat_with(&path, Duration::from_secs(60))
        .override_content_type(target_content_type)
        .await
        .expect("presign must succeed");

    // Replay the presigned request with a plain HTTP client, copying
    // method, URL and every signed header verbatim.
    let client = reqwest::Client::new();
    let mut req = client.request(
        signed_req.method().clone(),
        Url::from_str(&signed_req.uri().to_string()).expect("must be valid url"),
    );
    for (k, v) in signed_req.header() {
        req = req.header(k, v);
    }

    let resp = req.send().await.expect("send must succeed");

    // The response must carry the overridden content-type value.
    assert_eq!(resp.status(), StatusCode::OK);
    assert_eq!(
        resp.headers()
            .get(http::header::CONTENT_TYPE)
            .expect("content-type header must exist")
            .to_str()
            .expect("content-type header must be string"),
        target_content_type
    );

    Ok(())
}
438 |
439 | /// Root should be able to stat and returns DIR.
440 | pub async fn test_stat_root(op: Operator) -> Result<()> {
441 | let meta = op.stat("").await?;
442 | assert_eq!(meta.mode(), EntryMode::DIR);
443 |
444 | let meta = op.stat("/").await?;
445 | assert_eq!(meta.mode(), EntryMode::DIR);
446 |
447 | Ok(())
448 | }
449 |
450 | /// Stat normal file and dir should return metadata
451 | pub async fn test_read_only_stat_file_and_dir(op: Operator) -> Result<()> {
452 | let meta = op.stat("normal_file.txt").await?;
453 | assert_eq!(meta.mode(), EntryMode::FILE);
454 | assert_eq!(meta.content_length(), 30482);
455 |
456 | let meta = op.stat("normal_dir/").await?;
457 | assert_eq!(meta.mode(), EntryMode::DIR);
458 |
459 | Ok(())
460 | }
461 |
462 | /// Stat special file and dir should return metadata
463 | pub async fn test_read_only_stat_special_chars(op: Operator) -> Result<()> {
464 | let meta = op.stat("special_file !@#$%^&()_+-=;',.txt").await?;
465 | assert_eq!(meta.mode(), EntryMode::FILE);
466 | assert_eq!(meta.content_length(), 30482);
467 |
468 | let meta = op.stat("special_dir !@#$%^&()_+-=;',/").await?;
469 | assert_eq!(meta.mode(), EntryMode::DIR);
470 |
471 | Ok(())
472 | }
473 |
474 | /// Stat not cleaned path should also succeed.
475 | pub async fn test_read_only_stat_not_cleaned_path(op: Operator) -> Result<()> {
476 | let meta = op.stat("//normal_file.txt").await?;
477 | assert_eq!(meta.mode(), EntryMode::FILE);
478 | assert_eq!(meta.content_length(), 30482);
479 |
480 | Ok(())
481 | }
482 |
483 | /// Stat not exist file should return NotFound
484 | pub async fn test_read_only_stat_not_exist(op: Operator) -> Result<()> {
485 | let path = uuid::Uuid::new_v4().to_string();
486 |
487 | let meta = op.stat(&path).await;
488 | assert!(meta.is_err());
489 | assert_eq!(meta.unwrap_err().kind(), ErrorKind::NotFound);
490 |
491 | Ok(())
492 | }
493 |
494 | /// Stat with if_match should succeed, else get a ConditionNotMatch error.
495 | pub async fn test_read_only_stat_with_if_match(op: Operator) -> Result<()> {
496 | if !op.info().full_capability().stat_with_if_match {
497 | return Ok(());
498 | }
499 |
500 | let path = "normal_file.txt";
501 |
502 | let meta = op.stat(path).await?;
503 | assert_eq!(meta.mode(), EntryMode::FILE);
504 | assert_eq!(meta.content_length(), 30482);
505 |
506 | let res = op.stat_with(path).if_match("invalid_etag").await;
507 | assert!(res.is_err());
508 | assert_eq!(res.unwrap_err().kind(), ErrorKind::ConditionNotMatch);
509 |
510 | let result = op
511 | .stat_with(path)
512 | .if_match(meta.etag().expect("etag must exist"))
513 | .await;
514 | assert!(result.is_ok());
515 |
516 | Ok(())
517 | }
518 |
519 | /// Stat with if_none_match should succeed, else get a ConditionNotMatch.
520 | pub async fn test_read_only_stat_with_if_none_match(op: Operator) -> Result<()> {
521 | if !op.info().full_capability().stat_with_if_none_match {
522 | return Ok(());
523 | }
524 |
525 | let path = "normal_file.txt";
526 |
527 | let meta = op.stat(path).await?;
528 | assert_eq!(meta.mode(), EntryMode::FILE);
529 | assert_eq!(meta.content_length(), 30482);
530 |
531 | let res = op
532 | .stat_with(path)
533 | .if_none_match(meta.etag().expect("etag must exist"))
534 | .await;
535 | assert!(res.is_err());
536 | assert_eq!(res.unwrap_err().kind(), ErrorKind::ConditionNotMatch);
537 |
538 | let res = op.stat_with(path).if_none_match("invalid_etag").await?;
539 | assert_eq!(res.mode(), meta.mode());
540 | assert_eq!(res.content_length(), meta.content_length());
541 |
542 | Ok(())
543 | }
544 |
545 | /// Root should be able to stat and returns DIR.
546 | pub async fn test_read_only_stat_root(op: Operator) -> Result<()> {
547 | let meta = op.stat("").await?;
548 | assert_eq!(meta.mode(), EntryMode::DIR);
549 |
550 | let meta = op.stat("/").await?;
551 | assert_eq!(meta.mode(), EntryMode::DIR);
552 |
553 | Ok(())
554 | }
555 |
/// Stat with an explicit version should return the matching metadata,
/// even after the object has been overwritten with a newer version.
pub async fn test_stat_with_version(op: Operator) -> Result<()> {
    if !op.info().full_capability().stat_with_version {
        return Ok(());
    }

    let (path, content, _) = TEST_FIXTURE.new_file(op.clone());

    op.write(path.as_str(), content.clone())
        .await
        .expect("write must success");
    let first_meta = op.stat(path.as_str()).await.expect("stat must success");
    let first_version = first_meta.version().expect("must have version");

    // Stat pinned to the first version must match the unpinned stat.
    let first_versioning_meta = op
        .stat_with(path.as_str())
        .version(first_version)
        .await
        .expect("stat must success");
    assert_eq!(first_meta, first_versioning_meta);

    // Overwrite the object: the service must mint a different version id.
    op.write(path.as_str(), content)
        .await
        .expect("write must success");
    let second_meta = op.stat(path.as_str()).await.expect("stat must success");
    let second_version = second_meta.version().expect("must have version");
    assert_ne!(first_version, second_version);

    // we can still `stat` with first_version after writing new data
    let meta = op
        .stat_with(path.as_str())
        .version(first_version)
        .await
        .expect("stat must success");
    assert_eq!(first_meta, meta);

    Ok(())
}
593 |
/// Stat with a version id that belongs to a *different* object should
/// return NotFound.
pub async fn stat_with_not_existing_version(op: Operator) -> Result<()> {
    // NOTE(review): this fn lacks the `test_` prefix used by its siblings;
    // renaming would change the public interface (and its registration
    // site), so the inconsistency is only flagged here.
    if !op.info().full_capability().stat_with_version {
        return Ok(());
    }

    // retrieve a valid version
    let (path, content, _) = TEST_FIXTURE.new_file(op.clone());
    op.write(path.as_str(), content.clone())
        .await
        .expect("write must success");
    let version = op
        .stat(path.as_str())
        .await
        .expect("stat must success")
        .version()
        .expect("must have version")
        .to_string();

    // Use that version against a freshly written, different object:
    // the lookup must fail with NotFound.
    let (path, content, _) = TEST_FIXTURE.new_file(op.clone());
    op.write(path.as_str(), content)
        .await
        .expect("write must success");
    let ret = op.stat_with(path.as_str()).version(version.as_str()).await;
    assert!(ret.is_err());
    assert_eq!(ret.unwrap_err().kind(), ErrorKind::NotFound);

    Ok(())
}
622 |
```
--------------------------------------------------------------------------------
/.github/scripts/weekly_update/main.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | # Licensed to the Apache Software Foundation (ASF) under one
3 | # or more contributor license agreements. See the NOTICE file
4 | # distributed with this work for additional information
5 | # regarding copyright ownership. The ASF licenses this file
6 | # to you under the Apache License, Version 2.0 (the
7 | # "License"); you may not use this file except in compliance
8 | # with the License. You may obtain a copy of the License at
9 | #
10 | # http://www.apache.org/licenses/LICENSE-2.0
11 | #
12 | # Unless required by applicable law or agreed to in writing,
13 | # software distributed under the License is distributed on an
14 | # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | # KIND, either express or implied. See the License for the
16 | # specific language governing permissions and limitations
17 | # under the License.
18 |
19 |
20 | import os
21 | import sys
22 | import argparse
23 | import json
24 | from datetime import datetime, timedelta
25 | import requests
26 | from dateutil.parser import parse
27 | import pytz
28 | from openai import OpenAI
29 |
30 | import logging
31 |
32 | logging.basicConfig(level=logging.DEBUG)
33 |
34 |
def get_github_api_token():
    """Read the GitHub API token from the GITHUB_TOKEN environment variable.

    Returns the token string, or None/empty after printing a warning when it
    is unset; requests still work without a token, just with lower limits.
    """
    token = os.environ.get("GITHUB_TOKEN")
    if token:
        return token
    print(
        "Warning: GitHub API token not found. Set the GITHUB_TOKEN environment variable."
    )
    print("Without a token, API rate limits will be lower.")
    return token
44 |
45 |
def get_openai_api_key():
    """Read the OpenAI API key from OPENAI_API_KEY, exiting if it is missing.

    Unlike the GitHub token, this key is mandatory: without it the script
    cannot summarize, so we abort with a non-zero exit code.
    """
    api_key = os.environ.get("OPENAI_API_KEY")
    if api_key:
        return api_key
    print(
        "Error: OpenAI API key not found. Set the OPENAI_API_KEY environment variable."
    )
    sys.exit(1)
55 |
56 |
def init_openai_client():
    """Build an OpenAI client and pick the model from the environment.

    Honors OPENAI_API_BASE (default: https://api.openai.com/v1) and
    OPENAI_MODEL (default: gpt-4o); exits via get_openai_api_key() when
    OPENAI_API_KEY is missing.

    Returns:
        (client, model) tuple.
    """
    base_url = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1")
    model = os.environ.get("OPENAI_MODEL", "gpt-4o")

    client = OpenAI(
        api_key=get_openai_api_key(),
        base_url=base_url,
        # NOTE(review): api-version looks Azure-specific — confirm it is
        # harmless against the stock OpenAI endpoint.
        default_query={"api-version": "2023-05-15"},
    )
    return client, model
71 |
72 |
def is_recent(timestamp_str, days=7):
    """Return True if `timestamp_str` parses to within the last `days` days."""
    # GitHub timestamps are timezone-aware ISO-8601, so compare in UTC.
    age = datetime.now(pytz.utc) - parse(timestamp_str)
    return age.days < days
79 |
80 |
def fetch_issues(repo, token, days=7):
    """Fetch issues and PRs updated within the last `days` days.

    Args:
        repo: "owner/name" repository slug.
        token: optional GitHub API token; enables PR detail lookups and
            higher rate limits.
        days: look-back window in days.

    Returns:
        (issues, prs, good_first_issues) lists of normalized entry dicts;
        good_first_issues is the subset of open issues carrying a
        beginner-friendly label. All lists are empty on HTTP error.
    """
    # Compute the cutoff in UTC: the trailing "Z" we append asserts UTC, and
    # GitHub's `since` parameter expects an ISO-8601 UTC timestamp. Using
    # naive local time here would shift the window by the machine's UTC
    # offset, silently including/excluding items near the boundary.
    since_date = (datetime.now(pytz.utc) - timedelta(days=days)).strftime(
        "%Y-%m-%dT%H:%M:%SZ"
    )
    headers = {"Accept": "application/vnd.github.v3+json"}
    if token:
        headers["Authorization"] = f"token {token}"

    url = f"https://api.github.com/repos/{repo}/issues"
    params = {
        "state": "all",
        "since": since_date,
        "sort": "updated",
        "direction": "desc",
        "per_page": 100,
    }

    issues = []
    prs = []
    good_first_issues = []

    # NOTE(review): only the first page (<=100 items) is fetched; very busy
    # weeks may be truncated. Follow the `Link` header to paginate if needed.
    response = requests.get(url, params=params, headers=headers)
    if response.status_code != 200:
        print(f"Error fetching issues: {response.status_code}")
        return [], [], []

    for item in response.json():
        if is_recent(item["updated_at"], days):
            entry = {
                "id": item["number"],
                "title": item["title"],
                "url": item["html_url"],
                "user": item["user"]["login"],
                "updated_at": item["updated_at"],
                "body": item.get("body", "") or "",
                "labels": [label["name"] for label in item.get("labels", [])],
                "state": item["state"],
                "comments": item["comments"],
            }

            # Check if it's a good first issue (any beginner-friendly label).
            label_names = [label["name"].lower() for label in item.get("labels", [])]
            is_good_first = any(
                name
                in [
                    "good first issue",
                    "good-first-issue",
                    "beginner friendly",
                    "beginner-friendly",
                    "easy",
                ]
                for name in label_names
            )

            # The issues endpoint mixes issues and PRs; PRs carry a
            # `pull_request` key.
            if "pull_request" in item:
                # Enrich PRs with diff stats; this needs an authenticated call.
                if token:
                    pr_url = (
                        f"https://api.github.com/repos/{repo}/pulls/{item['number']}"
                    )
                    pr_response = requests.get(pr_url, headers=headers)
                    if pr_response.status_code == 200:
                        pr_data = pr_response.json()
                        entry["additions"] = pr_data.get("additions", 0)
                        entry["deletions"] = pr_data.get("deletions", 0)
                        entry["changed_files"] = pr_data.get("changed_files", 0)
                        entry["mergeable"] = pr_data.get("mergeable", None)
                        entry["draft"] = pr_data.get("draft", False)

                prs.append(entry)
            else:
                issues.append(entry)
                if is_good_first and item["state"] == "open":
                    good_first_issues.append(entry)

    return issues, prs, good_first_issues
156 |
157 |
def fetch_discussions(repo, token, days=7):
    """Fetch recent discussions from a repository.

    Uses the GraphQL API (discussions are not exposed via REST v3) and keeps
    only discussions updated within the last `days` days. Returns a list of
    normalized dicts; an empty list on any HTTP or shape error.

    NOTE(review): GitHub's GraphQL endpoint generally requires
    authentication — without a token this likely returns non-200 and
    yields an empty list. Confirm intended behavior for token-less runs.
    """
    headers = {"Accept": "application/vnd.github.v3+json"}
    if token:
        headers["Authorization"] = f"token {token}"

    # GraphQL query to fetch discussions
    query = """
    query($owner: String!, $name: String!) {
      repository(owner: $owner, name: $name) {
        discussions(first: 100, orderBy: {field: UPDATED_AT, direction: DESC}) {
          nodes {
            number
            title
            url
            author {
              login
            }
            updatedAt
            bodyText
            category {
              name
            }
            comments {
              totalCount
            }
            answerChosenAt
          }
        }
      }
    }
    """

    owner, name = repo.split("/")
    variables = {"owner": owner, "name": name}

    url = "https://api.github.com/graphql"
    response = requests.post(
        url, json={"query": query, "variables": variables}, headers=headers
    )

    discussions = []
    if response.status_code != 200:
        print(f"Error fetching discussions: {response.status_code}")
        return discussions

    result = response.json()
    # Defensive shape check: GraphQL errors come back as 200 with no "data".
    if (
        "data" in result
        and "repository" in result["data"]
        and "discussions" in result["data"]["repository"]
    ):
        for discussion in result["data"]["repository"]["discussions"]["nodes"]:
            # Keep only discussions updated within the window.
            if is_recent(discussion["updatedAt"], days):
                discussions.append(
                    {
                        "id": discussion["number"],
                        "title": discussion["title"],
                        "url": discussion["url"],
                        # `author` is null for deleted accounts.
                        "user": discussion["author"]["login"]
                        if discussion["author"]
                        else "Anonymous",
                        "updated_at": discussion["updatedAt"],
                        "body": discussion.get("bodyText", "") or "",
                        "category": discussion.get("category", {}).get(
                            "name", "General"
                        ),
                        "comments": discussion.get("comments", {}).get("totalCount", 0),
                        # A chosen answer timestamp marks the discussion answered.
                        "answered": discussion.get("answerChosenAt") is not None,
                    }
                )

    return discussions
231 |
232 |
def fetch_additional_good_first_issues(repo, token, count=5):
    """Fetch up to `count` open good-first issues, regardless of age.

    Tries several beginner-friendly label spellings in turn, stopping once
    enough unique issues are collected.

    Args:
        repo: "owner/name" repository slug.
        token: optional GitHub API token.
        count: maximum number of issues to return.

    Returns:
        List of normalized issue dicts (same shape as fetch_issues entries).
    """
    headers = {"Accept": "application/vnd.github.v3+json"}
    if token:
        headers["Authorization"] = f"token {token}"

    url = f"https://api.github.com/repos/{repo}/issues"
    params = {
        "state": "open",
        "labels": "good first issue",
        "sort": "updated",
        "direction": "desc",
        "per_page": count,
    }

    additional_issues = []
    seen_numbers = set()

    # Try each label spelling until we have enough unique issues. An issue
    # tagged with more than one of these labels would otherwise appear once
    # per query and consume multiple slots; PR entries are skipped because
    # the issues endpoint also returns pull requests.
    for label_query in ("good first issue", "good-first-issue", "beginner friendly"):
        if len(additional_issues) >= count:
            break
        params["labels"] = label_query
        response = requests.get(url, params=params, headers=headers)
        if response.status_code != 200:
            continue
        for item in response.json():
            if item["number"] in seen_numbers or "pull_request" in item:
                continue
            seen_numbers.add(item["number"])
            additional_issues.append(item)

    # Normalize into the shared entry shape.
    formatted_issues = []
    for item in additional_issues[:count]:
        formatted_issues.append(
            {
                "id": item["number"],
                "title": item["title"],
                "url": item["html_url"],
                "user": item["user"]["login"],
                "updated_at": item["updated_at"],
                "body": item.get("body", "") or "",
                "labels": [label["name"] for label in item.get("labels", [])],
                "state": item["state"],
                "comments": item["comments"],
            }
        )

    return formatted_issues
287 |
288 |
def format_data_for_llm(repo, issues, prs, discussions, good_first_issues, days=7):
    """Shape fetched GitHub activity into a JSON-serializable dict for the LLM.

    Args:
        repo: "owner/name" repository slug, recorded in the metadata section.
        issues / prs / discussions / good_first_issues: normalized entry
            dicts as produced by the fetch_* helpers.
        days: look-back window length, recorded in the metadata section.

    Returns:
        Dict with "metadata", "pull_requests", "issues", "discussions" and
        "good_first_issues" keys. Duplicate good-first issues (same issue
        number) are collapsed; the last occurrence wins.
    """

    def as_pr(pr):
        # Diff-stat fields may be absent when PR details weren't fetched.
        return {
            "id": pr["id"],
            "title": pr["title"],
            "url": pr["url"],
            "author": pr["user"],
            "updated_at": pr["updated_at"],
            "description": pr["body"],
            "labels": pr["labels"],
            "state": pr["state"],
            "comments": pr["comments"],
            "additions": pr.get("additions", "unknown"),
            "deletions": pr.get("deletions", "unknown"),
            "changed_files": pr.get("changed_files", "unknown"),
            "draft": pr.get("draft", False),
        }

    def as_issue(issue):
        return {
            "id": issue["id"],
            "title": issue["title"],
            "url": issue["url"],
            "author": issue["user"],
            "updated_at": issue["updated_at"],
            "description": issue["body"],
            "labels": issue["labels"],
            "state": issue["state"],
            "comments": issue["comments"],
        }

    def as_discussion(discussion):
        return {
            "id": discussion["id"],
            "title": discussion["title"],
            "url": discussion["url"],
            "author": discussion["user"],
            "updated_at": discussion["updated_at"],
            "description": discussion["body"],
            "category": discussion["category"],
            "comments": discussion["comments"],
            "answered": discussion.get("answered", False),
        }

    def as_gfi(issue):
        # Good-first issues are always open, so no "state" field is emitted.
        return {
            "id": issue["id"],
            "title": issue["title"],
            "url": issue["url"],
            "author": issue["user"],
            "updated_at": issue["updated_at"],
            "description": issue["body"],
            "labels": issue["labels"],
            "comments": issue["comments"],
        }

    # Collapse duplicate good-first issues, keyed by issue number.
    unique_gfi = {issue["id"]: issue for issue in good_first_issues}

    return {
        "metadata": {
            "repository": repo,
            "date_generated": datetime.now().strftime("%Y-%m-%d"),
            "period_days": days,
        },
        "pull_requests": [as_pr(pr) for pr in prs],
        "issues": [as_issue(issue) for issue in issues],
        "discussions": [as_discussion(d) for d in discussions],
        "good_first_issues": [as_gfi(issue) for issue in unique_gfi.values()],
    }
367 |
368 |
def summarize_with_openai(data, client, model):
    """Use OpenAI to summarize and prioritize the repository activity.

    Args:
        data: structured dict from format_data_for_llm (embedded as JSON).
        client: OpenAI client from init_openai_client().
        model: model name string.

    Returns:
        The generated markdown summary, or an error message string if the
        API call fails (the caller still receives a printable result).
    """

    # The full activity payload is inlined as JSON at the end of the prompt;
    # the formatting rules constrain how the model references PRs/issues.
    prompt = f"""
You are an open-source community evangelist responsible for reporting GitHub repository activity and encouraging more contributions.

I will provide you with JSON data containing recent pull requests, issues, and discussions from
the repository {data["metadata"]["repository"]} for the past {data["metadata"]["period_days"]} days.

Please analyze this data and provide:
1. A concise summary of the overall activity and key themes
2. The most important ongoing projects or initiatives based on the data
3. Prioritized issues and PRs that need immediate attention
4. Major discussions that should be highlighted
5. Identify any emerging trends or patterns in development

Additionally, include a section highlighting "Good First Issues" to encourage new contributors to join the project. Summarize what skills might be needed and why these issues are good entry points.

IMPORTANT: For each PR you mention, ALWAYS include the contributor's GitHub username with @ symbol (e.g., @username) to properly credit their contributions. This is critical for recognizing contributors' work.

CRITICAL FORMATTING INSTRUCTIONS:

- When referring to PRs, issues, or discussions, use ONLY the GitHub reference format: #XXXX (number with # prefix)
- DO NOT include the title after the reference number
- DO NOT repeat the title in your explanation if you've already mentioned the reference number
- AVOID listing large numbers of PRs in sequence - instead, summarize them by theme or use bulleted lists with no more than 3-5 items per bullet
- For groups of related PRs, summarize the theme and mention 1-2 representative examples instead of listing all of them
- When appropriate, use standard Markdown URL syntax [meaningful text](full link) instead of just the reference number

Example of correct format:

- #1234 by @username implements the core authentication framework
- Multiple documentation updates were contributed by @contributor focusing on installation guides and API references

Here's the JSON data:
```json
{json.dumps(data, ensure_ascii=False)}
```

Format your response as:

*This weekly update is generated by LLMs. You're welcome to join our [Discord](https://opendal.apache.org/discord/) for in-depth discussions.*

## Overall Activity Summary
[Provide a concise overview of activity]

## Key Ongoing Projects
[List major projects/initiatives with brief descriptions - always mention contributors with @ symbol]

## Priority Items
[List issues/PRs that need immediate attention - always mention contributors with @ symbol]

## Notable Discussions
[Highlight important ongoing discussions - always mention contributors with @ symbol, use format like #1234: brief description]

## Emerging Trends
[Identify patterns or trends]

## Good First Issues
[List good first issues for new contributors with brief explanations of what makes them approachable, use format like #1234: brief description]
"""

    # Any API failure is reported in-band so the pipeline can still emit output.
    try:
        response = client.chat.completions.create(
            model=model,
            messages=[
                {
                    "role": "system",
                    "content": "You are an open-source community evangelist responsible for reporting GitHub repository activity and encouraging more contributions.",
                },
                {"role": "user", "content": prompt},
            ],
            temperature=0.3,
            max_tokens=4000,
        )
        return response.choices[0].message.content
    except Exception as e:
        print(f"Error with OpenAI API: {e}")
        return f"Error generating summary: {e}"
448 |
449 |
def main():
    """Command-line entry point: collect recent GitHub activity for a repo
    and emit either raw JSON or an OpenAI-generated weekly summary."""
    parser = argparse.ArgumentParser(
        description="Generate a weekly summary of GitHub repository activity with OpenAI analysis."
    )
    parser.add_argument("repo", help="GitHub repository in the format owner/repo")
    parser.add_argument(
        "--days", type=int, default=7, help="Number of days to look back (default: 7)"
    )
    parser.add_argument("--output", help="Output file path (default: stdout)")
    parser.add_argument(
        "--raw", action="store_true", help="Output raw JSON data without OpenAI summary"
    )
    parser.add_argument(
        "--json-output", help="Path to save raw JSON data (in addition to summary)"
    )
    parser.add_argument(
        "--gfi-count",
        type=int,
        default=5,
        help="Number of good first issues to include (default: 5)",
    )

    args = parser.parse_args()

    token = get_github_api_token()

    # Pull issues, PRs, and discussions from the GitHub API.
    print(f"Fetching data from {args.repo} for the last {args.days} days...")
    issues, prs, recent_good_first_issues = fetch_issues(args.repo, token, args.days)
    discussions = fetch_discussions(args.repo, token, args.days)

    # Top up with older good-first issues when recent activity alone falls short.
    shortfall = args.gfi_count - len(recent_good_first_issues)
    good_first_issues = recent_good_first_issues
    if shortfall > 0:
        print("Fetching additional good first issues...")
        good_first_issues = recent_good_first_issues + fetch_additional_good_first_issues(
            args.repo, token, shortfall
        )

    print(f"Found {len(good_first_issues)} good first issues.")

    # Build the LLM-friendly structured payload.
    structured_data = format_data_for_llm(
        args.repo, issues, prs, discussions, good_first_issues, args.days
    )

    def dump_json(path):
        # Write the structured payload as pretty-printed UTF-8 JSON.
        with open(path, "w", encoding="utf-8") as f:
            json.dump(structured_data, f, ensure_ascii=False, indent=2)

    if args.json_output:
        dump_json(args.json_output)
        print(f"Raw JSON data written to {args.json_output}")

    # --raw short-circuits before any OpenAI call is made.
    if args.raw:
        if args.output:
            dump_json(args.output)
            print(f"Raw data written to {args.output}")
        else:
            print(json.dumps(structured_data, ensure_ascii=False, indent=2))
        return

    print("Initializing openai API for summarization...")
    client, model = init_openai_client()

    # Generate the weekly summary with OpenAI.
    print("Generating summary with OpenAI ...")
    summary = summarize_with_openai(structured_data, client, model)

    # Emit the summary to the requested destination.
    if args.output:
        with open(args.output, "w", encoding="utf-8") as f:
            f.write(summary)
        print(f"Summary written to {args.output}")
    else:
        print(summary)


if __name__ == "__main__":
    main()
533 |
```
--------------------------------------------------------------------------------
/core/layers/prometheus-client/src/lib.rs:
--------------------------------------------------------------------------------
```rust
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | //! Metrics layer (using the [prometheus-client](https://docs.rs/prometheus-client) crate) implementation for Apache OpenDAL.
19 |
20 | #![cfg_attr(docsrs, feature(doc_cfg))]
21 | #![deny(missing_docs)]
22 |
23 | use opendal_core::raw::*;
24 | use opendal_layer_observe_metrics_common as observe;
25 | use prometheus_client::encoding::EncodeLabel;
26 | use prometheus_client::encoding::EncodeLabelSet;
27 | use prometheus_client::encoding::LabelSetEncoder;
28 | use prometheus_client::metrics::counter::Counter;
29 | use prometheus_client::metrics::family::Family;
30 | use prometheus_client::metrics::family::MetricConstructor;
31 | use prometheus_client::metrics::gauge::Gauge;
32 | use prometheus_client::metrics::histogram::Histogram;
33 | use prometheus_client::registry::Metric;
34 | use prometheus_client::registry::Registry;
35 | use prometheus_client::registry::Unit;
36 |
37 | /// Add [prometheus-client](https://docs.rs/prometheus-client) for every operation.
38 | ///
39 | /// # Prometheus Metrics
40 | ///
41 | /// We provide several metrics, please see the documentation of [`observe`] module.
42 | /// For a more detailed explanation of these metrics and how they are used, please refer to the [Prometheus documentation](https://prometheus.io/docs/introduction/overview/).
43 | ///
44 | /// # Examples
45 | ///
46 | /// ```no_run
47 | /// # use log::info;
48 | /// # use opendal_core::services;
49 | /// # use opendal_core::Operator;
50 | /// # use opendal_core::Result;
51 | /// # use opendal_layer_prometheus_client::PrometheusClientLayer;
52 | /// #
53 | /// # #[tokio::main]
54 | /// # async fn main() -> Result<()> {
55 | /// let mut registry = prometheus_client::registry::Registry::default();
56 | ///
57 | /// let op = Operator::new(services::Memory::default())?
58 | /// .layer(PrometheusClientLayer::builder().register(&mut registry))
59 | /// .finish();
60 | ///
61 | /// // Write data into object test.
62 | /// op.write("test", "Hello, World!").await?;
63 | /// // Read data from the object.
64 | /// let bs = op.read("test").await?;
65 | /// info!("content: {}", String::from_utf8_lossy(&bs.to_bytes()));
66 | ///
67 | /// // Get object metadata.
68 | /// let meta = op.stat("test").await?;
69 | /// info!("meta: {:?}", meta);
70 | ///
71 | /// // Export prometheus metrics.
72 | /// let mut buf = String::new();
73 | /// prometheus_client::encoding::text::encode(&mut buf, ®istry).unwrap();
74 | /// println!("## Prometheus Metrics");
75 | /// println!("{}", buf);
76 | /// # Ok(())
77 | /// # }
78 | /// ```
79 | #[derive(Clone)]
80 | pub struct PrometheusClientLayer {
81 | interceptor: PrometheusClientInterceptor,
82 | }
83 |
84 | impl PrometheusClientLayer {
85 | /// Create a [`PrometheusClientLayerBuilder`] to set the configuration of metrics.
86 | pub fn builder() -> PrometheusClientLayerBuilder {
87 | PrometheusClientLayerBuilder::default()
88 | }
89 | }
90 |
impl<A: Access> Layer<A> for PrometheusClientLayer {
    type LayeredAccess = observe::MetricsAccessor<A, PrometheusClientInterceptor>;

    fn layer(&self, inner: A) -> Self::LayeredAccess {
        // Delegate to the shared observe metrics layer, wiring in our
        // interceptor so recorded values land in the prometheus-client families.
        observe::MetricsLayer::new(self.interceptor.clone()).layer(inner)
    }
}
98 |
/// [`PrometheusClientLayerBuilder`] is a config builder to build a [`PrometheusClientLayer`].
pub struct PrometheusClientLayerBuilder {
    // Histogram bucket boundaries for byte-count metrics (e.g. `operation_bytes`).
    bytes_buckets: Vec<f64>,
    // Buckets for byte-rate metrics (e.g. `operation_bytes_rate`).
    bytes_rate_buckets: Vec<f64>,
    // Buckets for entry-count metrics (e.g. `operation_entries`).
    entries_buckets: Vec<f64>,
    // Buckets for entry-rate metrics (e.g. `operation_entries_rate`).
    entries_rate_buckets: Vec<f64>,
    // Buckets for duration metrics (e.g. `operation_duration_seconds`).
    duration_seconds_buckets: Vec<f64>,
    // Buckets for time-to-first-byte metrics (e.g. `operation_ttfb_seconds`).
    ttfb_buckets: Vec<f64>,
    // When true, the potentially high-cardinality `root` label is not emitted.
    disable_label_root: bool,
}
109 |
impl Default for PrometheusClientLayerBuilder {
    // Start from the shared default bucket layouts defined by the common
    // observe crate, with the `root` label enabled.
    fn default() -> Self {
        Self {
            bytes_buckets: observe::DEFAULT_BYTES_BUCKETS.to_vec(),
            bytes_rate_buckets: observe::DEFAULT_BYTES_RATE_BUCKETS.to_vec(),
            entries_buckets: observe::DEFAULT_ENTRIES_BUCKETS.to_vec(),
            entries_rate_buckets: observe::DEFAULT_ENTRIES_RATE_BUCKETS.to_vec(),
            duration_seconds_buckets: observe::DEFAULT_DURATION_SECONDS_BUCKETS.to_vec(),
            ttfb_buckets: observe::DEFAULT_TTFB_BUCKETS.to_vec(),
            disable_label_root: false,
        }
    }
}
123 |
124 | impl PrometheusClientLayerBuilder {
125 | /// Set buckets for bytes related histogram like `operation_bytes`.
126 | pub fn bytes_buckets(mut self, buckets: Vec<f64>) -> Self {
127 | if !buckets.is_empty() {
128 | self.bytes_buckets = buckets;
129 | }
130 | self
131 | }
132 |
133 | /// Set buckets for bytes rate related histogram like `operation_bytes_rate`.
134 | pub fn bytes_rate_buckets(mut self, buckets: Vec<f64>) -> Self {
135 | if !buckets.is_empty() {
136 | self.bytes_rate_buckets = buckets;
137 | }
138 | self
139 | }
140 |
141 | /// Set buckets for entries related histogram like `operation_entries`.
142 | pub fn entries_buckets(mut self, buckets: Vec<f64>) -> Self {
143 | if !buckets.is_empty() {
144 | self.entries_buckets = buckets;
145 | }
146 | self
147 | }
148 |
149 | /// Set buckets for entries rate related histogram like `operation_entries_rate`.
150 | pub fn entries_rate_buckets(mut self, buckets: Vec<f64>) -> Self {
151 | if !buckets.is_empty() {
152 | self.entries_rate_buckets = buckets;
153 | }
154 | self
155 | }
156 |
157 | /// Set buckets for duration seconds related histogram like `operation_duration_seconds`.
158 | pub fn duration_seconds_buckets(mut self, buckets: Vec<f64>) -> Self {
159 | if !buckets.is_empty() {
160 | self.duration_seconds_buckets = buckets;
161 | }
162 | self
163 | }
164 |
165 | /// Set buckets for ttfb related histogram like `operation_ttfb_seconds`.
166 | pub fn ttfb_buckets(mut self, buckets: Vec<f64>) -> Self {
167 | if !buckets.is_empty() {
168 | self.ttfb_buckets = buckets;
169 | }
170 | self
171 | }
172 |
173 | /// The 'root' label might have risks of being high cardinality, users can choose to disable it
174 | /// when they found it's not useful for their metrics.
175 | pub fn disable_label_root(mut self, disable: bool) -> Self {
176 | self.disable_label_root = disable;
177 | self
178 | }
179 |
180 | /// Register the metrics into the registry and return a [`PrometheusClientLayer`].
181 | ///
182 | /// # Example
183 | ///
184 | /// ```no_run
185 | /// # use opendal_core::services;
186 | /// # use opendal_core::Operator;
187 | /// # use opendal_core::Result;
188 | /// # use opendal_layer_prometheus_client::PrometheusClientLayer;
189 | /// #
190 | /// # #[tokio::main]
191 | /// # async fn main() -> Result<()> {
192 | /// // Pick a builder and configure it.
193 | /// let builder = services::Memory::default();
194 | /// let mut registry = prometheus_client::registry::Registry::default();
195 | ///
196 | /// let _ = Operator::new(builder)?
197 | /// .layer(PrometheusClientLayer::builder().register(&mut registry))
198 | /// .finish();
199 | /// # Ok(())
200 | /// # }
201 | /// ```
202 | pub fn register(self, registry: &mut Registry) -> PrometheusClientLayer {
203 | let operation_bytes =
204 | Family::<OperationLabels, Histogram, _>::new_with_constructor(HistogramConstructor {
205 | buckets: self.bytes_buckets.clone(),
206 | });
207 | let operation_bytes_rate =
208 | Family::<OperationLabels, Histogram, _>::new_with_constructor(HistogramConstructor {
209 | buckets: self.bytes_rate_buckets.clone(),
210 | });
211 | let operation_entries =
212 | Family::<OperationLabels, Histogram, _>::new_with_constructor(HistogramConstructor {
213 | buckets: self.entries_buckets.clone(),
214 | });
215 | let operation_entries_rate =
216 | Family::<OperationLabels, Histogram, _>::new_with_constructor(HistogramConstructor {
217 | buckets: self.entries_rate_buckets.clone(),
218 | });
219 | let operation_duration_seconds =
220 | Family::<OperationLabels, Histogram, _>::new_with_constructor(HistogramConstructor {
221 | buckets: self.duration_seconds_buckets.clone(),
222 | });
223 | let operation_errors_total = Family::<OperationLabels, Counter>::default();
224 | let operation_executing = Family::<OperationLabels, Gauge>::default();
225 | let operation_ttfb_seconds =
226 | Family::<OperationLabels, Histogram, _>::new_with_constructor(HistogramConstructor {
227 | buckets: self.ttfb_buckets.clone(),
228 | });
229 |
230 | let http_executing = Family::<OperationLabels, Gauge>::default();
231 | let http_request_bytes =
232 | Family::<OperationLabels, Histogram, _>::new_with_constructor(HistogramConstructor {
233 | buckets: self.bytes_buckets.clone(),
234 | });
235 | let http_request_bytes_rate =
236 | Family::<OperationLabels, Histogram, _>::new_with_constructor(HistogramConstructor {
237 | buckets: self.bytes_rate_buckets.clone(),
238 | });
239 | let http_request_duration_seconds =
240 | Family::<OperationLabels, Histogram, _>::new_with_constructor(HistogramConstructor {
241 | buckets: self.duration_seconds_buckets.clone(),
242 | });
243 | let http_response_bytes =
244 | Family::<OperationLabels, Histogram, _>::new_with_constructor(HistogramConstructor {
245 | buckets: self.bytes_buckets.clone(),
246 | });
247 | let http_response_bytes_rate =
248 | Family::<OperationLabels, Histogram, _>::new_with_constructor(HistogramConstructor {
249 | buckets: self.bytes_rate_buckets.clone(),
250 | });
251 | let http_response_duration_seconds =
252 | Family::<OperationLabels, Histogram, _>::new_with_constructor(HistogramConstructor {
253 | buckets: self.duration_seconds_buckets.clone(),
254 | });
255 | let http_connection_errors_total = Family::<OperationLabels, Counter>::default();
256 | let http_status_errors_total = Family::<OperationLabels, Counter>::default();
257 |
258 | register_metric(
259 | registry,
260 | operation_bytes.clone(),
261 | observe::MetricValue::OperationBytes(0),
262 | );
263 | register_metric(
264 | registry,
265 | operation_bytes_rate.clone(),
266 | observe::MetricValue::OperationBytesRate(0.0),
267 | );
268 | register_metric(
269 | registry,
270 | operation_entries.clone(),
271 | observe::MetricValue::OperationEntries(0),
272 | );
273 | register_metric(
274 | registry,
275 | operation_entries_rate.clone(),
276 | observe::MetricValue::OperationEntriesRate(0.0),
277 | );
278 | register_metric(
279 | registry,
280 | operation_duration_seconds.clone(),
281 | observe::MetricValue::OperationDurationSeconds(Duration::default()),
282 | );
283 | register_metric(
284 | registry,
285 | operation_errors_total.clone(),
286 | observe::MetricValue::OperationErrorsTotal,
287 | );
288 | register_metric(
289 | registry,
290 | operation_executing.clone(),
291 | observe::MetricValue::OperationExecuting(0),
292 | );
293 | register_metric(
294 | registry,
295 | operation_ttfb_seconds.clone(),
296 | observe::MetricValue::OperationTtfbSeconds(Duration::default()),
297 | );
298 |
299 | register_metric(
300 | registry,
301 | http_executing.clone(),
302 | observe::MetricValue::HttpExecuting(0),
303 | );
304 | register_metric(
305 | registry,
306 | http_request_bytes.clone(),
307 | observe::MetricValue::HttpRequestBytes(0),
308 | );
309 | register_metric(
310 | registry,
311 | http_request_bytes_rate.clone(),
312 | observe::MetricValue::HttpRequestBytesRate(0.0),
313 | );
314 | register_metric(
315 | registry,
316 | http_request_duration_seconds.clone(),
317 | observe::MetricValue::HttpRequestDurationSeconds(Duration::default()),
318 | );
319 | register_metric(
320 | registry,
321 | http_response_bytes.clone(),
322 | observe::MetricValue::HttpResponseBytes(0),
323 | );
324 | register_metric(
325 | registry,
326 | http_response_bytes_rate.clone(),
327 | observe::MetricValue::HttpResponseBytesRate(0.0),
328 | );
329 | register_metric(
330 | registry,
331 | http_response_duration_seconds.clone(),
332 | observe::MetricValue::HttpResponseDurationSeconds(Duration::default()),
333 | );
334 | register_metric(
335 | registry,
336 | http_connection_errors_total.clone(),
337 | observe::MetricValue::HttpConnectionErrorsTotal,
338 | );
339 | register_metric(
340 | registry,
341 | http_status_errors_total.clone(),
342 | observe::MetricValue::HttpStatusErrorsTotal,
343 | );
344 |
345 | PrometheusClientLayer {
346 | interceptor: PrometheusClientInterceptor {
347 | operation_bytes,
348 | operation_bytes_rate,
349 | operation_entries,
350 | operation_entries_rate,
351 | operation_duration_seconds,
352 | operation_errors_total,
353 | operation_executing,
354 | operation_ttfb_seconds,
355 |
356 | http_executing,
357 | http_request_bytes,
358 | http_request_bytes_rate,
359 | http_request_duration_seconds,
360 | http_response_bytes,
361 | http_response_bytes_rate,
362 | http_response_duration_seconds,
363 | http_connection_errors_total,
364 | http_status_errors_total,
365 |
366 | disable_label_root: self.disable_label_root,
367 | },
368 | }
369 | }
370 | }
371 |
/// Builds a fresh [`Histogram`] with a fixed bucket layout for each new label
/// combination encountered by a [`Family`].
#[derive(Clone)]
struct HistogramConstructor {
    // Bucket boundaries shared by every histogram this constructor produces.
    buckets: Vec<f64>,
}
376 |
377 | impl MetricConstructor<Histogram> for HistogramConstructor {
378 | fn new_metric(&self) -> Histogram {
379 | Histogram::new(self.buckets.iter().cloned())
380 | }
381 | }
382 |
/// Interceptor holding every prometheus-client metric family; one family per
/// metric kind, each keyed by [`OperationLabels`].
#[doc(hidden)]
#[derive(Clone, Debug)]
pub struct PrometheusClientInterceptor {
    // Operation-level metric families.
    operation_bytes: Family<OperationLabels, Histogram, HistogramConstructor>,
    operation_bytes_rate: Family<OperationLabels, Histogram, HistogramConstructor>,
    operation_entries: Family<OperationLabels, Histogram, HistogramConstructor>,
    operation_entries_rate: Family<OperationLabels, Histogram, HistogramConstructor>,
    operation_duration_seconds: Family<OperationLabels, Histogram, HistogramConstructor>,
    operation_errors_total: Family<OperationLabels, Counter>,
    operation_executing: Family<OperationLabels, Gauge>,
    operation_ttfb_seconds: Family<OperationLabels, Histogram, HistogramConstructor>,

    // HTTP-level metric families.
    http_executing: Family<OperationLabels, Gauge>,
    http_request_bytes: Family<OperationLabels, Histogram, HistogramConstructor>,
    http_request_bytes_rate: Family<OperationLabels, Histogram, HistogramConstructor>,
    http_request_duration_seconds: Family<OperationLabels, Histogram, HistogramConstructor>,
    http_response_bytes: Family<OperationLabels, Histogram, HistogramConstructor>,
    http_response_bytes_rate: Family<OperationLabels, Histogram, HistogramConstructor>,
    http_response_duration_seconds: Family<OperationLabels, Histogram, HistogramConstructor>,
    http_connection_errors_total: Family<OperationLabels, Counter>,
    http_status_errors_total: Family<OperationLabels, Counter>,

    // Propagated into OperationLabels so encoding can skip the `root` label.
    disable_label_root: bool,
}
407 |
impl observe::MetricsIntercept for PrometheusClientInterceptor {
    /// Route a single observed value to its matching metric family, creating
    /// the per-label-set metric on first use via `get_or_create`.
    fn observe(&self, labels: observe::MetricLabels, value: observe::MetricValue) {
        // Wrap the shared labels so our EncodeLabelSet impl can honor the
        // `disable_label_root` setting during encoding.
        let labels = OperationLabels {
            labels,
            disable_label_root: self.disable_label_root,
        };
        match value {
            observe::MetricValue::OperationBytes(v) => self
                .operation_bytes
                .get_or_create(&labels)
                .observe(v as f64),
            observe::MetricValue::OperationBytesRate(v) => {
                self.operation_bytes_rate.get_or_create(&labels).observe(v)
            }
            observe::MetricValue::OperationEntries(v) => self
                .operation_entries
                .get_or_create(&labels)
                .observe(v as f64),
            observe::MetricValue::OperationEntriesRate(v) => self
                .operation_entries_rate
                .get_or_create(&labels)
                .observe(v),
            observe::MetricValue::OperationDurationSeconds(v) => self
                .operation_duration_seconds
                .get_or_create(&labels)
                .observe(v.as_secs_f64()),
            observe::MetricValue::OperationErrorsTotal => {
                self.operation_errors_total.get_or_create(&labels).inc();
            }
            observe::MetricValue::OperationExecuting(v) => {
                // `v` is a signed delta applied to the in-flight gauge.
                self.operation_executing
                    .get_or_create(&labels)
                    .inc_by(v as i64);
            }
            observe::MetricValue::OperationTtfbSeconds(v) => self
                .operation_ttfb_seconds
                .get_or_create(&labels)
                .observe(v.as_secs_f64()),

            // HTTP-level metrics below.
            observe::MetricValue::HttpExecuting(v) => {
                self.http_executing.get_or_create(&labels).inc_by(v as i64);
            }
            observe::MetricValue::HttpRequestBytes(v) => self
                .http_request_bytes
                .get_or_create(&labels)
                .observe(v as f64),
            observe::MetricValue::HttpRequestBytesRate(v) => self
                .http_request_bytes_rate
                .get_or_create(&labels)
                .observe(v),
            observe::MetricValue::HttpRequestDurationSeconds(v) => self
                .http_request_duration_seconds
                .get_or_create(&labels)
                .observe(v.as_secs_f64()),
            observe::MetricValue::HttpResponseBytes(v) => self
                .http_response_bytes
                .get_or_create(&labels)
                .observe(v as f64),
            observe::MetricValue::HttpResponseBytesRate(v) => self
                .http_response_bytes_rate
                .get_or_create(&labels)
                .observe(v),
            observe::MetricValue::HttpResponseDurationSeconds(v) => self
                .http_response_duration_seconds
                .get_or_create(&labels)
                .observe(v.as_secs_f64()),
            observe::MetricValue::HttpConnectionErrorsTotal => {
                self.http_connection_errors_total
                    .get_or_create(&labels)
                    .inc();
            }
            observe::MetricValue::HttpStatusErrorsTotal => {
                self.http_status_errors_total.get_or_create(&labels).inc();
            }
            // Metric kinds this layer does not record are silently ignored
            // (MetricValue presumably may grow new variants).
            _ => {}
        };
    }
}
486 |
/// Label set used as the key of every metric family; wraps the shared observe
/// labels plus the flag controlling whether the `root` label is emitted.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct OperationLabels {
    labels: observe::MetricLabels,
    disable_label_root: bool,
}
492 |
impl EncodeLabelSet for OperationLabels {
    /// Encode labels in a fixed order: scheme, namespace, (optional) root,
    /// operation, then the optional error and status-code labels.
    fn encode(&self, encoder: &mut LabelSetEncoder<'_>) -> std::fmt::Result {
        (observe::LABEL_SCHEME, self.labels.scheme).encode(encoder.encode_label())?;
        (observe::LABEL_NAMESPACE, self.labels.namespace.as_ref())
            .encode(encoder.encode_label())?;
        // `root` can be high cardinality; users may opt out via the builder.
        if !self.disable_label_root {
            (observe::LABEL_ROOT, self.labels.root.as_ref()).encode(encoder.encode_label())?;
        }
        (observe::LABEL_OPERATION, self.labels.operation).encode(encoder.encode_label())?;

        // Error/status labels only appear on failure observations.
        if let Some(error) = &self.labels.error {
            (observe::LABEL_ERROR, error.into_static()).encode(encoder.encode_label())?;
        }
        if let Some(code) = &self.labels.status_code {
            (observe::LABEL_STATUS_CODE, code.as_str()).encode(encoder.encode_label())?;
        }
        Ok(())
    }
}
512 |
513 | fn register_metric(registry: &mut Registry, metric: impl Metric, value: observe::MetricValue) {
514 | let ((name, unit), help) = (value.name_with_unit(), value.help());
515 |
516 | if let Some(unit) = unit {
517 | registry.register_with_unit(name, help, Unit::Other(unit.to_string()), metric);
518 | } else {
519 | registry.register(name, help, metric);
520 | }
521 | }
522 |
```
--------------------------------------------------------------------------------
/bindings/java/src/async_operator.rs:
--------------------------------------------------------------------------------
```rust
1 | // Licensed to the Apache Software Foundation (ASF) under one
2 | // or more contributor license agreements. See the NOTICE file
3 | // distributed with this work for additional information
4 | // regarding copyright ownership. The ASF licenses this file
5 | // to you under the Apache License, Version 2.0 (the
6 | // "License"); you may not use this file except in compliance
7 | // with the License. You may obtain a copy of the License at
8 | //
9 | // http://www.apache.org/licenses/LICENSE-2.0
10 | //
11 | // Unless required by applicable law or agreed to in writing,
12 | // software distributed under the License is distributed on an
13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 | // KIND, either express or implied. See the License for the
15 | // specific language governing permissions and limitations
16 | // under the License.
17 |
18 | use std::time::Duration;
19 |
20 | use jni::JNIEnv;
21 | use jni::objects::JByteArray;
22 | use jni::objects::JClass;
23 | use jni::objects::JObject;
24 | use jni::objects::JString;
25 | use jni::objects::JValue;
26 | use jni::objects::JValueOwned;
27 | use jni::sys::jlong;
28 | use jni::sys::jobject;
29 | use jni::sys::jsize;
30 | use opendal::Entry;
31 | use opendal::Operator;
32 | use opendal::blocking;
33 |
34 | use crate::Result;
35 | use crate::convert::{
36 | bytes_to_jbytearray, jmap_to_hashmap, jstring_to_string, offset_length_to_range,
37 | read_int64_field,
38 | };
39 | use crate::executor::Executor;
40 | use crate::executor::executor_or_default;
41 | use crate::executor::get_current_env;
42 | use crate::make_metadata;
43 | use crate::make_operator_info;
44 | use crate::make_presigned_request;
45 | use crate::{make_entry, make_list_options, make_stat_options, make_write_options};
46 |
47 | #[unsafe(no_mangle)]
48 | pub extern "system" fn Java_org_apache_opendal_AsyncOperator_constructor(
49 | mut env: JNIEnv,
50 | _: JClass,
51 | scheme: JString,
52 | map: JObject,
53 | ) -> jlong {
54 | intern_constructor(&mut env, scheme, map).unwrap_or_else(|e| {
55 | e.throw(&mut env);
56 | 0
57 | })
58 | }
59 |
60 | fn intern_constructor(env: &mut JNIEnv, scheme: JString, map: JObject) -> Result<jlong> {
61 | let scheme = jstring_to_string(env, &scheme)?;
62 | let map = jmap_to_hashmap(env, &map)?;
63 | let op = Operator::via_iter(scheme, map)?;
64 | Ok(Box::into_raw(Box::new(op)) as jlong)
65 | }
66 |
67 | /// # Safety
68 | ///
69 | /// This function should not be called before the Operator is ready.
70 | #[unsafe(no_mangle)]
71 | pub unsafe extern "system" fn Java_org_apache_opendal_AsyncOperator_duplicate(
72 | _: JNIEnv,
73 | _: JClass,
74 | op: *mut Operator,
75 | ) -> jlong {
76 | let op = unsafe { &mut *op };
77 | Box::into_raw(Box::new(op.clone())) as jlong
78 | }
79 |
/// # Safety
///
/// This function should not be called before the Operator is ready.
#[unsafe(no_mangle)]
pub unsafe extern "system" fn Java_org_apache_opendal_AsyncOperator_disposeInternal(
    _: JNIEnv,
    _: JObject,
    op: *mut Operator,
) {
    // Reclaim the Box allocated in the constructor; the Java-side handle must
    // never be used after this call (it would be a use-after-free).
    unsafe {
        drop(Box::from_raw(op));
    }
}
93 |
/// # Safety
///
/// This function should not be called before the Operator is ready.
#[unsafe(no_mangle)]
pub unsafe extern "system" fn Java_org_apache_opendal_AsyncOperator_write(
    mut env: JNIEnv,
    _: JClass,
    op: *mut Operator,
    executor: *const Executor,
    path: JString,
    content: JByteArray,
    write_options: JObject,
) -> jlong {
    // On success returns the future id; on failure throws a Java exception
    // and returns 0 so the caller can bail out.
    intern_write(&mut env, op, executor, path, content, write_options).unwrap_or_else(|e| {
        e.throw(&mut env);
        0
    })
}
112 |
113 | fn intern_write(
114 | env: &mut JNIEnv,
115 | op: *mut Operator,
116 | executor: *const Executor,
117 | path: JString,
118 | content: JByteArray,
119 | options: JObject,
120 | ) -> Result<jlong> {
121 | let op = unsafe { &mut *op };
122 | let id = request_id(env)?;
123 |
124 | let write_opts = make_write_options(env, &options)?;
125 | let path = jstring_to_string(env, &path)?;
126 | let content = env.convert_byte_array(content)?;
127 |
128 | executor_or_default(env, executor)?.spawn(async move {
129 | let result = op
130 | .write_options(&path, content, write_opts)
131 | .await
132 | .map(|_| JValueOwned::Void)
133 | .map_err(Into::into);
134 | complete_future(id, result)
135 | });
136 |
137 | Ok(id)
138 | }
139 |
/// # Safety
///
/// This function should not be called before the Operator is ready.
#[unsafe(no_mangle)]
pub unsafe extern "system" fn Java_org_apache_opendal_AsyncOperator_stat(
    mut env: JNIEnv,
    _: JClass,
    op: *mut Operator,
    executor: *const Executor,
    path: JString,
    stat_options: JObject,
) -> jlong {
    // On success returns the future id; on failure throws and returns 0.
    intern_stat(&mut env, op, executor, path, stat_options).unwrap_or_else(|e| {
        e.throw(&mut env);
        0
    })
}
157 |
158 | fn intern_stat(
159 | env: &mut JNIEnv,
160 | op: *mut Operator,
161 | executor: *const Executor,
162 | path: JString,
163 | options: JObject,
164 | ) -> Result<jlong> {
165 | let op = unsafe { &mut *op };
166 | let id = request_id(env)?;
167 |
168 | let path = jstring_to_string(env, &path)?;
169 | let stat_opts = make_stat_options(env, &options)?;
170 |
171 | executor_or_default(env, executor)?.spawn(async move {
172 | let metadata = op.stat_options(&path, stat_opts).await.map_err(Into::into);
173 | let mut env = unsafe { get_current_env() };
174 | let result = metadata.and_then(|metadata| make_metadata(&mut env, metadata));
175 | complete_future(id, result.map(JValueOwned::Object))
176 | });
177 |
178 | Ok(id)
179 | }
180 |
/// # Safety
///
/// This function should not be called before the Operator is ready.
#[unsafe(no_mangle)]
pub unsafe extern "system" fn Java_org_apache_opendal_AsyncOperator_read(
    mut env: JNIEnv,
    _: JClass,
    op: *mut Operator,
    executor: *const Executor,
    path: JString,
    read_options: JObject,
) -> jlong {
    // On success returns the future id; on failure throws and returns 0.
    intern_read(&mut env, op, executor, path, read_options).unwrap_or_else(|e| {
        e.throw(&mut env);
        0
    })
}
198 |
199 | fn intern_read(
200 | env: &mut JNIEnv,
201 | op: *mut Operator,
202 | executor: *const Executor,
203 | path: JString,
204 | options: JObject,
205 | ) -> Result<jlong> {
206 | // Prepare inputs before spawning
207 | let id = request_id(env)?;
208 | let path_str = jstring_to_string(env, &path)?;
209 | let offset = read_int64_field(env, &options, "offset")?;
210 | let length = read_int64_field(env, &options, "length")?;
211 | let range = offset_length_to_range(offset, length)?;
212 |
213 | // Clone operator handle to move into the task
214 | let op_cloned = unsafe { &*op }.clone();
215 |
216 | executor_or_default(env, executor)?.spawn(async move {
217 | let mut read_op = op_cloned.read_with(&path_str);
218 | read_op = read_op.range(range);
219 | let result = read_op.await.map_err(Into::into);
220 |
221 | let mut env = unsafe { get_current_env() };
222 | let result = result.and_then(|bs| bytes_to_jbytearray(&mut env, bs.to_bytes()));
223 | complete_future(id, result.map(|bs| JValueOwned::Object(bs.into())))
224 | });
225 |
226 | Ok(id)
227 | }
228 |
/// # Safety
///
/// This function should not be called before the Operator is ready.
#[unsafe(no_mangle)]
pub unsafe extern "system" fn Java_org_apache_opendal_AsyncOperator_delete(
    mut env: JNIEnv,
    _: JClass,
    op: *mut Operator,
    executor: *const Executor,
    path: JString,
) -> jlong {
    // On success returns the future id; on failure throws and returns 0.
    intern_delete(&mut env, op, executor, path).unwrap_or_else(|e| {
        e.throw(&mut env);
        0
    })
}
245 |
246 | fn intern_delete(
247 | env: &mut JNIEnv,
248 | op: *mut Operator,
249 | executor: *const Executor,
250 | path: JString,
251 | ) -> Result<jlong> {
252 | let op = unsafe { &mut *op };
253 | let id = request_id(env)?;
254 |
255 | let path = jstring_to_string(env, &path)?;
256 |
257 | executor_or_default(env, executor)?.spawn(async move {
258 | let result = op
259 | .delete(&path)
260 | .await
261 | .map(|_| JValueOwned::Void)
262 | .map_err(Into::into);
263 | complete_future(id, result)
264 | });
265 |
266 | Ok(id)
267 | }
268 |
/// # Safety
///
/// This function should not be called before the Operator is ready.
#[unsafe(no_mangle)]
pub unsafe extern "system" fn Java_org_apache_opendal_AsyncOperator_makeBlockingOp(
    mut env: JNIEnv,
    _: JClass,
    op: *mut Operator,
    executor: *const Executor,
) -> jlong {
    // Returns a handle to a new blocking operator; on failure throws and
    // returns 0.
    intern_make_blocking_op(&mut env, op, executor).unwrap_or_else(|e| {
        e.throw(&mut env);
        0
    })
}
284 |
285 | fn intern_make_blocking_op(
286 | env: &mut JNIEnv,
287 | op: *mut Operator,
288 | executor: *const Executor,
289 | ) -> Result<jlong> {
290 | let op = unsafe { &mut *op };
291 | let op = executor_or_default(env, executor)?
292 | .enter_with(move || blocking::Operator::new(op.clone()))?;
293 | Ok(Box::into_raw(Box::new(op)) as jlong)
294 | }
295 |
/// # Safety
///
/// This function should not be called before the Operator is ready.
#[unsafe(no_mangle)]
pub unsafe extern "system" fn Java_org_apache_opendal_AsyncOperator_makeOperatorInfo(
    mut env: JNIEnv,
    _: JClass,
    op: *mut Operator,
) -> jobject {
    // On failure throws a Java exception and returns a null jobject.
    intern_make_operator_info(&mut env, op).unwrap_or_else(|e| {
        e.throw(&mut env);
        JObject::default().into_raw()
    })
}
310 |
311 | fn intern_make_operator_info(env: &mut JNIEnv, op: *mut Operator) -> Result<jobject> {
312 | let op = unsafe { &mut *op };
313 | Ok(make_operator_info(env, op.info())?.into_raw())
314 | }
315 |
/// # Safety
///
/// This function should not be called before the Operator is ready.
#[unsafe(no_mangle)]
pub unsafe extern "system" fn Java_org_apache_opendal_AsyncOperator_createDir(
    mut env: JNIEnv,
    _: JClass,
    op: *mut Operator,
    executor: *const Executor,
    path: JString,
) -> jlong {
    // On success returns the future id; on failure throws and returns 0.
    intern_create_dir(&mut env, op, executor, path).unwrap_or_else(|e| {
        e.throw(&mut env);
        0
    })
}
332 |
333 | fn intern_create_dir(
334 | env: &mut JNIEnv,
335 | op: *mut Operator,
336 | executor: *const Executor,
337 | path: JString,
338 | ) -> Result<jlong> {
339 | let op = unsafe { &mut *op };
340 | let id = request_id(env)?;
341 |
342 | let path = jstring_to_string(env, &path)?;
343 |
344 | executor_or_default(env, executor)?.spawn(async move {
345 | let result = op
346 | .create_dir(&path)
347 | .await
348 | .map(|_| JValueOwned::Void)
349 | .map_err(Into::into);
350 | complete_future(id, result)
351 | });
352 |
353 | Ok(id)
354 | }
355 |
356 | /// # Safety
357 | ///
358 | /// This function should not be called before the Operator is ready.
359 | #[unsafe(no_mangle)]
360 | pub unsafe extern "system" fn Java_org_apache_opendal_AsyncOperator_copy(
361 | mut env: JNIEnv,
362 | _: JClass,
363 | op: *mut Operator,
364 | executor: *const Executor,
365 | source_path: JString,
366 | target_path: JString,
367 | ) -> jlong {
368 | intern_copy(&mut env, op, executor, source_path, target_path).unwrap_or_else(|e| {
369 | e.throw(&mut env);
370 | 0
371 | })
372 | }
373 |
374 | fn intern_copy(
375 | env: &mut JNIEnv,
376 | op: *mut Operator,
377 | executor: *const Executor,
378 | source_path: JString,
379 | target_path: JString,
380 | ) -> Result<jlong> {
381 | let op = unsafe { &mut *op };
382 | let id = request_id(env)?;
383 |
384 | let source_path = jstring_to_string(env, &source_path)?;
385 | let target_path = jstring_to_string(env, &target_path)?;
386 |
387 | executor_or_default(env, executor)?.spawn(async move {
388 | let result = op
389 | .copy(&source_path, &target_path)
390 | .await
391 | .map(|_| JValueOwned::Void)
392 | .map_err(Into::into);
393 | complete_future(id, result)
394 | });
395 |
396 | Ok(id)
397 | }
398 |
399 | /// # Safety
400 | ///
401 | /// This function should not be called before the Operator is ready.
402 | #[unsafe(no_mangle)]
403 | pub unsafe extern "system" fn Java_org_apache_opendal_AsyncOperator_rename(
404 | mut env: JNIEnv,
405 | _: JClass,
406 | op: *mut Operator,
407 | executor: *const Executor,
408 | source_path: JString,
409 | target_path: JString,
410 | ) -> jlong {
411 | intern_rename(&mut env, op, executor, source_path, target_path).unwrap_or_else(|e| {
412 | e.throw(&mut env);
413 | 0
414 | })
415 | }
416 |
417 | fn intern_rename(
418 | env: &mut JNIEnv,
419 | op: *mut Operator,
420 | executor: *const Executor,
421 | source_path: JString,
422 | target_path: JString,
423 | ) -> Result<jlong> {
424 | let op = unsafe { &mut *op };
425 | let id = request_id(env)?;
426 |
427 | let source_path = jstring_to_string(env, &source_path)?;
428 | let target_path = jstring_to_string(env, &target_path)?;
429 |
430 | executor_or_default(env, executor)?.spawn(async move {
431 | let result = op
432 | .rename(&source_path, &target_path)
433 | .await
434 | .map(|_| JValueOwned::Void)
435 | .map_err(Into::into);
436 | complete_future(id, result)
437 | });
438 |
439 | Ok(id)
440 | }
441 |
442 | /// # Safety
443 | ///
444 | /// This function should not be called before the Operator is ready.
445 | #[unsafe(no_mangle)]
446 | pub unsafe extern "system" fn Java_org_apache_opendal_AsyncOperator_removeAll(
447 | mut env: JNIEnv,
448 | _: JClass,
449 | op: *mut Operator,
450 | executor: *const Executor,
451 | path: JString,
452 | ) -> jlong {
453 | intern_remove_all(&mut env, op, executor, path).unwrap_or_else(|e| {
454 | e.throw(&mut env);
455 | 0
456 | })
457 | }
458 |
459 | fn intern_remove_all(
460 | env: &mut JNIEnv,
461 | op: *mut Operator,
462 | executor: *const Executor,
463 | path: JString,
464 | ) -> Result<jlong> {
465 | let op = unsafe { &mut *op };
466 | let id = request_id(env)?;
467 |
468 | let path = jstring_to_string(env, &path)?;
469 |
470 | executor_or_default(env, executor)?.spawn(async move {
471 | let result = op
472 | .delete_with(&path)
473 | .recursive(true)
474 | .await
475 | .map(|_| JValueOwned::Void)
476 | .map_err(Into::into);
477 | complete_future(id, result)
478 | });
479 |
480 | Ok(id)
481 | }
482 |
483 | /// # Safety
484 | ///
485 | /// This function should not be called before the Operator is ready.
486 | #[unsafe(no_mangle)]
487 | pub unsafe extern "system" fn Java_org_apache_opendal_AsyncOperator_list(
488 | mut env: JNIEnv,
489 | _: JClass,
490 | op: *mut Operator,
491 | executor: *const Executor,
492 | path: JString,
493 | options: JObject,
494 | ) -> jlong {
495 | intern_list(&mut env, op, executor, path, options).unwrap_or_else(|e| {
496 | e.throw(&mut env);
497 | 0
498 | })
499 | }
500 |
501 | fn intern_list(
502 | env: &mut JNIEnv,
503 | op: *mut Operator,
504 | executor: *const Executor,
505 | path: JString,
506 | options: JObject,
507 | ) -> Result<jlong> {
508 | let op = unsafe { &mut *op };
509 | let id = request_id(env)?;
510 |
511 | let path = jstring_to_string(env, &path)?;
512 | let list_opts = make_list_options(env, &options)?;
513 | executor_or_default(env, executor)?.spawn(async move {
514 | let entries = op.list_options(&path, list_opts).await.map_err(Into::into);
515 | let result = make_entries(entries);
516 | complete_future(id, result.map(JValueOwned::Object))
517 | });
518 |
519 | Ok(id)
520 | }
521 |
522 | fn make_entries<'local>(entries: Result<Vec<Entry>>) -> Result<JObject<'local>> {
523 | let entries = entries?;
524 |
525 | let mut env = unsafe { get_current_env() };
526 | let jarray = env.new_object_array(
527 | entries.len() as jsize,
528 | "org/apache/opendal/Entry",
529 | JObject::null(),
530 | )?;
531 |
532 | for (idx, entry) in entries.into_iter().enumerate() {
533 | let entry = make_entry(&mut env, entry)?;
534 | env.set_object_array_element(&jarray, idx as jsize, entry)?;
535 | }
536 |
537 | Ok(jarray.into())
538 | }
539 |
540 | /// # Safety
541 | ///
542 | /// This function should not be called before the Operator is ready.
543 | #[unsafe(no_mangle)]
544 | pub unsafe extern "system" fn Java_org_apache_opendal_AsyncOperator_presignRead(
545 | mut env: JNIEnv,
546 | _: JClass,
547 | op: *mut Operator,
548 | executor: *const Executor,
549 | path: JString,
550 | expire: jlong,
551 | ) -> jlong {
552 | intern_presign_read(&mut env, op, executor, path, expire).unwrap_or_else(|e| {
553 | e.throw(&mut env);
554 | 0
555 | })
556 | }
557 |
558 | fn intern_presign_read(
559 | env: &mut JNIEnv,
560 | op: *mut Operator,
561 | executor: *const Executor,
562 | path: JString,
563 | expire: jlong,
564 | ) -> Result<jlong> {
565 | let op = unsafe { &mut *op };
566 | let id = request_id(env)?;
567 |
568 | let path = jstring_to_string(env, &path)?;
569 | let expire = Duration::from_nanos(expire as u64);
570 |
571 | executor_or_default(env, executor)?.spawn(async move {
572 | let result = op.presign_read(&path, expire).await.map_err(Into::into);
573 | let mut env = unsafe { get_current_env() };
574 | let result = result.and_then(|req| make_presigned_request(&mut env, req));
575 | complete_future(id, result.map(JValueOwned::Object))
576 | });
577 |
578 | Ok(id)
579 | }
580 | /// # Safety
581 | ///
582 | /// This function should not be called before the Operator is ready.
583 | #[unsafe(no_mangle)]
584 | pub unsafe extern "system" fn Java_org_apache_opendal_AsyncOperator_presignWrite(
585 | mut env: JNIEnv,
586 | _: JClass,
587 | op: *mut Operator,
588 | executor: *const Executor,
589 | path: JString,
590 | expire: jlong,
591 | ) -> jlong {
592 | intern_presign_write(&mut env, op, executor, path, expire).unwrap_or_else(|e| {
593 | e.throw(&mut env);
594 | 0
595 | })
596 | }
597 |
598 | fn intern_presign_write(
599 | env: &mut JNIEnv,
600 | op: *mut Operator,
601 | executor: *const Executor,
602 | path: JString,
603 | expire: jlong,
604 | ) -> Result<jlong> {
605 | let op = unsafe { &mut *op };
606 | let id = request_id(env)?;
607 |
608 | let path = jstring_to_string(env, &path)?;
609 | let expire = Duration::from_nanos(expire as u64);
610 |
611 | executor_or_default(env, executor)?.spawn(async move {
612 | let result = op.presign_write(&path, expire).await.map_err(Into::into);
613 | let mut env = unsafe { get_current_env() };
614 | let result = result.and_then(|req| make_presigned_request(&mut env, req));
615 | complete_future(id, result.map(JValueOwned::Object))
616 | });
617 |
618 | Ok(id)
619 | }
620 |
621 | /// # Safety
622 | ///
623 | /// This function should not be called before the Operator is ready.
624 | #[unsafe(no_mangle)]
625 | pub unsafe extern "system" fn Java_org_apache_opendal_AsyncOperator_presignStat(
626 | mut env: JNIEnv,
627 | _: JClass,
628 | op: *mut Operator,
629 | executor: *const Executor,
630 | path: JString,
631 | expire: jlong,
632 | ) -> jlong {
633 | intern_presign_stat(&mut env, op, executor, path, expire).unwrap_or_else(|e| {
634 | e.throw(&mut env);
635 | 0
636 | })
637 | }
638 |
639 | fn intern_presign_stat(
640 | env: &mut JNIEnv,
641 | op: *mut Operator,
642 | executor: *const Executor,
643 | path: JString,
644 | expire: jlong,
645 | ) -> Result<jlong> {
646 | let op = unsafe { &mut *op };
647 | let id = request_id(env)?;
648 |
649 | let path = jstring_to_string(env, &path)?;
650 | let expire = Duration::from_nanos(expire as u64);
651 |
652 | executor_or_default(env, executor)?.spawn(async move {
653 | let result = op.presign_stat(&path, expire).await.map_err(Into::into);
654 | let mut env = unsafe { get_current_env() };
655 | let result = result.and_then(|req| make_presigned_request(&mut env, req));
656 | complete_future(id, result.map(JValueOwned::Object))
657 | });
658 |
659 | Ok(id)
660 | }
661 |
662 | fn make_object<'local>(
663 | env: &mut JNIEnv<'local>,
664 | value: JValueOwned<'local>,
665 | ) -> Result<JObject<'local>> {
666 | let o = match value {
667 | JValueOwned::Object(o) => o,
668 | JValueOwned::Byte(_) => env.new_object("java/lang/Long", "(B)V", &[value.borrow()])?,
669 | JValueOwned::Char(_) => env.new_object("java/lang/Char", "(C)V", &[value.borrow()])?,
670 | JValueOwned::Short(_) => env.new_object("java/lang/Short", "(S)V", &[value.borrow()])?,
671 | JValueOwned::Int(_) => env.new_object("java/lang/Integer", "(I)V", &[value.borrow()])?,
672 | JValueOwned::Long(_) => env.new_object("java/lang/Long", "(J)V", &[value.borrow()])?,
673 | JValueOwned::Bool(_) => env.new_object("java/lang/Boolean", "(Z)V", &[value.borrow()])?,
674 | JValueOwned::Float(_) => env.new_object("java/lang/Float", "(F)V", &[value.borrow()])?,
675 | JValueOwned::Double(_) => env.new_object("java/lang/Double", "(D)V", &[value.borrow()])?,
676 | JValueOwned::Void => JObject::null(),
677 | };
678 | Ok(o)
679 | }
680 |
681 | fn complete_future(id: jlong, result: Result<JValueOwned>) {
682 | try_complete_future(id, result).expect("complete future must succeed");
683 | }
684 |
685 | fn try_complete_future(id: jlong, result: Result<JValueOwned>) -> Result<()> {
686 | let mut env = unsafe { get_current_env() };
687 | let future = get_future(&mut env, id)?;
688 | match result {
689 | Ok(result) => {
690 | let result = make_object(&mut env, result)?;
691 | env.call_method(
692 | future,
693 | "complete",
694 | "(Ljava/lang/Object;)Z",
695 | &[JValue::Object(&result)],
696 | )?
697 | }
698 | Err(err) => {
699 | let exception = err.to_exception(&mut env)?;
700 | env.call_method(
701 | future,
702 | "completeExceptionally",
703 | "(Ljava/lang/Throwable;)Z",
704 | &[JValue::Object(&exception)],
705 | )?
706 | }
707 | };
708 | Ok(())
709 | }
710 |
711 | fn request_id(env: &mut JNIEnv) -> Result<jlong> {
712 | Ok(env
713 | .call_static_method(
714 | "org/apache/opendal/AsyncOperator$AsyncRegistry",
715 | "requestId",
716 | "()J",
717 | &[],
718 | )?
719 | .j()?)
720 | }
721 |
722 | fn get_future<'local>(env: &mut JNIEnv<'local>, id: jlong) -> Result<JObject<'local>> {
723 | Ok(env
724 | .call_static_method(
725 | "org/apache/opendal/AsyncOperator$AsyncRegistry",
726 | "get",
727 | "(J)Ljava/util/concurrent/CompletableFuture;",
728 | &[JValue::Long(id)],
729 | )?
730 | .l()?)
731 | }
732 |
```