sdf
This commit is contained in:
parent 59666d0dd6
commit c5bc4da2c3
BIN ceph/.commands.txt.swp Normal file
Binary file not shown.
122 ceph/ceph.go
@@ -4,65 +4,119 @@ import (
	"encoding/json"
	"fmt"
	"log"
	"os"

	"github.com/ceph/go-ceph/rados"
)

func errChecker(err error) {
	if err != nil {
		log.Fatalf("Error %v happened", err)
	}
}

// makeMonQuery sends a request to the monitor and returns the raw answer.
func makeMonQuery(cephConn *cephConnection, query map[string]string) []byte {
	monJSON, err := json.Marshal(query)
	if err != nil {
		log.Fatalf("Can't marshal json mon query. Error: %v", err)
	}

	monRawAnswer, str, err := cephConn.conn.MonCommand(monJSON)
	fmt.Println("MakeMonQuery info: ", str)
	if err != nil {
		log.Fatalf("Failed exec monCommand. Error: %v", err)
	}
	return monRawAnswer
}

func getCommands(cephConn *cephConnection) []byte {
	return makeMonQuery(cephConn, map[string]string{"prefix": "get_command_descriptions"})
}

// getAvailableCommands gets all available commands and saves them to a file.
func getAvailableCommands(cephConn *cephConnection) {
	var monAnswer map[string]interface{}

	monRawAnswer := makeMonQuery(cephConn, map[string]string{"prefix": "get_command_descriptions"})

	if err := json.Unmarshal(monRawAnswer, &monAnswer); err != nil {
		log.Fatalln("Cannot unmarshal")
	}
	prettyView, err := json.MarshalIndent(monAnswer, "", " ")
	errChecker(err)
	// fmt.Println(string(prettyView))

	// Write all the commands to a file.
	err = os.WriteFile("/tmp/share/commands.txt", prettyView, 0644)
	errChecker(err)
}

// OutputFormatter prints the output in human-readable, deserialized form.
func OutputFormatter(input []byte) {
	var monAnswer map[string]interface{}

	if err := json.Unmarshal(input, &monAnswer); err != nil {
		log.Fatalf("Cannot unmarshal %#v", err)
	}
	prettyView, err := json.MarshalIndent(monAnswer, "", " ")
	errChecker(err)
	fmt.Println(string(prettyView))
}

// CloseConn shuts down the connection to the cluster.
func CloseConn(cephConn *cephConnection) {
	fmt.Println("Closing connection to cluster")
	cephConn.conn.Shutdown()
}

// getPools queries a single pool variable ("osd pool get") and returns the parsed result.
func getPools(conn *cephConnection, poolName string, vars ...string) Poolinfo {
	var monAnswer Poolinfo

	monRawAnswer := makeMonQuery(conn, map[string]string{"prefix": "osd pool get", "pool": poolName, "var": vars[0], "format": "json"})
	if err := json.Unmarshal(monRawAnswer, &monAnswer); err != nil {
		log.Fatalf("Cannot get pool info %v", err)
	}

	fmt.Println(string(monRawAnswer))
	return monAnswer
}

func main() {
	var connect cephConnection
	var err error
	// var monAnswer map[string]interface{}

	// Start connection procedure
	fmt.Println("Creating connection object")

	connect.conn, err = rados.NewConnWithClusterAndUser("ceph", "client.admin")
	if err != nil {
		log.Fatalf("Can't create connection with cluster. Error: %v\n", err)
	}

	if err = connect.conn.ReadConfigFile("/etc/ceph/ceph.conf"); err != nil {
		log.Fatalf("Can't read config file. Error: %v\n", err)
	}

	if err = connect.conn.SetConfigOption("keyring", "/etc/ceph/ceph.client.admin.keyring"); err != nil {
		log.Fatalf("Cannot read keyring. Error: %v\n", err)
	}

	if err = connect.conn.Connect(); err != nil {
		log.Fatalf("Couldn't connect to cluster. Error: %v\n", err)
	}
	defer CloseConn(&connect)
	// end connection procedure

	// getAvailableCommands(&connect)
	// monRawAnswer := makeMonQuery(&connect, map[string]string{"prefix": "osd pool get", "pool": "test", "var": "size", "format": "json"})
	// OutputFormatter(monRawAnswer)
	// fmt.Println(string(monRawAnswer))
	fmt.Println(getPools(&connect, "bench", "pg_num"))
}
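Poolinfo is referenced by getPools but is not defined in this hunk. For illustration only (not part of this commit), a minimal sketch of a struct that could decode the JSON typically returned by `ceph osd pool get <pool> pg_num --format json`; the field names are an assumption about Ceph's output, not taken from this repository:

	// Hypothetical sketch; the real Poolinfo lives elsewhere in this package.
	type Poolinfo struct {
		Pool   string `json:"pool"`    // pool name, e.g. "bench"
		PoolID int64  `json:"pool_id"` // numeric pool id
		PgNum  int    `json:"pg_num"`  // value of the requested variable
	}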
14890 ceph/commands.txt Normal file
File diff suppressed because it is too large.
@@ -18,3 +18,7 @@ type Device struct {
	ID   int64  `json:"id"`
	Name string `json:"name"`
}

type commands struct {
	Descr string `json:"help"`
}
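The new commands struct captures only the help text of each command description. As a hedged illustration (not part of this commit), the get_command_descriptions answer could be decoded with it, assuming the monitor returns a JSON object keyed by "cmd000", "cmd001", ... where each entry carries a "help" field:

	// decodeCommandHelp is a hypothetical helper built on the types above.
	func decodeCommandHelp(raw []byte) (map[string]commands, error) {
		descr := make(map[string]commands)
		if err := json.Unmarshal(raw, &descr); err != nil {
			return nil, err
		}
		return descr, nil
	}

	// Usage sketch:
	//   descr, err := decodeCommandHelp(getCommands(&connect))
	//   errChecker(err)
	//   for name, c := range descr {
	//       fmt.Println(name, "-", c.Descr)
	//   }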
1 pkg/mod/cache/download/github.com/ceph/go-ceph/@v/list vendored Normal file
@@ -0,0 +1 @@
v0.35.0
1 pkg/mod/cache/download/github.com/ceph/go-ceph/@v/v0.35.0.info vendored Normal file
@@ -0,0 +1 @@
{"Version":"v0.35.0","Time":"2025-08-12T13:58:16Z","Origin":{"VCS":"git","URL":"https://github.com/ceph/go-ceph","Hash":"20995e7845bf56766325054b9f3b8a5c42a19df4","Ref":"refs/tags/v0.35.0"}}
0 pkg/mod/cache/download/github.com/ceph/go-ceph/@v/v0.35.0.lock vendored Normal file
33 pkg/mod/cache/download/github.com/ceph/go-ceph/@v/v0.35.0.mod vendored Normal file
@@ -0,0 +1,33 @@
module github.com/ceph/go-ceph

go 1.23.0

require (
	github.com/aws/aws-sdk-go-v2 v1.37.1
	github.com/aws/aws-sdk-go-v2/config v1.30.2
	github.com/aws/aws-sdk-go-v2/credentials v1.18.2
	github.com/aws/aws-sdk-go-v2/service/s3 v1.85.1
	github.com/aws/smithy-go v1.22.5
	github.com/gofrs/uuid/v5 v5.3.2
	github.com/stretchr/testify v1.10.0
	golang.org/x/sys v0.34.0
)

require (
	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.1 // indirect
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1 // indirect
	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
	github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.26.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.31.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.35.1 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)
BIN pkg/mod/cache/download/github.com/ceph/go-ceph/@v/v0.35.0.zip vendored Normal file
Binary file not shown.
1 pkg/mod/cache/download/github.com/ceph/go-ceph/@v/v0.35.0.ziphash vendored Normal file
@@ -0,0 +1 @@
h1:wcDUbsjeNJ7OfbWCE7I5prqUL794uXchopw3IvrGQkk=
@@ -1,2 +1,3 @@
v0.6.0
v0.34.0
v0.35.0
1 pkg/mod/cache/download/golang.org/x/sys/@v/v0.34.0.info vendored Normal file
@@ -0,0 +1 @@
{"Version":"v0.34.0","Time":"2025-06-17T17:35:38Z","Origin":{"VCS":"git","URL":"https://go.googlesource.com/sys","Hash":"751c3c6ac2a644645976e8e7f3db0b75c87d32c6","Ref":"refs/tags/v0.34.0"}}
0 pkg/mod/cache/download/golang.org/x/sys/@v/v0.34.0.lock vendored Normal file
3 pkg/mod/cache/download/golang.org/x/sys/@v/v0.34.0.mod vendored Normal file
@@ -0,0 +1,3 @@
module golang.org/x/sys

go 1.23.0
BIN pkg/mod/cache/download/golang.org/x/sys/@v/v0.34.0.zip vendored Normal file
Binary file not shown.
1 pkg/mod/cache/download/golang.org/x/sys/@v/v0.34.0.ziphash vendored Normal file
@@ -0,0 +1 @@
h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
@ -0,0 +1,67 @@
|
|||
// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
|
||||
// https://github.com/microsoft/vscode-dev-containers/tree/v0.166.1/containers/docker-existing-dockerfile
|
||||
{
|
||||
"name": "go-ceph",
|
||||
"build": {
|
||||
// Sets the run context to one level up instead of the .devcontainer folder.
|
||||
"context": "..",
|
||||
"dockerfile": "../testing/containers/ceph/Dockerfile",
|
||||
"args": {
|
||||
"CEPH_VERSION": "octopus"
|
||||
}
|
||||
},
|
||||
"workspaceMount": "source=${localWorkspaceFolder},target=/go/src/github.com/ceph/go-ceph,type=bind,consistency=cached",
|
||||
"workspaceFolder": "/go/src/github.com/ceph/go-ceph",
|
||||
// Set *default* container specific settings.json values on container create.
|
||||
"settings": {
|
||||
"go.toolsManagement.checkForUpdates": "local",
|
||||
"go.toolsManagement.autoUpdate": true,
|
||||
"go.useLanguageServer": true,
|
||||
"go.goroot": "/opt/go",
|
||||
"go.testEnvVars": {
|
||||
"GODEBUG": "cgocheck=2",
|
||||
"CEPH_CONF": "/ceph_a/ceph.conf",
|
||||
},
|
||||
"go.buildTags": "",
|
||||
"go.testTags": "",
|
||||
"go.testFlags": [
|
||||
"-v",
|
||||
"-count=1"
|
||||
],
|
||||
},
|
||||
// Add the IDs of extensions you want installed when the container is created.
|
||||
"extensions": [
|
||||
"golang.go",
|
||||
],
|
||||
// Use 'forwardPorts' to make a list of ports inside the container available locally.
|
||||
// "forwardPorts": [],
|
||||
// Uncomment the next line to run commands after the container is created - for example installing curl.
|
||||
// "postCreateCommand": "apt-get update && apt-get install -y curl",
|
||||
// Uncomment when using a ptrace-based debugger like C++, Go, and Rust
|
||||
"runArgs": [
|
||||
"--cap-add=SYS_PTRACE",
|
||||
"--security-opt=seccomp=unconfined",
|
||||
"--net=test_ceph_net",
|
||||
],
|
||||
// Uncomment to use the Docker CLI from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker.
|
||||
// "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ],
|
||||
"mounts": [
|
||||
"source=test_ceph_go_cache,target=/go,type=volume",
|
||||
"source=test_ceph_a_data,target=/ceph_a,type=volume",
|
||||
"source=test_ceph_b_data,target=/ceph_b,type=volume",
|
||||
],
|
||||
|
||||
// Uncomment to enable testing ceph nodes
|
||||
// "initializeCommand": [
|
||||
// "bash", "-c", "eval $*", "--",
|
||||
// "docker kill test_ceph_a test_ceph_b 2>/dev/null ;",
|
||||
// "docker run --rm -d --name test_ceph_a --hostname test_ceph_a",
|
||||
// " --net test_ceph_net -v test_ceph_a_data:/tmp/ceph go-ceph-ci:octopus --test-run=NONE --pause ;",
|
||||
// "docker run --rm -d --name test_ceph_b --hostname test_ceph_b",
|
||||
// " --net test_ceph_net -v test_ceph_b_data:/tmp/ceph go-ceph-ci:octopus --test-run=NONE --pause ;",
|
||||
// ],
|
||||
// "postCreateCommand": "/entrypoint.sh --wait-for=/ceph_a/.ready:/ceph_b/.ready --ceph-conf=/ceph_a/ceph.conf --mirror=/ceph_b/ceph.conf --test-pkg none"
|
||||
|
||||
// Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
|
||||
// "remoteUser": "vscode"
|
||||
}
|
||||
34
pkg/mod/github.com/ceph/go-ceph@v0.35.0/.github/dependabot.yml
vendored
Normal file
34
pkg/mod/github.com/ceph/go-ceph@v0.35.0/.github/dependabot.yml
vendored
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
---
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "gomod"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
rebase-strategy: "disabled"
|
||||
labels:
|
||||
- "no-API"
|
||||
commit-message:
|
||||
prefix: "go-ceph"
|
||||
groups:
|
||||
aws-sdk:
|
||||
patterns:
|
||||
- "github.com/aws/aws-sdk-*"
|
||||
- package-ecosystem: "gomod"
|
||||
directory: "/contrib/implements"
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
rebase-strategy: "disabled"
|
||||
labels:
|
||||
- "no-API"
|
||||
commit-message:
|
||||
prefix: "contrib"
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
rebase-strategy: disabled
|
||||
labels:
|
||||
- "no-API"
|
||||
commit-message:
|
||||
prefix: "go-ceph"
|
||||
70
pkg/mod/github.com/ceph/go-ceph@v0.35.0/.github/mergify.yml
vendored
Normal file
70
pkg/mod/github.com/ceph/go-ceph@v0.35.0/.github/mergify.yml
vendored
Normal file
|
|
@ -0,0 +1,70 @@
|
|||
---
|
||||
# each test should be listed separately, do not use regular expressions:
|
||||
# https://docs.mergify.io/conditions.html#validating-all-status-check
|
||||
# Until mergify allows us to have default conditions, we will need to
|
||||
# repeat this list in a few places.
|
||||
queue_rules:
|
||||
- name: default
|
||||
queue_conditions:
|
||||
- check-success=check
|
||||
- check-success=dpulls
|
||||
- check-success=test-suite (octopus)
|
||||
- check-success=test-suite (pacific)
|
||||
- check-success=test-suite (quincy)
|
||||
- check-success=test-suite (reef)
|
||||
- check-success=test-suite (squid)
|
||||
merge_method: rebase
|
||||
update_method: rebase
|
||||
|
||||
pull_request_rules:
|
||||
# Clearing approvals after content changes
|
||||
- name: remove outdated approvals
|
||||
conditions:
|
||||
- base=master
|
||||
actions:
|
||||
dismiss_reviews:
|
||||
approved: true
|
||||
changes_requested: false
|
||||
# Our auto merge rules
|
||||
- name: automatic merge
|
||||
conditions:
|
||||
- label!=do-not-merge
|
||||
- label!=extended-review
|
||||
- base=master
|
||||
- "#changes-requested-reviews-by=0"
|
||||
- status-success=check
|
||||
- status-success=dpulls
|
||||
# See above
|
||||
- status-success=test-suite (octopus)
|
||||
- status-success=test-suite (pacific)
|
||||
- status-success=test-suite (quincy)
|
||||
- status-success=test-suite (reef)
|
||||
- status-success=test-suite (squid)
|
||||
- or:
|
||||
- and:
|
||||
- label=no-API
|
||||
- "#approved-reviews-by>=1"
|
||||
- and:
|
||||
- label=API
|
||||
- "#approved-reviews-by>=2"
|
||||
- and:
|
||||
- label=API
|
||||
- "#approved-reviews-by>=1"
|
||||
- "updated-at<10 days ago"
|
||||
actions:
|
||||
queue: {}
|
||||
dismiss_reviews: {}
|
||||
delete_head_branch: {}
|
||||
# Conflict resolution prompt
|
||||
- name: ask to resolve conflict
|
||||
conditions:
|
||||
- conflict
|
||||
actions:
|
||||
comment:
|
||||
message: |
|
||||
This pull request now has conflicts with the target branch.
|
||||
Could you please resolve conflicts and force push the corrected
|
||||
changes? 🙏
|
||||
|
||||
merge_queue:
|
||||
max_parallel_checks: 1
|
||||
30
pkg/mod/github.com/ceph/go-ceph@v0.35.0/.github/pull_request_template.md
vendored
Normal file
30
pkg/mod/github.com/ceph/go-ceph@v0.35.0/.github/pull_request_template.md
vendored
Normal file
|
|
@ -0,0 +1,30 @@
|
|||
|
||||
<!--
|
||||
Thank you for opening a pull request. Please provide:
|
||||
|
||||
- A clear summary of your changes
|
||||
|
||||
- Descriptive and succinct commit messages with the format:
|
||||
"""
|
||||
[topic]: [short description]
|
||||
|
||||
[Longer description]
|
||||
|
||||
Signed-off-by: [Your Name] <[your email]>
|
||||
"""
|
||||
|
||||
Topic will generally be the go ceph package dir you are working in.
|
||||
|
||||
- Ensure checklist items listed below are accounted for
|
||||
-->
|
||||
|
||||
## Checklist
|
||||
- [ ] Added tests for features and functional changes
|
||||
- [ ] Public functions and types are documented
|
||||
- [ ] Standard formatting is applied to Go code
|
||||
- [ ] Is this a new API? Added a new file that begins with `//go:build ceph_preview`
|
||||
- [ ] Ran `make api-update` to record new APIs
|
||||
|
||||
New or infrequent contributors may want to review the go-ceph [Developer's Guide](https://github.com/ceph/go-ceph/blob/master/docs/development.md) including the section on how we track [API Status](https://github.com/ceph/go-ceph/blob/master/docs/development.md#api-status) and the [API Stability Plan](https://github.com/ceph/go-ceph/blob/master/docs/api-stability.md).
|
||||
|
||||
The go-ceph project uses mergify. View the [mergify command guide](https://docs.mergify.com/commands/#commands) for information on how to interact with mergify. Add a comment with `@Mergifyio` `rebase` to rebase your PR when github indicates that the PR is out of date with the base branch.
|
||||
115
pkg/mod/github.com/ceph/go-ceph@v0.35.0/.github/workflows/main.yml
vendored
Normal file
115
pkg/mod/github.com/ceph/go-ceph@v0.35.0/.github/workflows/main.yml
vendored
Normal file
|
|
@ -0,0 +1,115 @@
|
|||
|
||||
name: CI
|
||||
|
||||
# Run tests on pull requests and when changes are directly
|
||||
# commited to master.
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
# run the CI also on PRs that are based on branches starting with pr/...
|
||||
- 'pr/**'
|
||||
schedule:
|
||||
- cron: 1 1 * * *
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
debug_enabled:
|
||||
type: boolean
|
||||
description: 'Run the build with tmate debugging enabled (https://github.com/marketplace/actions/debugging-with-tmate)'
|
||||
required: false
|
||||
default: false
|
||||
|
||||
jobs:
|
||||
# Determine the latest go versions
|
||||
go-versions:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- id: go-versions
|
||||
run: |
|
||||
curl -s 'https://go.dev/dl/?mode=json' -o go-latest.json
|
||||
curl -s 'https://go.dev/dl/?mode=json&include=all' -o go-all.json
|
||||
|
||||
LATEST=$(jq -r '.[0]|.version' go-latest.json)
|
||||
PREV=$(jq -r '.[1]|.version' go-latest.json)
|
||||
UNSTABLE=$(jq -r '.[0]|.version' go-all.json)
|
||||
|
||||
echo "latest=${LATEST#go}" >> $GITHUB_OUTPUT
|
||||
echo "prev=${PREV#go}" >> $GITHUB_OUTPUT
|
||||
echo "unstable=${UNSTABLE#go}" >> $GITHUB_OUTPUT
|
||||
outputs:
|
||||
latest: ${{ steps.go-versions.outputs.latest }}
|
||||
prev: ${{ steps.go-versions.outputs.prev }}
|
||||
unstable: ${{ steps.go-versions.outputs.unstable }}
|
||||
|
||||
# Run static/code-quality checks
|
||||
check:
|
||||
needs: go-versions
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5
|
||||
with:
|
||||
go-version: ${{ needs.go-versions.outputs.latest }}
|
||||
- name: Install revive
|
||||
run: go install github.com/mgechev/revive@latest
|
||||
- name: Run checks
|
||||
run: make check
|
||||
|
||||
# Run the test suite in a container per-ceph-codename
|
||||
test-suite:
|
||||
name: test-suite (${{ matrix.ceph_version }}${{ matrix.go_version != needs.go-versions.outputs.latest && format(', go{0}', matrix.go_version) || '' }})
|
||||
needs: go-versions
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
ceph_version:
|
||||
- "octopus"
|
||||
- "pacific"
|
||||
- "quincy"
|
||||
- "reef"
|
||||
- "squid"
|
||||
# - "tentacle"
|
||||
- "pre-reef"
|
||||
- "pre-squid"
|
||||
- "pre-tentacle"
|
||||
- "main"
|
||||
go_version:
|
||||
- ${{ needs.go-versions.outputs.latest }}
|
||||
include:
|
||||
- ceph_version: "squid"
|
||||
go_version: ${{ needs.go-versions.outputs.prev }}
|
||||
- ceph_version: "squid"
|
||||
go_version: ${{ needs.go-versions.outputs.unstable }}
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||
- name: Set cores to get stored as "core"
|
||||
run: sudo bash -c 'echo "core" > /proc/sys/kernel/core_pattern'
|
||||
- name: Run tests
|
||||
run: make test-containers-test "CEPH_VERSION=${{ matrix.ceph_version }}" "GO_VERSION=${{ matrix.go_version }}" "RESULTS_DIR=$PWD/_results"
|
||||
# As an example, one can enable tmate debugging of manually-triggered
|
||||
# workflows if the input option was provided
|
||||
# - name: Setup tmate session
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# if: ${{ github.event_name == 'workflow_dispatch' && inputs.debug_enabled }}
|
||||
- name: Clean up test containers
|
||||
if: always()
|
||||
run: make test-containers-clean "CEPH_VERSION=${{ matrix.ceph_version }}"
|
||||
- name: Archive test results
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: "go-ceph-results-${{ matrix.ceph_version }}-${{ matrix.go_version }}"
|
||||
path: |
|
||||
_results/
|
||||
retention-days: 30
|
||||
- name: Check API Versions and Aging
|
||||
if: always()
|
||||
run: |
|
||||
if [ -f _results/implements.json ]; then
|
||||
./contrib/apiage.py
|
||||
else
|
||||
echo "Skipping apiage check"
|
||||
fi
|
||||
32
pkg/mod/github.com/ceph/go-ceph@v0.35.0/.github/workflows/report-stable.yml
vendored
Normal file
32
pkg/mod/github.com/ceph/go-ceph@v0.35.0/.github/workflows/report-stable.yml
vendored
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
|
||||
name: Report-API-Updates
|
||||
|
||||
# Run tests on pull requests and when changes are directly
|
||||
# commited to master.
|
||||
on:
|
||||
workflow_dispatch: {}
|
||||
|
||||
jobs:
|
||||
find-updates:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
# Checkout with fetch-depth=0 in order to fetch (all) tags.
|
||||
# The Makefile runs git commands to pass tag info to the apiage script.
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Run makefile
|
||||
run: make api-report-issuetemplate RESULTS_DIR=_results
|
||||
- name: Archive test results
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
|
||||
with:
|
||||
name: "go-ceph-api-updates"
|
||||
path: |
|
||||
_results/
|
||||
retention-days: 30
|
||||
if: "!cancelled()"
|
||||
- name: File a GitHub Issue
|
||||
run: gh issue create --title "$(cat _results/title.txt)" --body-file _results/body.md
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
GH_REPO: ${{ github.repository }}
|
||||
20
pkg/mod/github.com/ceph/go-ceph@v0.35.0/.github/workflows/stale.yml
vendored
Normal file
20
pkg/mod/github.com/ceph/go-ceph@v0.35.0/.github/workflows/stale.yml
vendored
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
name: "Stale issue handler"
|
||||
on:
|
||||
schedule:
|
||||
- cron: "0 0 * * *"
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed in 14 days if no further activity occurs. Thank you for your contribution.'
|
||||
days-before-stale: 30
|
||||
days-before-close: 14
|
||||
only-issue-labels: 'question'
|
||||
stale-pr-message: 'This Pull Request has been automatically marked as stale because it has not had recent activity. It will be closed in 21 days if no further activity occurs. Remember, a closed PR can always be reopened. Thank you for your contribution.'
|
||||
close-pr-message: 'This Pull Request has been automatically closed due to inactivity. In the future, if you resume working on this again, the PR can be reopened. Additionally, if you are proposing a feature or fix that you think someone else could take up - please say so - and if there is no existing issue already, file an issue for the topic. Thank you for your contribution.'
|
||||
days-before-pr-stale: 60
|
||||
days-before-pr-close: 21
|
||||
10 pkg/mod/github.com/ceph/go-ceph@v0.35.0/.gitignore vendored Normal file
@@ -0,0 +1,10 @@
.build-docker
.build.*
.run.*
*.swp
*~
*.out
*.test
/*.json
/implements
/_results
65
pkg/mod/github.com/ceph/go-ceph@v0.35.0/.revive.toml
Normal file
65
pkg/mod/github.com/ceph/go-ceph@v0.35.0/.revive.toml
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
|
||||
ignoreGeneratedHeader = false
|
||||
severity = "error"
|
||||
confidence = 0.8
|
||||
errorCode = 1
|
||||
warningCode = 0
|
||||
|
||||
[directive.specify-disable-reason]
|
||||
|
||||
[rule.blank-imports]
|
||||
[rule.context-as-argument]
|
||||
[rule.context-keys-type]
|
||||
[rule.dot-imports]
|
||||
[rule.error-return]
|
||||
[rule.error-strings]
|
||||
[rule.error-naming]
|
||||
[rule.exported]
|
||||
[rule.if-return]
|
||||
[rule.increment-decrement]
|
||||
# The configuration approach to the following rule is pretty bonkers.
|
||||
# But we need to skip package name checks because we already have an
|
||||
# (internal!) utils package and we aint changing it now.
|
||||
[rule.var-naming]
|
||||
arguments = [["ID", "UID"], [], [{skipPackageNameChecks=true}]]
|
||||
[rule.var-declaration]
|
||||
# We need to disable the package-comments check because we check all the .go
|
||||
# files in the repo individually. This appears to confuse the new
|
||||
# pacakge-comments implementation as it works correctly with ./... or ./a/b/c
|
||||
# but not ./a/b/c/*.go. We need the latter input style as we have files with
|
||||
# build tags we want to check, as well as multiple modules, within the repo.
|
||||
[rule.package-comments]
|
||||
disabled = true
|
||||
[rule.range]
|
||||
[rule.receiver-naming]
|
||||
[rule.time-naming]
|
||||
[rule.unexported-return]
|
||||
[rule.indent-error-flow]
|
||||
[rule.errorf]
|
||||
[rule.empty-block]
|
||||
[rule.superfluous-else]
|
||||
[rule.unused-parameter]
|
||||
[rule.unreachable-code]
|
||||
[rule.redefines-builtin-id]
|
||||
|
||||
[rule.atomic]
|
||||
[rule.bool-literal-in-expr]
|
||||
[rule.constant-logical-expr]
|
||||
[rule.unnecessary-stmt]
|
||||
[rule.unused-receiver]
|
||||
[rule.modifies-parameter]
|
||||
[rule.modifies-value-receiver]
|
||||
[rule.range-val-in-closure]
|
||||
[rule.waitgroup-by-value]
|
||||
[rule.duplicated-imports]
|
||||
[rule.struct-tag]
|
||||
[rule.import-shadowing]
|
||||
|
||||
[rule.argument-limit]
|
||||
arguments = [7]
|
||||
[rule.function-result-limit]
|
||||
arguments = [3]
|
||||
|
||||
[rule.unhandled-error]
|
||||
# functions to ignore unhandled errors on
|
||||
arguments = ["fmt.Printf", "fmt.Println"]
|
||||
21 pkg/mod/github.com/ceph/go-ceph@v0.35.0/LICENSE Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Noah Watkins

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
344
pkg/mod/github.com/ceph/go-ceph@v0.35.0/Makefile
Normal file
344
pkg/mod/github.com/ceph/go-ceph@v0.35.0/Makefile
Normal file
|
|
@ -0,0 +1,344 @@
|
|||
CI_IMAGE_NAME ?= go-ceph-ci
|
||||
CONTAINER_CMD ?=
|
||||
CONTAINER_OPTS ?= --security-opt $(shell grep -q selinux /sys/kernel/security/lsm 2>/dev/null && echo "label=disable" || echo "apparmor:unconfined")
|
||||
CONTAINER_BUILD_OPTS ?=
|
||||
CONTAINER_CONFIG_DIR ?= testing/containers/ceph
|
||||
VOLUME_FLAGS ?=
|
||||
CEPH_VERSION ?= pacific
|
||||
RESULTS_DIR ?=
|
||||
CHECK_GOFMT_FLAGS ?= -e -s -l
|
||||
IMPLEMENTS_OPTS ?=
|
||||
BUILD_TAGS ?= $(CEPH_VERSION)
|
||||
|
||||
ifeq ($(CONTAINER_CMD),)
|
||||
CONTAINER_CMD:=$(shell docker version >/dev/null 2>&1 && echo docker)
|
||||
endif
|
||||
ifeq ($(CONTAINER_CMD),)
|
||||
CONTAINER_CMD:=$(shell podman version >/dev/null 2>&1 && echo podman)
|
||||
endif
|
||||
|
||||
ifeq ($(CEPH_VERSION),octopus)
|
||||
CEPH_TAG ?= v15
|
||||
endif
|
||||
ifeq ($(CEPH_VERSION),pacific)
|
||||
CEPH_TAG ?= v16
|
||||
endif
|
||||
ifeq ($(CEPH_VERSION),quincy)
|
||||
CEPH_TAG ?= v17
|
||||
endif
|
||||
ifeq ($(CEPH_VERSION),reef)
|
||||
CEPH_TAG ?= v18
|
||||
endif
|
||||
ifeq ($(CEPH_VERSION),squid)
|
||||
CEPH_TAG ?= v19
|
||||
endif
|
||||
ifeq ($(CEPH_VERSION),tentacle)
|
||||
CEPH_TAG ?= v20
|
||||
endif
|
||||
# pre-<codename> indicates we want to consume pre-release versions of ceph from
|
||||
# the ceph ci. This way we can start testing on ceph versions before they hit
|
||||
# quay.io/ceph/ceph
|
||||
ifeq ($(CEPH_VERSION),pre-reef)
|
||||
CEPH_TAG ?= reef
|
||||
CEPH_IMG ?= quay.ceph.io/ceph-ci/ceph
|
||||
GO_CEPH_VERSION := reef
|
||||
BUILD_TAGS := reef,ceph_pre_reef
|
||||
endif
|
||||
ifeq ($(CEPH_VERSION),pre-squid)
|
||||
CEPH_TAG ?= squid
|
||||
CEPH_IMG ?= quay.ceph.io/ceph-ci/ceph
|
||||
GO_CEPH_VERSION := squid
|
||||
BUILD_TAGS := squid,ceph_pre_squid
|
||||
endif
|
||||
ifeq ($(CEPH_VERSION),pre-tentacle)
|
||||
CEPH_TAG ?= tentacle
|
||||
CEPH_IMG ?= quay.ceph.io/ceph-ci/ceph
|
||||
GO_CEPH_VERSION := tentacle
|
||||
BUILD_TAGS := tentacle,ceph_pre_tentacle
|
||||
endif
|
||||
ifeq ($(CEPH_VERSION),main)
|
||||
CEPH_TAG ?= main
|
||||
CEPH_IMG ?= quay.ceph.io/ceph-ci/ceph
|
||||
GO_CEPH_VERSION := main
|
||||
BUILD_TAGS := main,ceph_main
|
||||
endif
|
||||
|
||||
GO_CMD:=go
|
||||
GOFMT_CMD:=gofmt
|
||||
GOARCH:=$(shell $(GO_CMD) env GOARCH)
|
||||
GOPROXY:=$(shell $(GO_CMD) env GOPROXY)
|
||||
|
||||
# the full name of the marker file including the ceph version
|
||||
BUILDFILE=.build.$(CEPH_VERSION)
|
||||
|
||||
# files marking daemon containers supporting the tests
|
||||
TEST_CTR_A=.run.test_ceph_a
|
||||
TEST_CTR_B=.run.test_ceph_b
|
||||
TEST_CTR_NET=.run.test_ceph_net
|
||||
|
||||
# the name of the image plus ceph version as tag
|
||||
CI_IMAGE_TAG=$(CI_IMAGE_NAME):$(CEPH_VERSION)
|
||||
|
||||
ifneq ($(NO_PTRGUARD),)
|
||||
CONTAINER_OPTS += -e NO_PTRGUARD=true
|
||||
BUILD_TAGS := $(BUILD_TAGS),no_ptrguard
|
||||
endif
|
||||
|
||||
ifneq ($(NO_PREVIEW),)
|
||||
CONTAINER_OPTS += -e NO_PREVIEW=true
|
||||
else
|
||||
BUILD_TAGS := $(BUILD_TAGS),ceph_preview
|
||||
endif
|
||||
|
||||
CONTAINER_OPTS += -e BUILD_TAGS=$(BUILD_TAGS)
|
||||
ifdef GOPROXY
|
||||
CONTAINER_OPTS += --env GOPROXY=$(GOPROXY)
|
||||
endif
|
||||
|
||||
ifneq ($(USE_CACHE),)
|
||||
GOCACHE_VOLUME := -v test_ceph_go_cache:/go
|
||||
endif
|
||||
|
||||
SELINUX := $(shell getenforce 2>/dev/null)
|
||||
ifeq ($(SELINUX),Enforcing)
|
||||
VOLUME_FLAGS = :z
|
||||
endif
|
||||
|
||||
ifdef RESULTS_DIR
|
||||
RESULTS_VOLUME := -v $(RESULTS_DIR):/results$(VOLUME_FLAGS)
|
||||
endif
|
||||
|
||||
ifneq ($(USE_GOCO),)
|
||||
GO_CMD:=$(CONTAINER_CMD) run $(CONTAINER_OPTS) --rm $(GOCACHE_VOLUME) -v $(CURDIR):/go/src/github.com/ceph/go-ceph$(VOLUME_FLAGS) --entrypoint $(GO_CMD) $(CI_IMAGE_TAG)
|
||||
GOFMT_CMD:=$(CONTAINER_CMD) run $(CONTAINER_OPTS) --rm $(GOCACHE_VOLUME) -v $(CURDIR):/go/src/github.com/ceph/go-ceph$(VOLUME_FLAGS) --entrypoint $(GOFMT_CMD) $(CI_IMAGE_TAG)
|
||||
endif
|
||||
|
||||
# Assemble the various build args that will be passed container build command(s)
|
||||
CONTAINER_BUILD_ARGS:=$(DEFAULT_BUILD_ARGS)
|
||||
CONTAINER_BUILD_ARGS += --build-arg GOARCH=$(GOARCH)
|
||||
ifneq ($(GO_VERSION),)
|
||||
CONTAINER_BUILD_ARGS += --build-arg GO_VERSION=$(GO_VERSION)
|
||||
endif
|
||||
ifdef CEPH_IMG
|
||||
CONTAINER_BUILD_ARGS += --build-arg CEPH_IMG=$(CEPH_IMG)
|
||||
endif
|
||||
ifdef CEPH_TAG
|
||||
CONTAINER_BUILD_ARGS += --build-arg CEPH_TAG=$(CEPH_TAG)
|
||||
endif
|
||||
ifdef GO_CEPH_VERSION
|
||||
CONTAINER_BUILD_ARGS += --build-arg GO_CEPH_VERSION=$(GO_CEPH_VERSION)
|
||||
else
|
||||
CONTAINER_BUILD_ARGS += --build-arg GO_CEPH_VERSION=$(CEPH_VERSION)
|
||||
endif
|
||||
ifdef GOPROXY
|
||||
CONTAINER_BUILD_ARGS += --build-arg GOPROXY=$(GOPROXY)
|
||||
endif
|
||||
|
||||
build:
|
||||
$(GO_CMD) build -v -tags $(BUILD_TAGS) $(shell $(GO_CMD) list ./... | grep -v /contrib)
|
||||
fmt:
|
||||
$(GO_CMD) fmt ./...
|
||||
test:
|
||||
$(GO_CMD) test -v -tags $(BUILD_TAGS) ./...
|
||||
|
||||
.PHONY: test-docker test-container test-multi-container
|
||||
test-docker: test-container
|
||||
test-container: $(BUILDFILE) $(RESULTS_DIR)
|
||||
$(CONTAINER_CMD) run $(CONTAINER_OPTS) --rm --hostname test_ceph_aio \
|
||||
-v $(CURDIR):/go/src/github.com/ceph/go-ceph$(VOLUME_FLAGS) $(RESULTS_VOLUME) $(GOCACHE_VOLUME) \
|
||||
$(CI_IMAGE_TAG) $(ENTRYPOINT_ARGS)
|
||||
test-multi-container: $(BUILDFILE) $(RESULTS_DIR)
|
||||
-$(MAKE) test-containers-kill
|
||||
-$(MAKE) test-containers-rm-volumes
|
||||
-$(MAKE) test-containers-rm-network
|
||||
$(MAKE) test-containers-test
|
||||
$(MAKE) test-containers-kill
|
||||
$(MAKE) test-containers-rm-volumes
|
||||
$(MAKE) test-containers-rm-network
|
||||
|
||||
# The test-containers-* cleanup rules:
|
||||
.PHONY: test-containers-clean \
|
||||
test-containers-kill \
|
||||
test-containers-rm-volumes \
|
||||
test-containers-rm-network
|
||||
|
||||
test-containers-clean: test-containers-kill
|
||||
-$(MAKE) test-containers-rm-volumes
|
||||
-$(MAKE) test-containers-rm-network
|
||||
|
||||
test-containers-kill:
|
||||
-$(CONTAINER_CMD) kill test_ceph_a || $(CONTAINER_CMD) rm test_ceph_a
|
||||
-$(CONTAINER_CMD) kill test_ceph_b || $(CONTAINER_CMD) rm test_ceph_b
|
||||
$(RM) $(TEST_CTR_A) $(TEST_CTR_B)
|
||||
sleep 0.3
|
||||
# sometimes the container runtime fails to remove things immediately after
|
||||
# killing the containers. The short sleep helps avoid hitting that condition.
|
||||
|
||||
test-containers-rm-volumes:
|
||||
$(CONTAINER_CMD) volume remove test_ceph_a_data test_ceph_b_data
|
||||
|
||||
test-containers-rm-network:
|
||||
$(CONTAINER_CMD) network rm test_ceph_net
|
||||
$(RM) $(TEST_CTR_NET)
|
||||
|
||||
# Thest test-containers-* setup rules:
|
||||
.PHONY: test-containers-network \
|
||||
test-containers-test_ceph_a \
|
||||
test-containers-test_ceph_b \
|
||||
test-containers-test
|
||||
|
||||
test-containers-network: $(TEST_CTR_NET)
|
||||
$(TEST_CTR_NET):
|
||||
($(CONTAINER_CMD) network ls -q | grep -q test_ceph_net) \
|
||||
|| $(CONTAINER_CMD) network create test_ceph_net
|
||||
@echo "test_ceph_net" > $(TEST_CTR_NET)
|
||||
|
||||
test-containers-test_ceph_a: $(TEST_CTR_A)
|
||||
$(TEST_CTR_A): $(TEST_CTR_NET) $(BUILDFILE)
|
||||
$(CONTAINER_CMD) run $(CONTAINER_OPTS) \
|
||||
--cidfile=$(TEST_CTR_A) --rm -d --name test_ceph_a \
|
||||
--hostname test_ceph_a \
|
||||
--net test_ceph_net \
|
||||
-v test_ceph_a_data:/tmp/ceph $(CI_IMAGE_TAG) \
|
||||
--test-run=NONE --pause
|
||||
|
||||
test-containers-test_ceph_b: $(TEST_CTR_B)
|
||||
$(TEST_CTR_B): $(TEST_CTR_NET) $(BUILDFILE)
|
||||
$(CONTAINER_CMD) run $(CONTAINER_OPTS) \
|
||||
--cidfile=$(TEST_CTR_B) --rm -d --name test_ceph_b \
|
||||
--hostname test_ceph_b \
|
||||
--net test_ceph_net \
|
||||
-v test_ceph_b_data:/tmp/ceph $(CI_IMAGE_TAG) \
|
||||
--test-run=NONE --pause
|
||||
|
||||
test-containers-test: $(BUILDFILE) $(TEST_CTR_A) $(TEST_CTR_B)
|
||||
$(CONTAINER_CMD) run $(CONTAINER_OPTS) --rm \
|
||||
--net test_ceph_net \
|
||||
-v test_ceph_a_data:/ceph_a \
|
||||
-v test_ceph_b_data:/ceph_b \
|
||||
-v $(CURDIR):/go/src/github.com/ceph/go-ceph$(VOLUME_FLAGS) \
|
||||
$(RESULTS_VOLUME) $(GOCACHE_VOLUME) \
|
||||
$(CI_IMAGE_TAG) \
|
||||
--wait-for=/ceph_a/.ready:/ceph_b/.ready \
|
||||
--mirror-state=/ceph_b/.mstate \
|
||||
--ceph-conf=/ceph_a/ceph.conf \
|
||||
--mirror=/ceph_b/ceph.conf \
|
||||
--altfs=@/ceph_a/altfs.txt \
|
||||
$(ENTRYPOINT_ARGS)
|
||||
|
||||
ifdef RESULTS_DIR
|
||||
$(RESULTS_DIR):
|
||||
mkdir -p $(RESULTS_DIR)
|
||||
endif
|
||||
|
||||
SHELL_SOURCES=entrypoint.sh micro-osd.sh
|
||||
|
||||
.PHONY: ci-image
|
||||
ci-image: $(BUILDFILE)
|
||||
$(BUILDFILE): $(CONTAINER_CONFIG_DIR)/Dockerfile $(SHELL_SOURCES)
|
||||
$(CONTAINER_CMD) build \
|
||||
$(CONTAINER_BUILD_ARGS) \
|
||||
$(CONTAINER_BUILD_OPTS) \
|
||||
-t $(CI_IMAGE_TAG) \
|
||||
-f $(CONTAINER_CONFIG_DIR)/Dockerfile .
|
||||
@$(CONTAINER_CMD) inspect -f '{{.Id}}' $(CI_IMAGE_TAG) > $(BUILDFILE)
|
||||
echo $(CEPH_VERSION) >> $(BUILDFILE)
|
||||
|
||||
check: check-revive check-format check-shell
|
||||
|
||||
check-format:
|
||||
! $(GOFMT_CMD) $(CHECK_GOFMT_FLAGS) . | sed 's,^,formatting error: ,' | grep 'go$$'
|
||||
|
||||
check-revive:
|
||||
# Configure project's revive checks using .revive.toml
|
||||
# See: https://github.com/mgechev/revive
|
||||
revive -config .revive.toml $$(find . -name '*.go')
|
||||
|
||||
check-shell:
|
||||
shellcheck -fgcc $(SHELL_SOURCES)
|
||||
|
||||
|
||||
# Do a quick compile only check of the tests and impliclity the
|
||||
# library code as well.
|
||||
test-binaries: \
|
||||
cephfs.test \
|
||||
cephfs/admin.test \
|
||||
common/admin/manager.test \
|
||||
common/admin/nfs.test \
|
||||
common/admin/smb.test \
|
||||
internal/callbacks.test \
|
||||
internal/commands.test \
|
||||
internal/cutil.test \
|
||||
internal/errutil.test \
|
||||
internal/retry.test \
|
||||
rados.test \
|
||||
rbd.test \
|
||||
rbd/admin.test
|
||||
test-bins: test-binaries
|
||||
|
||||
%.test: % force_go_build
|
||||
$(GO_CMD) test -c -tags $(BUILD_TAGS) ./$<
|
||||
|
||||
implements:
|
||||
cd contrib/implements && $(GO_CMD) build -o ../../implements
|
||||
|
||||
check-implements: implements
|
||||
./implements $(IMPLEMENTS_OPTS) cephfs rados rbd
|
||||
|
||||
clean-implements:
|
||||
$(RM) ./implements
|
||||
|
||||
|
||||
.PHONY: api-check
|
||||
api-check: implements-json
|
||||
./contrib/apiage.py
|
||||
|
||||
.PHONY: api-update
|
||||
api-update: implements-json
|
||||
./contrib/apiage.py --mode=update --placeholder-versions
|
||||
|
||||
.PHONY: api-promote
|
||||
api-promote: implements-json
|
||||
./contrib/apiage.py --mode=promote \
|
||||
--current-tag="$$(git describe --tags --abbrev=0)"
|
||||
./contrib/apiage.py --mode=write-doc
|
||||
|
||||
.PHONY: api-fix-versions
|
||||
api-fix-versions:
|
||||
./contrib/apiage.py --mode=fix-versions \
|
||||
--current-tag="$$(git describe --tags --abbrev=0)"
|
||||
./contrib/apiage.py --mode=write-doc
|
||||
|
||||
.PHONY: api-doc
|
||||
api-doc:
|
||||
./contrib/apiage.py --mode=write-doc
|
||||
|
||||
.PHONY: api-check-updates
|
||||
api-check-updates: $(RESULTS_DIR)
|
||||
./contrib/apiage.py --mode=find-updates \
|
||||
--current-tag="$$(git describe --tags --abbrev=0)" \
|
||||
> $(RESULTS_DIR)/updates-found.json
|
||||
|
||||
.PHONY: api-report-updates
|
||||
api-report-updates: api-check-updates
|
||||
./contrib/apiage.py --mode=updates-to-markdown \
|
||||
< $(RESULTS_DIR)/updates-found.json > $(RESULTS_DIR)/updates-found.md
|
||||
|
||||
.PHONY: api-report-issuetemplate
|
||||
api-report-issuetemplate: api-check-updates
|
||||
./contrib/apiage.py --mode=updates-to-issuetemplate \
|
||||
--current-tag="$$(git describe --tags --abbrev=0)" \
|
||||
< $(RESULTS_DIR)/updates-found.json
|
||||
|
||||
ifeq ($(RESULTS_DIR),)
|
||||
IMPLEMENTS_DIR:=$(PWD)/_results
|
||||
else
|
||||
IMPLEMENTS_DIR:=$(RESULTS_DIR)
|
||||
endif
|
||||
|
||||
implements-json: $(BUILDFILE)
|
||||
$(MAKE) RESULTS_DIR="$(IMPLEMENTS_DIR)" ENTRYPOINT_ARGS="--test-run=IMPLEMENTS --micro-osd=/bin/true $(ENTRYPOINT_ARGS)" test-container
|
||||
|
||||
# force_go_build is phony and builds nothing, can be used for forcing
|
||||
# go toolchain commands to always run
|
||||
.PHONY: build fmt test test-docker check test-binaries test-bins force_go_build check-implements clean-implements implements-json
|
||||
161
pkg/mod/github.com/ceph/go-ceph@v0.35.0/README.md
Normal file
161
pkg/mod/github.com/ceph/go-ceph@v0.35.0/README.md
Normal file
|
|
@ -0,0 +1,161 @@
|
|||
# go-ceph - Go bindings for Ceph APIs
|
||||
|
||||
[](https://godoc.org/github.com/ceph/go-ceph) [](https://raw.githubusercontent.com/ceph/go-ceph/master/LICENSE)
|
||||
|
||||
## Introduction
|
||||
|
||||
The go-ceph project is a collection of API bindings that support the use of
|
||||
native Ceph APIs, which are C language functions, in Go. These bindings make
|
||||
use of Go's cgo feature.
|
||||
There are three main Go sub-packages that make up go-ceph:
|
||||
* rados - exports functionality from Ceph's librados
|
||||
* rbd - exports functionality from Ceph's librbd
|
||||
* cephfs - exports functionality from Ceph's libcephfs
|
||||
* rgw/admin - interact with [radosgw admin ops API](https://docs.ceph.com/en/latest/radosgw/adminops)
|
||||
|
||||
We aim to provide comprehensive support for the Ceph APIs over time. This
|
||||
includes both I/O related functions and management functions. If your project
|
||||
makes use of Ceph command line tools and is written in Go, you may be able to
|
||||
switch away from shelling out to the CLI and to these native function calls.
|
||||
|
||||
## Installation
|
||||
|
||||
The code in go-ceph is purely a library module. Typically, one will import
|
||||
go-ceph in another Go based project. When building the code the native RADOS,
|
||||
RBD, & CephFS library and development headers are expected to be installed.
|
||||
|
||||
On debian based systems (apt) these may be:
|
||||
```sh
|
||||
libcephfs-dev librbd-dev librados-dev
|
||||
```
|
||||
|
||||
On rpm based systems (dnf, yum, etc) these may be:
|
||||
```sh
|
||||
libcephfs-devel librbd-devel librados-devel
|
||||
```
|
||||
|
||||
On MacOS you can use brew to install the libraries:
|
||||
```sh
|
||||
brew tap mulbc/ceph-client
|
||||
brew install ceph-client
|
||||
```
|
||||
|
||||
NOTE: CentOS users may want to use a
|
||||
[CentOS Storage SIG](https://wiki.centos.org/SpecialInterestGroup/Storage/Ceph)
|
||||
repository to enable packages for a supported ceph version.
|
||||
Example: `dnf -y install centos-release-ceph-pacific`.
|
||||
(CentOS 7 users should use "yum" rather than "dnf")
|
||||
|
||||
|
||||
To quickly test if one can build with go-ceph on your system, run:
|
||||
```sh
|
||||
go get github.com/ceph/go-ceph
|
||||
```
|
||||
|
||||
Once compiled, code using go-ceph is expected to dynamically link to the Ceph
|
||||
libraries. These libraries must be available on the system where the go based
|
||||
binaries will be run. Our use of cgo and ceph libraries does not allow for
|
||||
fully static binaries.
|
||||
|
||||
go-ceph tries to support different Ceph versions. However some functions might
|
||||
only be available in recent versions, and others may be deprecated. In order to
|
||||
work with non-current versions of Ceph, it is required to pass build-tags to
|
||||
the `go` command line. A tag with the named Ceph release will enable/disable
|
||||
certain features of the go-ceph packages, and prevent warnings or compile
|
||||
problems. For example, to ensure you select the library features that match
|
||||
the "pacific" release, use:
|
||||
```sh
|
||||
go build -tags pacific ....
|
||||
go test -tags pacific ....
|
||||
```
|
||||
|
||||
### Supported Ceph Versions
|
||||
|
||||
| go-ceph version | Supported Ceph Versions | Deprecated Ceph Versions |
|
||||
| --------------- | ------------------------| -------------------------|
|
||||
| v0.35.0 | quincy, reef, squid | octopus, pacific |
|
||||
| v0.34.0 | pacific, quincy, reef, squid | octopus |
|
||||
| v0.33.0 | pacific, quincy, reef, squid | octopus |
|
||||
| v0.32.0 | pacific, quincy, reef, squid | octopus |
|
||||
| v0.31.0 | pacific, quincy, reef, squid | octopus |
|
||||
| v0.30.0 | pacific, quincy, reef, squid | octopus |
|
||||
| v0.29.0 | pacific, quincy, reef | octopus |
|
||||
| v0.28.0 | pacific, quincy, reef | nautilus, octopus |
|
||||
| v0.27.0 | pacific, quincy, reef | nautilus, octopus |
|
||||
| v0.26.0 | pacific, quincy, reef | nautilus, octopus |
|
||||
| v0.25.0 | pacific, quincy, reef | nautilus, octopus |
|
||||
| v0.24.0 | pacific, quincy, reef | nautilus, octopus |
|
||||
| v0.23.0 | pacific, quincy, reef | nautilus, octopus |
|
||||
| v0.22.0 | pacific, quincy | nautilus, octopus |
|
||||
| v0.21.0 | pacific, quincy | nautilus, octopus |
|
||||
| v0.20.0 | pacific, quincy | nautilus, octopus |
|
||||
| v0.19.0 | pacific, quincy | nautilus, octopus |
|
||||
| v0.18.0 | octopus, pacific, quincy | nautilus |
|
||||
| v0.17.0 | octopus, pacific, quincy | nautilus |
|
||||
|
||||
The tags affect what is supported at compile time. What version of the Ceph
|
||||
cluster the client libraries support, and vice versa, is determined entirely
|
||||
by what version of the Ceph C libraries go-ceph is compiled with.
|
||||
|
||||
To see what older versions of go-ceph supported refer to the [older
|
||||
releases](./docs/older-releases.md) file in the documentation.
|
||||
|
||||
|
||||
## Documentation
|
||||
|
||||
Detailed API documentation is available at
|
||||
<https://pkg.go.dev/github.com/ceph/go-ceph>.
|
||||
|
||||
Some [API Hints and How-Tos](./docs/hints.md) are also available to quickly
|
||||
introduce how some of API calls work together.
|
||||
|
||||
|
||||
## Development
|
||||
|
||||
```
|
||||
docker run --rm -it --net=host \
|
||||
--security-opt apparmor:unconfined \
|
||||
-v ${PWD}:/go/src/github.com/ceph/go-ceph:z \
|
||||
-v /home/nwatkins/src/ceph/build:/home/nwatkins/src/ceph/build:z \
|
||||
-e CEPH_CONF=/home/nwatkins/src/ceph/build/ceph.conf \
|
||||
ceph-golang
|
||||
```
|
||||
|
||||
Run against a `vstart.sh` cluster without installing Ceph:
|
||||
|
||||
```
|
||||
export CGO_CPPFLAGS="-I/ceph/src/include"
|
||||
export CGO_LDFLAGS="-L/ceph/build/lib"
|
||||
go build
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions are welcome & greatly appreciated, every little bit helps. Make code changes via Github pull requests:
|
||||
|
||||
- Fork the repo and create a topic branch for every feature/fix. Avoid
|
||||
making changes directly on master branch.
|
||||
- All incoming features should be accompanied with tests.
|
||||
- Make sure that you run `go fmt` before submitting a change
|
||||
set. Alternatively the Makefile has a flag for this, so you can call
|
||||
`make fmt` as well.
|
||||
- The integration tests can be run in a docker container, for this run:
|
||||
|
||||
```
|
||||
make test-docker
|
||||
```
|
||||
|
||||
### Getting in Touch
|
||||
|
||||
Want to get in touch with the go-ceph team? We're available through a few
|
||||
different channels:
|
||||
* Have a question, comment, or feedback:
|
||||
[Use the Discussions Board](https://github.com/ceph/go-ceph/discussions)
|
||||
* Report an issue or request a feature:
|
||||
[Issues Tracker](https://github.com/ceph/go-ceph/issues)
|
||||
* We participate in the Ceph
|
||||
[user's mailing list](https://lists.ceph.io/hyperkitty/list/ceph-users@ceph.io/)
|
||||
and [dev list](https://lists.ceph.io/hyperkitty/list/dev@ceph.io/)
|
||||
and we also announce our releases on those lists
|
||||
* You can sometimes find us in the
|
||||
[#ceph-devel IRC channel](https://ceph.io/irc/) - hours may vary
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
package admin
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ByteCount represents the size of a volume in bytes.
|
||||
type ByteCount uint64
|
||||
|
||||
// SI byte size constants. keep these private for now.
|
||||
const (
|
||||
kibiByte ByteCount = 1024
|
||||
mebiByte = 1024 * kibiByte
|
||||
gibiByte = 1024 * mebiByte
|
||||
tebiByte = 1024 * gibiByte
|
||||
)
|
||||
|
||||
// resizeValue returns a size value as a string, as needed by the subvolume
|
||||
// resize command json.
|
||||
func (bc ByteCount) resizeValue() string {
|
||||
return uint64String(uint64(bc))
|
||||
}
|
||||
|
||||
// QuotaSize interface values can be used to change the size of a volume.
|
||||
type QuotaSize interface {
|
||||
resizeValue() string
|
||||
}
|
||||
|
||||
// specialSize is a custom non-numeric quota size value.
|
||||
type specialSize string
|
||||
|
||||
// resizeValue for a specialSize returns the original string value.
|
||||
func (s specialSize) resizeValue() string {
|
||||
return string(s)
|
||||
}
|
||||
|
||||
// Infinite is a special QuotaSize value that can be used to clear size limits
|
||||
// on a subvolume.
|
||||
const Infinite = specialSize("infinite")
|
||||
|
||||
// quotaSizePlaceholder types are helpful to extract QuotaSize typed values
|
||||
// from JSON responses.
|
||||
type quotaSizePlaceholder struct {
|
||||
Value QuotaSize
|
||||
}
|
||||
|
||||
func (p *quotaSizePlaceholder) UnmarshalJSON(b []byte) error {
|
||||
var val interface{}
|
||||
if err := json.Unmarshal(b, &val); err != nil {
|
||||
return err
|
||||
}
|
||||
switch v := val.(type) {
|
||||
case string:
|
||||
if v == string(Infinite) {
|
||||
p.Value = Infinite
|
||||
} else {
|
||||
return fmt.Errorf("quota size: invalid string value: %q", v)
|
||||
}
|
||||
case float64:
|
||||
p.Value = ByteCount(v)
|
||||
default:
|
||||
return fmt.Errorf("quota size: invalid type, string or number required: %v (%T)", val, val)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
158
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/admin/clone.go
Normal file
158
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/admin/clone.go
Normal file
|
|
@ -0,0 +1,158 @@
|
|||
package admin
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
const notProtectedSuffix = "is not protected"
|
||||
|
||||
// NotProtectedError error values will be returned by CloneSubVolumeSnapshot in
|
||||
// the case that the source snapshot needs to be protected but is not. The
|
||||
// requirement for a snapshot to be protected prior to cloning varies by Ceph
|
||||
// version.
|
||||
type NotProtectedError struct {
|
||||
response
|
||||
}
|
||||
|
||||
// CloneOptions are used to specify optional values to be used when creating a
|
||||
// new subvolume clone.
|
||||
type CloneOptions struct {
|
||||
TargetGroup string
|
||||
PoolLayout string
|
||||
}
|
||||
|
||||
// CloneSubVolumeSnapshot clones the specified snapshot from the subvolume.
|
||||
// The group, subvolume, and snapshot parameters specify the source for the
|
||||
// clone, and only the source. Additional properties of the clone, such as the
|
||||
// subvolume group that the clone will be created in and the pool layout may be
|
||||
// specified using the clone options parameter.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume snapshot clone <volume> --group_name=<group> <subvolume> <snapshot> <name> [...]
|
||||
func (fsa *FSAdmin) CloneSubVolumeSnapshot(volume, group, subvolume, snapshot, name string, o *CloneOptions) error {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume snapshot clone",
|
||||
"vol_name": volume,
|
||||
"sub_name": subvolume,
|
||||
"snap_name": snapshot,
|
||||
"target_sub_name": name,
|
||||
"format": "json",
|
||||
}
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
if o != nil && o.TargetGroup != NoGroup {
|
||||
m["target_group_name"] = group
|
||||
}
|
||||
if o != nil && o.PoolLayout != "" {
|
||||
m["pool_layout"] = o.PoolLayout
|
||||
}
|
||||
return checkCloneResponse(fsa.marshalMgrCommand(m))
|
||||
}
func checkCloneResponse(res response) error {
	if strings.HasSuffix(res.Status(), notProtectedSuffix) {
		return NotProtectedError{response: res}
	}
	return res.NoData().End()
}

// CloneState is used to define constant values used to determine the state of
// a clone.
type CloneState string

const (
	// ClonePending is the state of a pending clone.
	ClonePending = CloneState("pending")
	// CloneInProgress is the state of a clone in progress.
	CloneInProgress = CloneState("in-progress")
	// CloneComplete is the state of a complete clone.
	CloneComplete = CloneState("complete")
	// CloneFailed is the state of a failed clone.
	CloneFailed = CloneState("failed")
)

// CloneSource contains values indicating the source of a clone.
type CloneSource struct {
	Volume    string `json:"volume"`
	Group     string `json:"group"`
	SubVolume string `json:"subvolume"`
	Snapshot  string `json:"snapshot"`
}

// CloneProgressReport contains the progress report of a subvolume clone.
type CloneProgressReport struct {
	PercentageCloned string `json:"percentage cloned"`
	AmountCloned     string `json:"amount cloned"`
	FilesCloned      string `json:"files cloned"`
}

// CloneStatus reports on the status of a subvolume clone.
type CloneStatus struct {
	State          CloneState           `json:"state"`
	Source         CloneSource          `json:"source"`
	ProgressReport CloneProgressReport  `json:"progress_report"`

	// failure can be obtained through .GetFailure()
	failure *CloneFailure
}

// CloneFailure reports details of a failure after a subvolume clone failed.
type CloneFailure struct {
	Errno  string `json:"errno"`
	ErrStr string `json:"errstr"`
}

type cloneStatusWrapper struct {
	Status  CloneStatus  `json:"status"`
	Failure CloneFailure `json:"failure"`
}

func parseCloneStatus(res response) (*CloneStatus, error) {
	var status cloneStatusWrapper
	if err := res.NoStatus().Unmarshal(&status).End(); err != nil {
		return nil, err
	}
	if status.Failure.Errno != "" || status.Failure.ErrStr != "" {
		status.Status.failure = &status.Failure
	}
	return &status.Status, nil
}

// CloneStatus returns data reporting the status of a subvolume clone.
//
// Similar To:
//
//	ceph fs clone status <volume> --group_name=<group> <clone>
func (fsa *FSAdmin) CloneStatus(volume, group, clone string) (*CloneStatus, error) {
	m := map[string]string{
		"prefix":     "fs clone status",
		"vol_name":   volume,
		"clone_name": clone,
		"format":     "json",
	}
	if group != NoGroup {
		m["group_name"] = group
	}
	return parseCloneStatus(fsa.marshalMgrCommand(m))
}

// CancelClone stops the background processes that populate a clone.
// CancelClone does not delete the clone.
//
// Similar To:
//
//	ceph fs clone cancel <volume> --group_name=<group> <clone>
func (fsa *FSAdmin) CancelClone(volume, group, clone string) error {
	m := map[string]string{
		"prefix":     "fs clone cancel",
		"vol_name":   volume,
		"clone_name": clone,
		"format":     "json",
	}
	if group != NoGroup {
		m["group_name"] = group
	}
	return fsa.marshalMgrCommand(m).NoData().End()
}
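CloneStatus and CancelClone are typically combined in a polling loop, much like the tests later in this change. A hedged sketch, assuming "fmt" and "time" are imported and using made-up names:

// pollClone waits for a clone to finish, bailing out after a bounded number
// of checks. This is an illustrative sketch, not library code.
func pollClone(fsa *FSAdmin, volume, group, clone string) error {
	for i := 0; i < 120; i++ {
		status, err := fsa.CloneStatus(volume, group, clone)
		if err != nil {
			return err
		}
		switch status.State {
		case CloneComplete:
			return nil
		case CloneFailed:
			return fmt.Errorf("clone %s failed", clone)
		}
		time.Sleep(500 * time.Millisecond)
	}
	// give up: stop the background clone threads; the partial clone remains
	// and must still be removed by the caller.
	return fsa.CancelClone(volume, group, clone)
}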
@@ -0,0 +1,11 @@
package admin

// GetFailure returns details about the CloneStatus when in CloneFailed state.
//
// Similar To:
//
// Reading the .failure object from the JSON returned by "ceph fs subvolume
// snapshot clone"
func (cs *CloneStatus) GetFailure() *CloneFailure {
	return cs.failure
}
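A small sketch of how GetFailure is meant to be consumed (assumes "fmt" is imported; purely illustrative):

func describeCloneFailure(status *CloneStatus) string {
	// failure details are only attached when the clone is in the failed state
	if status.State == CloneFailed && status.GetFailure() != nil {
		f := status.GetFailure()
		return fmt.Sprintf("clone failed: errno=%s (%s)", f.Errno, f.ErrStr)
	}
	return "clone has not failed"
}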
@@ -0,0 +1,65 @@
package admin
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var sampleCloneStatusFailed = []byte(`{
|
||||
"status": {
|
||||
"state": "failed",
|
||||
"source": {
|
||||
"volume": "non-existing-cephfs",
|
||||
"subvolume": "subvol1",
|
||||
"snapshot": "snap1"
|
||||
}
|
||||
},
|
||||
"failure": {
|
||||
"errno": "2",
|
||||
"errstr": "No such file or directory"
|
||||
}
|
||||
}`)
|
||||
|
||||
// TestParseCloneStatusFailure is heavily based on TestParseCloneStatus, with
|
||||
// the addition of GetFailure() calls.
|
||||
func TestParseCloneStatusFailure(t *testing.T) {
|
||||
R := newResponse
|
||||
t.Run("okPending", func(t *testing.T) {
|
||||
status, err := parseCloneStatus(R(sampleCloneStatusPending, "", nil))
|
||||
assert.NoError(t, err)
|
||||
if assert.NotNil(t, status) {
|
||||
assert.EqualValues(t, ClonePending, status.State)
|
||||
assert.EqualValues(t, "cephfs", status.Source.Volume)
|
||||
assert.EqualValues(t, "jurrasic", status.Source.SubVolume)
|
||||
assert.EqualValues(t, "dinodna", status.Source.Snapshot)
|
||||
assert.EqualValues(t, "park", status.Source.Group)
|
||||
assert.Nil(t, status.GetFailure())
|
||||
}
|
||||
})
|
||||
t.Run("okInProg", func(t *testing.T) {
|
||||
status, err := parseCloneStatus(R(sampleCloneStatusInProg, "", nil))
|
||||
assert.NoError(t, err)
|
||||
if assert.NotNil(t, status) {
|
||||
assert.EqualValues(t, CloneInProgress, status.State)
|
||||
assert.EqualValues(t, "cephfs", status.Source.Volume)
|
||||
assert.EqualValues(t, "subvol1", status.Source.SubVolume)
|
||||
assert.EqualValues(t, "snap1", status.Source.Snapshot)
|
||||
assert.EqualValues(t, "", status.Source.Group)
|
||||
assert.Nil(t, status.GetFailure())
|
||||
}
|
||||
})
|
||||
t.Run("failedMissingVolume", func(t *testing.T) {
|
||||
status, err := parseCloneStatus(R(sampleCloneStatusFailed, "", nil))
|
||||
assert.NoError(t, err)
|
||||
if assert.NotNil(t, status) {
|
||||
assert.EqualValues(t, CloneFailed, status.State)
|
||||
assert.EqualValues(t, "non-existing-cephfs", status.Source.Volume)
|
||||
assert.EqualValues(t, "subvol1", status.Source.SubVolume)
|
||||
assert.EqualValues(t, "snap1", status.Source.Snapshot)
|
||||
assert.EqualValues(t, "", status.Source.Group)
|
||||
assert.EqualValues(t, "2", status.GetFailure().Errno)
|
||||
assert.EqualValues(t, "No such file or directory", status.GetFailure().ErrStr)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,76 @@
//go:build !(octopus || pacific || quincy || reef || squid)
|
||||
|
||||
package admin
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestCloneProgress(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "Park"
|
||||
subname := "Jurrasic"
|
||||
snapname := "dinodna1"
|
||||
clonename := "babydino"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
svopts := &SubVolumeOptions{
|
||||
Mode: 0750,
|
||||
Size: 20 * gibiByte,
|
||||
}
|
||||
err = fsa.CreateSubVolume(volume, group, subname, svopts)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = fsa.CloneSubVolumeSnapshot(
|
||||
volume, group, subname, snapname, clonename,
|
||||
&CloneOptions{TargetGroup: group})
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, group, clonename)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
wasInProgress := false
|
||||
for done := false; !done; {
|
||||
status, err := fsa.CloneStatus(volume, group, clonename)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, status)
|
||||
switch status.State {
|
||||
case ClonePending:
|
||||
case CloneInProgress:
|
||||
wasInProgress = true
|
||||
assert.NotNil(t, status.ProgressReport.PercentageCloned)
|
||||
assert.NotNil(t, status.ProgressReport.AmountCloned)
|
||||
assert.NotNil(t, status.ProgressReport.FilesCloned)
|
||||
case CloneComplete:
|
||||
done = true
|
||||
case CloneFailed:
|
||||
t.Fatal("clone failed")
|
||||
default:
|
||||
t.Fatalf("invalid clone status: %q", status.State)
|
||||
}
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
}
|
||||
assert.Equal(t, wasInProgress, true)
|
||||
}
|
||||
@@ -0,0 +1,76 @@
//go:build octopus || pacific || quincy || reef || squid
|
||||
|
||||
package admin
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestCloneProgress(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "Park"
|
||||
subname := "Jurrasic"
|
||||
snapname := "dinodna1"
|
||||
clonename := "babydino"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
svopts := &SubVolumeOptions{
|
||||
Mode: 0750,
|
||||
Size: 20 * gibiByte,
|
||||
}
|
||||
err = fsa.CreateSubVolume(volume, group, subname, svopts)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = fsa.CloneSubVolumeSnapshot(
|
||||
volume, group, subname, snapname, clonename,
|
||||
&CloneOptions{TargetGroup: group})
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, group, clonename)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
wasInProgress := false
|
||||
for done := false; !done; {
|
||||
status, err := fsa.CloneStatus(volume, group, clonename)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, status)
|
||||
switch status.State {
|
||||
case ClonePending:
|
||||
case CloneInProgress:
|
||||
wasInProgress = true
|
||||
assert.Empty(t, status.ProgressReport.PercentageCloned)
|
||||
assert.Empty(t, status.ProgressReport.AmountCloned)
|
||||
assert.Empty(t, status.ProgressReport.FilesCloned)
|
||||
case CloneComplete:
|
||||
done = true
|
||||
case CloneFailed:
|
||||
t.Fatal("clone failed")
|
||||
default:
|
||||
t.Fatalf("invalid clone status: %q", status.State)
|
||||
}
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
}
|
||||
assert.Equal(t, wasInProgress, true)
|
||||
}
|
||||
@@ -0,0 +1,206 @@
package admin
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var sampleCloneStatusPending = []byte(`{
|
||||
"status": {
|
||||
"state": "pending",
|
||||
"source": {
|
||||
"volume": "cephfs",
|
||||
"subvolume": "jurrasic",
|
||||
"snapshot": "dinodna",
|
||||
"group": "park"
|
||||
}
|
||||
}
|
||||
}`)
|
||||
|
||||
var sampleCloneStatusInProg = []byte(`{
|
||||
"status": {
|
||||
"state": "in-progress",
|
||||
"source": {
|
||||
"volume": "cephfs",
|
||||
"subvolume": "subvol1",
|
||||
"snapshot": "snap1"
|
||||
}
|
||||
}
|
||||
}`)
|
||||
|
||||
func TestParseCloneStatus(t *testing.T) {
|
||||
R := newResponse
|
||||
t.Run("error", func(t *testing.T) {
|
||||
_, err := parseCloneStatus(R(nil, "", errors.New("flub")))
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "flub", err.Error())
|
||||
})
|
||||
t.Run("statusSet", func(t *testing.T) {
|
||||
_, err := parseCloneStatus(R(nil, "unexpected!", nil))
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("badJSON", func(t *testing.T) {
|
||||
_, err := parseCloneStatus(R([]byte("_XxXxX"), "", nil))
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("okPending", func(t *testing.T) {
|
||||
status, err := parseCloneStatus(R(sampleCloneStatusPending, "", nil))
|
||||
assert.NoError(t, err)
|
||||
if assert.NotNil(t, status) {
|
||||
assert.EqualValues(t, ClonePending, status.State)
|
||||
assert.EqualValues(t, "cephfs", status.Source.Volume)
|
||||
assert.EqualValues(t, "jurrasic", status.Source.SubVolume)
|
||||
assert.EqualValues(t, "dinodna", status.Source.Snapshot)
|
||||
assert.EqualValues(t, "park", status.Source.Group)
|
||||
}
|
||||
})
|
||||
t.Run("okInProg", func(t *testing.T) {
|
||||
status, err := parseCloneStatus(R(sampleCloneStatusInProg, "", nil))
|
||||
assert.NoError(t, err)
|
||||
if assert.NotNil(t, status) {
|
||||
assert.EqualValues(t, CloneInProgress, status.State)
|
||||
assert.EqualValues(t, "cephfs", status.Source.Volume)
|
||||
assert.EqualValues(t, "subvol1", status.Source.SubVolume)
|
||||
assert.EqualValues(t, "snap1", status.Source.Snapshot)
|
||||
assert.EqualValues(t, "", status.Source.Group)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestCloneSubVolumeSnapshot(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "Park"
|
||||
subname := "Jurrasic"
|
||||
snapname := "dinodna0"
|
||||
clonename := "babydino"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
svopts := &SubVolumeOptions{
|
||||
Mode: 0750,
|
||||
Size: 20 * gibiByte,
|
||||
}
|
||||
err = fsa.CreateSubVolume(volume, group, subname, svopts)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = fsa.CloneSubVolumeSnapshot(
|
||||
volume, group, subname, snapname, clonename,
|
||||
&CloneOptions{TargetGroup: group})
|
||||
var x NotProtectedError
|
||||
if errors.As(err, &x) {
|
||||
err = fsa.ProtectSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.UnprotectSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = fsa.CloneSubVolumeSnapshot(
|
||||
volume, group, subname, snapname, clonename,
|
||||
&CloneOptions{TargetGroup: group})
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, group, clonename)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
for done := false; !done; {
|
||||
status, err := fsa.CloneStatus(volume, group, clonename)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, status)
|
||||
switch status.State {
|
||||
case ClonePending, CloneInProgress:
|
||||
case CloneComplete:
|
||||
done = true
|
||||
case CloneFailed:
|
||||
t.Fatal("clone failed")
|
||||
default:
|
||||
t.Fatalf("invalid status.State: %q", status.State)
|
||||
}
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCancelClone(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "Park"
|
||||
subname := "Jurrasic"
|
||||
snapname := "dinodna0"
|
||||
clonename := "babydino2"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
svopts := &SubVolumeOptions{
|
||||
Mode: 0750,
|
||||
Size: 20 * gibiByte,
|
||||
}
|
||||
err = fsa.CreateSubVolume(volume, group, subname, svopts)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = fsa.CloneSubVolumeSnapshot(
|
||||
volume, group, subname, snapname, clonename,
|
||||
&CloneOptions{TargetGroup: group})
|
||||
var x NotProtectedError
|
||||
if errors.As(err, &x) {
|
||||
err = fsa.ProtectSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.UnprotectSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = fsa.CloneSubVolumeSnapshot(
|
||||
volume, group, subname, snapname, clonename,
|
||||
&CloneOptions{TargetGroup: group})
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.ForceRemoveSubVolume(volume, group, clonename)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
// we can't guarantee that this clone can be canceled here, especially
|
||||
// if the clone happens fast on the ceph server side, but I have not yet
|
||||
// seen an instance where it fails. If it happens we can adjust the test as
|
||||
// needed.
|
||||
err = fsa.CancelClone(volume, group, clonename)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
11 pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/admin/doc.go Normal file
@@ -0,0 +1,11 @@
/*
Package admin is a convenience layer to support the administration of
CephFS volumes, subvolumes, etc.

Unlike the cephfs package this API does not map to APIs provided by
ceph libraries themselves. This API is not yet stable and is subject
to change.

This package only supports ceph "nautilus" and "octopus" at this time.
*/
package admin
@@ -0,0 +1,58 @@
package admin

// For APIs that accept extra sets of "boolean" flags we may end up wanting
// multiple different sets of supported flags. Example: most rm functions
// accept a force flag, but only subvolume delete has retain snapshots.
// To make this somewhat uniform in the admin package we define a utility
// interface and helper function to merge flags with naming options.

type flagSet interface {
	flags() map[string]bool
}

type commonRmFlags struct {
	force bool
}

func (f commonRmFlags) flags() map[string]bool {
	o := make(map[string]bool)
	if f.force {
		o["force"] = true
	}
	return o
}

// SubVolRmFlags does not embed other types to simplify and keep the
// interface with the type flat and simple. At the cost of some code
// duplication we get a nicer UX for those using the library.

// SubVolRmFlags may be used to specify behavior modifying flags when
// removing sub volumes.
type SubVolRmFlags struct {
	Force           bool
	RetainSnapshots bool
}

func (f SubVolRmFlags) flags() map[string]bool {
	o := make(map[string]bool)
	if f.Force {
		o["force"] = true
	}
	if f.RetainSnapshots {
		o["retain_snapshots"] = true
	}
	return o
}

// mergeFlags combines a set of key-value settings with any type implementing
// the flagSet interface.
func mergeFlags(m map[string]string, f flagSet) map[string]interface{} {
	o := make(map[string]interface{})
	for k, v := range m {
		o[k] = v
	}
	for k, v := range f.flags() {
		o[k] = v
	}
	return o
}
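An illustrative sketch of what mergeFlags produces; the "fs subvolume rm" prefix follows the pattern of the other subvolume commands in this change and is an assumption, not part of the patch:

func exampleRmArgs(volume, subvolume string) map[string]interface{} {
	m := map[string]string{
		"prefix":   "fs subvolume rm",
		"vol_name": volume,
		"sub_name": subvolume,
		"format":   "json",
	}
	// the result carries the string arguments above plus "force": true and
	// "retain_snapshots": true as booleans
	return mergeFlags(m, SubVolRmFlags{Force: true, RetainSnapshots: true})
}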
@@ -0,0 +1,133 @@
package admin

import "fmt"

// fixedPointFloat is a custom type that implements the MarshalJSON interface.
// This is used to format float64 values to two decimal places.
// By default these get converted to integers in the JSON output and
// fail the command.
type fixedPointFloat float64

// MarshalJSON provides a custom implementation for the JSON marshalling
// of fixedPointFloat. It formats the float to two decimal places.
func (fpf fixedPointFloat) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf("%.2f", float64(fpf))), nil
}
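A quick sketch of the effect (assumes "encoding/json" is imported): a fixedPointFloat always serializes with two decimal places, whereas a plain float64 holding a whole number would serialize as an integer and be rejected by the command.

func exampleFixedPoint() {
	b, _ := json.Marshal(struct {
		Timeout fixedPointFloat `json:"timeout"`
	}{Timeout: 10})
	// b is {"timeout":10.00}; a float64 field would have produced {"timeout":10}
	_ = b
}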
|
||||
|
||||
// fSQuiesceFields is the internal type used to create JSON for ceph.
|
||||
// See FSQuiesceOptions for the type that users of the library
|
||||
// interact with.
|
||||
type fSQuiesceFields struct {
|
||||
Prefix string `json:"prefix"`
|
||||
VolName string `json:"vol_name"`
|
||||
GroupName string `json:"group_name,omitempty"`
|
||||
Members []string `json:"members,omitempty"`
|
||||
SetId string `json:"set_id,omitempty"`
|
||||
Timeout fixedPointFloat `json:"timeout,omitempty"`
|
||||
Expiration fixedPointFloat `json:"expiration,omitempty"`
|
||||
AwaitFor fixedPointFloat `json:"await_for,omitempty"`
|
||||
Await bool `json:"await,omitempty"`
|
||||
IfVersion int `json:"if_version,omitempty"`
|
||||
Include bool `json:"include,omitempty"`
|
||||
Exclude bool `json:"exclude,omitempty"`
|
||||
Reset bool `json:"reset,omitempty"`
|
||||
Release bool `json:"release,omitempty"`
|
||||
Query bool `json:"query,omitempty"`
|
||||
All bool `json:"all,omitempty"`
|
||||
Cancel bool `json:"cancel,omitempty"`
|
||||
}
|
||||
|
||||
// FSQuiesceOptions are used to specify optional, non-identifying, values
|
||||
// to be used when quiescing a cephfs volume.
|
||||
type FSQuiesceOptions struct {
|
||||
Timeout float64
|
||||
Expiration float64
|
||||
AwaitFor float64
|
||||
Await bool
|
||||
IfVersion int
|
||||
Include bool
|
||||
Exclude bool
|
||||
Reset bool
|
||||
Release bool
|
||||
Query bool
|
||||
All bool
|
||||
Cancel bool
|
||||
}
|
||||
|
||||
// toFields is used to convert the FSQuiesceOptions to the internal
|
||||
// fSQuiesceFields type.
|
||||
func (o *FSQuiesceOptions) toFields(volume, group string, subvolumes []string, setId string) *fSQuiesceFields {
|
||||
return &fSQuiesceFields{
|
||||
Prefix: "fs quiesce",
|
||||
VolName: volume,
|
||||
GroupName: group,
|
||||
Members: subvolumes,
|
||||
SetId: setId,
|
||||
Timeout: fixedPointFloat(o.Timeout),
|
||||
Expiration: fixedPointFloat(o.Expiration),
|
||||
AwaitFor: fixedPointFloat(o.AwaitFor),
|
||||
Await: o.Await,
|
||||
IfVersion: o.IfVersion,
|
||||
Include: o.Include,
|
||||
Exclude: o.Exclude,
|
||||
Reset: o.Reset,
|
||||
Release: o.Release,
|
||||
Query: o.Query,
|
||||
All: o.All,
|
||||
Cancel: o.Cancel,
|
||||
}
|
||||
}
|
||||
|
||||
// QuiesceState is used to report the state of a quiesced fs volume.
|
||||
type QuiesceState struct {
|
||||
Name string `json:"name"`
|
||||
Age float64 `json:"age"`
|
||||
}
|
||||
|
||||
// QuiesceInfoMember is used to report the state of a quiesced fs volume.
|
||||
// This is part of sets members object array in the json.
|
||||
type QuiesceInfoMember struct {
|
||||
Excluded bool `json:"excluded"`
|
||||
State QuiesceState `json:"state"`
|
||||
}
|
||||
|
||||
// QuiesceInfo reports various informational values about a quiesced volume.
|
||||
// This is returned as sets object array in the json.
|
||||
type QuiesceInfo struct {
|
||||
Version int `json:"version"`
|
||||
AgeRef float64 `json:"age_ref"`
|
||||
State QuiesceState `json:"state"`
|
||||
Timeout float64 `json:"timeout"`
|
||||
Expiration float64 `json:"expiration"`
|
||||
Members map[string]QuiesceInfoMember `json:"members"`
|
||||
}
|
||||
|
||||
// FSQuiesceInfo reports various informational values about quiesced volumes.
|
||||
type FSQuiesceInfo struct {
|
||||
Epoch int `json:"epoch"`
|
||||
SetVersion int `json:"set_version"`
|
||||
Sets map[string]QuiesceInfo `json:"sets"`
|
||||
}
|
||||
|
||||
// parseFSQuiesceInfo is used to parse the response from the quiesce command. It returns a FSQuiesceInfo object.
|
||||
func parseFSQuiesceInfo(res response) (*FSQuiesceInfo, error) {
|
||||
var info FSQuiesceInfo
|
||||
if err := res.NoStatus().Unmarshal(&info).End(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &info, nil
|
||||
}
|
||||
|
||||
// FSQuiesce will quiesce the specified subvolumes in a volume.
|
||||
// Quiescing a fs will prevent new writes to the subvolumes.
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs quiesce <volume>
|
||||
func (fsa *FSAdmin) FSQuiesce(volume, group string, subvolumes []string, setId string, o *FSQuiesceOptions) (*FSQuiesceInfo, error) {
|
||||
if o == nil {
|
||||
o = &FSQuiesceOptions{}
|
||||
}
|
||||
f := o.toFields(volume, group, subvolumes, setId)
|
||||
|
||||
return parseFSQuiesceInfo(fsa.marshalMgrCommand(f))
|
||||
}
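A hedged usage sketch for FSQuiesce (assumes "fmt" is imported; the volume, subvolume, and set names are made up): quiesce two subvolumes with a timeout, inspect the returned sets, and later release the same set.

func quiesceExample(fsa *FSAdmin) error {
	opts := &FSQuiesceOptions{Timeout: 30, Expiration: 60, Await: true}
	info, err := fsa.FSQuiesce("cephfs", NoGroup, []string{"sv1", "sv2"}, "backup-set", opts)
	if err != nil {
		return err
	}
	for name, set := range info.Sets {
		fmt.Printf("quiesce set %s: state=%s timeout=%.2f\n", name, set.State.Name, set.Timeout)
	}
	// later, release the same set by id once the work needing quiesce is done
	// (passing no members here is an assumption of this sketch)
	_, err = fsa.FSQuiesce("cephfs", NoGroup, nil, "backup-set",
		&FSQuiesceOptions{Release: true})
	return err
}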
@@ -0,0 +1,51 @@
//go:build !(nautilus || octopus || pacific || quincy || reef)
|
||||
|
||||
package admin
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestFSQuiesce(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := NoGroup
|
||||
subvol := "quiesceMe"
|
||||
fsa.CreateSubVolume(volume, group, subvol, nil)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, group, subvol)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
ret, err := fsa.FSQuiesce(volume, group, []string{subvol}, "", nil)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
for _, val := range ret.Sets {
|
||||
assert.Equal(t, 0.0, val.Timeout)
|
||||
}
|
||||
o := &FSQuiesceOptions{}
|
||||
o.Timeout = 10.7
|
||||
ret, err = fsa.FSQuiesce(volume, group, []string{subvol}, "", o)
|
||||
assert.NoError(t, err)
|
||||
for _, val := range ret.Sets {
|
||||
assert.Equal(t, 10.7, val.Timeout)
|
||||
}
|
||||
|
||||
o.Expiration = 15.2
|
||||
ret, err = fsa.FSQuiesce(volume, group, []string{subvol}, "", o)
|
||||
assert.NoError(t, err)
|
||||
for _, val := range ret.Sets {
|
||||
assert.Equal(t, 15.2, val.Expiration)
|
||||
assert.Equal(t, 10.7, val.Timeout)
|
||||
}
|
||||
|
||||
o.Expiration = 15
|
||||
ret, err = fsa.FSQuiesce(volume, group, []string{subvol}, "", o)
|
||||
assert.NoError(t, err)
|
||||
for _, val := range ret.Sets {
|
||||
assert.Equal(t, 15.0, val.Expiration)
|
||||
assert.Equal(t, 10.7, val.Timeout)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,25 @@
//go:build nautilus || octopus || pacific || quincy || reef
|
||||
|
||||
package admin
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestFSQuiesce(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := NoGroup
|
||||
fsa.CreateSubVolume(volume, group, "quiesceMe", nil)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, group, "quiesceMe")
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
ret, err := fsa.FSQuiesce(volume, group, []string{"quiesceMe"}, "", nil)
|
||||
assert.Nil(t, ret)
|
||||
var notImplemented NotImplementedError
|
||||
assert.True(t, errors.As(err, ¬Implemented))
|
||||
}
|
||||
116 pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/admin/fsadmin.go Normal file
@@ -0,0 +1,116 @@
package admin
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
ccom "github.com/ceph/go-ceph/common/commands"
|
||||
"github.com/ceph/go-ceph/internal/commands"
|
||||
"github.com/ceph/go-ceph/rados"
|
||||
)
|
||||
|
||||
// RadosCommander provides an interface to execute JSON-formatted commands that
|
||||
// allow the cephfs administrative functions to interact with the Ceph cluster.
|
||||
type RadosCommander = ccom.RadosCommander
|
||||
|
||||
// FSAdmin is used to administrate CephFS within a ceph cluster.
|
||||
type FSAdmin struct {
|
||||
conn RadosCommander
|
||||
}
|
||||
|
||||
// NewFromConn creates an FSAdmin management object from a preexisting
|
||||
// rados connection. The existing connection can be rados.Conn or any
|
||||
// type implementing the RadosCommander interface. This may be useful
|
||||
// if the calling layer needs to inject additional logging, error handling,
|
||||
// fault injection, etc.
|
||||
func NewFromConn(conn RadosCommander) *FSAdmin {
|
||||
return &FSAdmin{conn}
|
||||
}
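A minimal connection sketch (not from the patch): rados.NewConn, ReadDefaultConfigFile, and Connect are the standard go-ceph rados calls; error handling is kept terse and the helper name is invented.

func connectFSAdmin() (*FSAdmin, error) {
	conn, err := rados.NewConn()
	if err != nil {
		return nil, err
	}
	if err := conn.ReadDefaultConfigFile(); err != nil {
		return nil, err
	}
	if err := conn.Connect(); err != nil {
		return nil, err
	}
	// any type satisfying RadosCommander works here; *rados.Conn does
	return NewFromConn(conn), nil
}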
|
||||
|
||||
func (fsa *FSAdmin) validate() error {
|
||||
if fsa.conn == nil {
|
||||
return rados.ErrNotConnected
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// rawMgrCommand takes a byte buffer and sends it to the MGR as a command.
|
||||
// The buffer is expected to contain preformatted JSON.
|
||||
func (fsa *FSAdmin) rawMgrCommand(buf []byte) response {
|
||||
return commands.RawMgrCommand(fsa.conn, buf)
|
||||
}
|
||||
|
||||
// marshalMgrCommand takes a generic interface{} value, converts it to JSON and
|
||||
// sends the json to the MGR as a command.
|
||||
func (fsa *FSAdmin) marshalMgrCommand(v interface{}) response {
|
||||
return commands.MarshalMgrCommand(fsa.conn, v)
|
||||
}
|
||||
|
||||
// rawMonCommand takes a byte buffer and sends it to the MON as a command.
|
||||
// The buffer is expected to contain preformatted JSON.
|
||||
func (fsa *FSAdmin) rawMonCommand(buf []byte) response {
|
||||
return commands.RawMonCommand(fsa.conn, buf)
|
||||
}
|
||||
|
||||
// marshalMonCommand takes a generic interface{} value, converts it to JSON and
// sends the json to the MON as a command.
|
||||
func (fsa *FSAdmin) marshalMonCommand(v interface{}) response {
|
||||
return commands.MarshalMonCommand(fsa.conn, v)
|
||||
}
|
||||
|
||||
type listNamedResult struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
func parseListNames(res response) ([]string, error) {
|
||||
var r []listNamedResult
|
||||
if err := res.NoStatus().Unmarshal(&r).End(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vl := make([]string, len(r))
|
||||
for i := range r {
|
||||
vl[i] = r[i].Name
|
||||
}
|
||||
return vl, nil
|
||||
}
|
||||
|
||||
func parseListKeyValues(res response) (map[string]string, error) {
|
||||
var x map[string]string
|
||||
if err := res.NoStatus().Unmarshal(&x).End(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return x, nil
|
||||
}
|
||||
|
||||
// parsePathResponse returns a cleaned up path from requests that get a path
|
||||
// unless an error is encountered, then an error is returned.
|
||||
func parsePathResponse(res response) (string, error) {
|
||||
if res2 := res.NoStatus(); !res2.Ok() {
|
||||
return "", res.End()
|
||||
}
|
||||
b := res.Body()
|
||||
// if there's a trailing newline in the buffer strip it.
|
||||
// ceph assumes a CLI wants the output of the buffer and there's
|
||||
// no format=json mode available currently.
|
||||
for len(b) >= 1 && b[len(b)-1] == '\n' {
|
||||
b = b[:len(b)-1]
|
||||
}
|
||||
return string(b), nil
|
||||
}
|
||||
|
||||
// modeString converts a unix-style mode value to a string-ified version in an
|
||||
// octal representation (e.g. "777", "700", etc). This format is expected by
|
||||
// some of the ceph JSON command inputs.
|
||||
func modeString(m int, force bool) string {
|
||||
if force || m != 0 {
|
||||
return strconv.FormatInt(int64(m), 8)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// uint64String converts a uint64 to a string. Some of the ceph json commands
|
||||
// can take a string or "int" (as a string). This is a common function for
|
||||
// doing that conversion.
|
||||
func uint64String(v uint64) string {
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
}
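For illustration, the two helpers above behave as follows (values chosen arbitrarily): mode values become octal strings and sizes become decimal strings, matching what the ceph JSON command arguments expect.

func exampleConversions() (string, string, string) {
	return modeString(0750, false), // "750"
		modeString(0, false), // "" (dropped unless force is set)
		uint64String(20 * 1024 * 1024 * 1024) // "21474836480"
}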
@@ -0,0 +1,111 @@
package admin
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/ceph/go-ceph/internal/admintest"
|
||||
"github.com/ceph/go-ceph/internal/util"
|
||||
)
|
||||
|
||||
var (
|
||||
radosConnector = admintest.NewConnector()
|
||||
|
||||
// some tests are sensitive to the server version
|
||||
serverVersion string
|
||||
)
|
||||
|
||||
func init() {
|
||||
serverVersion = util.CurrentCephVersionString()
|
||||
}
|
||||
|
||||
func TestServerSentinel(t *testing.T) {
|
||||
// there probably *is* a better way to do this but I'm doing what's easy
|
||||
// and expedient at the moment. That's tying the tests to the environment
|
||||
// var to tell us what version of the *server* we are testing against. The
|
||||
// build tags control what version of the *client libs* we use. These
|
||||
// happen to be the same for our CI tests today, but its a lousy way to
|
||||
// organize things IMO.
|
||||
// This check is intended to fail the test suite if you don't tell it a
|
||||
// server version it expects and force us to update the tests if a new
|
||||
// version of ceph is added.
|
||||
if serverVersion == "" {
|
||||
t.Fatalf("server must be nautilus, octopus, pacific, quincy, or reef (do the tests need updating?)")
|
||||
}
|
||||
}
|
||||
|
||||
func getFSAdmin(t *testing.T) *FSAdmin {
|
||||
return NewFromConn(radosConnector.Get(t))
|
||||
}
|
||||
|
||||
func newFSAdmin(t *testing.T, configFile string) *FSAdmin {
|
||||
return NewFromConn(
|
||||
admintest.WrapConn(admintest.NewConnFromConfig(t, configFile)))
|
||||
}
|
||||
|
||||
func TestInvalidFSAdmin(t *testing.T) {
|
||||
fsa := &FSAdmin{}
|
||||
res := fsa.rawMgrCommand([]byte("FOOBAR!"))
|
||||
assert.Error(t, res.Unwrap())
|
||||
}
|
||||
|
||||
type badMarshalType bool
|
||||
|
||||
func (badMarshalType) MarshalJSON() ([]byte, error) {
|
||||
return nil, errors.New("Zowie! wow")
|
||||
}
|
||||
|
||||
func TestBadMarshal(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
|
||||
var bad badMarshalType
|
||||
res := fsa.marshalMgrCommand(bad)
|
||||
assert.Error(t, res.Unwrap())
|
||||
}
|
||||
|
||||
func TestParseListNames(t *testing.T) {
|
||||
R := newResponse
|
||||
t.Run("error", func(t *testing.T) {
|
||||
_, err := parseListNames(R(nil, "", errors.New("bonk")))
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "bonk", err.Error())
|
||||
})
|
||||
t.Run("statusSet", func(t *testing.T) {
|
||||
_, err := parseListNames(R(nil, "unexpected!", nil))
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("badJSON", func(t *testing.T) {
|
||||
_, err := parseListNames(R([]byte("Foo[[["), "", nil))
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
l, err := parseListNames(R([]byte(`[{"name":"bob"}]`), "", nil))
|
||||
assert.NoError(t, err)
|
||||
if assert.Len(t, l, 1) {
|
||||
assert.Equal(t, "bob", l[0])
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestCheckEmptyResponseExpected(t *testing.T) {
|
||||
R := newResponse
|
||||
t.Run("error", func(t *testing.T) {
|
||||
err := R(nil, "", errors.New("bonk")).NoData().End()
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "bonk", err.Error())
|
||||
})
|
||||
t.Run("statusSet", func(t *testing.T) {
|
||||
err := R(nil, "unexpected!", nil).NoData().End()
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("someJSON", func(t *testing.T) {
|
||||
err := R([]byte(`{"trouble": true}`), "", nil).NoData().End()
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
err := R([]byte{}, "", nil).NoData().End()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
107 pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/admin/metadata.go Normal file
@@ -0,0 +1,107 @@
//go:build !(nautilus || octopus || pacific)
|
||||
// +build !nautilus,!octopus,!pacific
|
||||
|
||||
package admin
|
||||
|
||||
// GetMetadata gets custom metadata on the subvolume in a volume belonging to
|
||||
// an optional subvolume group based on provided key name.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume metadata get <vol_name> <sub_name> <key_name> [--group_name <subvol_group_name>]
|
||||
func (fsa *FSAdmin) GetMetadata(volume, group, subvolume, key string) (string, error) {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume metadata get",
|
||||
"format": "json",
|
||||
"vol_name": volume,
|
||||
"sub_name": subvolume,
|
||||
"key_name": key,
|
||||
}
|
||||
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
|
||||
return parsePathResponse(fsa.marshalMgrCommand(m))
|
||||
}
|
||||
|
||||
// SetMetadata sets custom metadata on the subvolume in a volume belonging to
|
||||
// an optional subvolume group as a key-value pair.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume metadata set <vol_name> <sub_name> <key_name> <value> [--group_name <subvol_group_name>]
|
||||
func (fsa *FSAdmin) SetMetadata(volume, group, subvolume, key, value string) error {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume metadata set",
|
||||
"format": "json",
|
||||
"vol_name": volume,
|
||||
"sub_name": subvolume,
|
||||
"key_name": key,
|
||||
"value": value,
|
||||
}
|
||||
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
|
||||
return fsa.marshalMgrCommand(m).NoData().End()
|
||||
}
|
||||
|
||||
// RemoveMetadata removes custom metadata set on the subvolume in a volume
|
||||
// belonging to an optional subvolume group using the metadata key.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume metadata rm <vol_name> <sub_name> <key_name> [--group_name <subvol_group_name>]
|
||||
func (fsa *FSAdmin) RemoveMetadata(volume, group, subvolume, key string) error {
|
||||
return fsa.rmSubVolumeMetadata(volume, group, subvolume, key, commonRmFlags{})
|
||||
}
|
||||
|
||||
// ForceRemoveMetadata attempts to forcefully remove custom metadata set on
|
||||
// the subvolume in a volume belonging to an optional subvolume group using
|
||||
// the metadata key.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume metadata rm <vol_name> <sub_name> <key_name> [--group_name <subvol_group_name>] --force
|
||||
func (fsa *FSAdmin) ForceRemoveMetadata(volume, group, subvolume, key string) error {
|
||||
return fsa.rmSubVolumeMetadata(volume, group, subvolume, key, commonRmFlags{force: true})
|
||||
}
|
||||
|
||||
func (fsa *FSAdmin) rmSubVolumeMetadata(volume, group, subvolume, key string, o commonRmFlags) error {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume metadata rm",
|
||||
"format": "json",
|
||||
"vol_name": volume,
|
||||
"sub_name": subvolume,
|
||||
"key_name": key,
|
||||
}
|
||||
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
|
||||
return fsa.marshalMgrCommand(mergeFlags(m, o)).NoData().End()
|
||||
}
|
||||
|
||||
// ListMetadata lists custom metadata (key-value pairs) set on the subvolume
|
||||
// in a volume belonging to an optional subvolume group.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume metadata ls <vol_name> <sub_name> [--group_name <subvol_group_name>]
|
||||
func (fsa *FSAdmin) ListMetadata(volume, group, subvolume string) (map[string]string, error) {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume metadata ls",
|
||||
"format": "json",
|
||||
"vol_name": volume,
|
||||
"sub_name": subvolume,
|
||||
}
|
||||
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
|
||||
return parseListKeyValues(fsa.marshalMgrCommand(m))
|
||||
}
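A hedged end-to-end sketch of the metadata calls in this file (assumes "fmt" is imported; the volume, subvolume, and key names are invented):

func metadataExample(fsa *FSAdmin) error {
	if err := fsa.SetMetadata("cephfs", NoGroup, "subvol1", "owner", "team-a"); err != nil {
		return err
	}
	v, err := fsa.GetMetadata("cephfs", NoGroup, "subvol1", "owner")
	if err != nil {
		return err
	}
	fmt.Println("owner =", v)
	all, err := fsa.ListMetadata("cephfs", NoGroup, "subvol1")
	if err != nil {
		return err
	}
	fmt.Println("metadata:", all)
	return fsa.RemoveMetadata("cephfs", NoGroup, "subvol1", "owner")
}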
@@ -0,0 +1,155 @@
//go:build !(nautilus || octopus || pacific)
|
||||
// +build !nautilus,!octopus,!pacific
|
||||
|
||||
package admin
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSetMetadata(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "group"
|
||||
subname := "subVol"
|
||||
key := "hi"
|
||||
value := "hello"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.CreateSubVolume(volume, group, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = fsa.SetMetadata(volume, group, subname, key, value)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGetMetadata(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "group"
|
||||
subname := "subVol"
|
||||
key := "hi"
|
||||
value := "hello"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.CreateSubVolume(volume, group, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = fsa.SetMetadata(volume, group, subname, key, value)
|
||||
assert.NoError(t, err)
|
||||
|
||||
metaValue, err := fsa.GetMetadata(volume, group, subname, key)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, metaValue, value)
|
||||
|
||||
err = fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestRemoveMetadata(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "group"
|
||||
subname := "subVol"
|
||||
key := "hi"
|
||||
value := "hello"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.CreateSubVolume(volume, group, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = fsa.SetMetadata(volume, group, subname, key, value)
|
||||
assert.NoError(t, err)
|
||||
|
||||
metaValue, err := fsa.GetMetadata(volume, group, subname, key)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, metaValue, value)
|
||||
|
||||
err = fsa.RemoveMetadata(volume, group, subname, key)
|
||||
assert.NoError(t, err)
|
||||
|
||||
metaValue, err = fsa.GetMetadata(volume, group, subname, key)
|
||||
assert.Error(t, err)
|
||||
|
||||
err = fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestForceRemoveMetadata(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "group"
|
||||
subname := "subVol"
|
||||
key := "hi"
|
||||
value := "hello"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.CreateSubVolume(volume, group, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = fsa.SetMetadata(volume, group, subname, key, value)
|
||||
assert.NoError(t, err)
|
||||
|
||||
metaValue, err := fsa.GetMetadata(volume, group, subname, key)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, metaValue, value)
|
||||
|
||||
err = fsa.ForceRemoveMetadata(volume, group, subname, key)
|
||||
assert.NoError(t, err)
|
||||
|
||||
metaValue, err = fsa.GetMetadata(volume, group, subname, key)
|
||||
assert.Error(t, err)
|
||||
|
||||
err = fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestListMetadata(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "group"
|
||||
subname := "subVol"
|
||||
key1 := "hi1"
|
||||
value1 := "hello1"
|
||||
key2 := "hi2"
|
||||
value2 := "hello2"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.CreateSubVolume(volume, group, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = fsa.SetMetadata(volume, group, subname, key1, value1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = fsa.SetMetadata(volume, group, subname, key2, value2)
|
||||
assert.NoError(t, err)
|
||||
|
||||
metaList, err := fsa.ListMetadata(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, len(metaList), 2)
|
||||
assert.Contains(t, metaList, key1)
|
||||
assert.Contains(t, metaList, key2)
|
||||
|
||||
err = fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
@@ -0,0 +1,27 @@
package admin

import (
	"github.com/ceph/go-ceph/common/admin/manager"
)

const mirroring = "mirroring"

// EnableMirroringModule will enable the mirroring module for cephfs.
//
// Similar To:
//
//	ceph mgr module enable mirroring [--force]
func (fsa *FSAdmin) EnableMirroringModule(force bool) error {
	mgradmin := manager.NewFromConn(fsa.conn)
	return mgradmin.EnableModule(mirroring, force)
}

// DisableMirroringModule will disable the mirroring module for cephfs.
//
// Similar To:
//
//	ceph mgr module disable mirroring
func (fsa *FSAdmin) DisableMirroringModule() error {
	mgradmin := manager.NewFromConn(fsa.conn)
	return mgradmin.DisableModule(mirroring)
}
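A sketch of how these module toggles combine with the SnapshotMirrorAdmin defined in mirror.go below; the file system name and directory path are placeholders:

func enableMirroringExample(fsa *FSAdmin) error {
	// enable the mgr mirroring module, then turn on snapshot mirroring for
	// one file system and register a directory to mirror
	if err := fsa.EnableMirroringModule(false); err != nil {
		return err
	}
	sma := fsa.SnapshotMirror()
	if err := sma.Enable("cephfs"); err != nil {
		return err
	}
	return sma.Add("cephfs", "/projects")
}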
242 pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/admin/mirror.go Normal file
@@ -0,0 +1,242 @@
package admin
|
||||
|
||||
import (
|
||||
ccom "github.com/ceph/go-ceph/common/commands"
|
||||
"github.com/ceph/go-ceph/internal/commands"
|
||||
)
|
||||
|
||||
// SnapshotMirrorAdmin helps administer the snapshot mirroring features of
|
||||
// cephfs. Snapshot mirroring is only available in ceph pacific and later.
|
||||
type SnapshotMirrorAdmin struct {
|
||||
conn ccom.MgrCommander
|
||||
}
|
||||
|
||||
// SnapshotMirror returns a new SnapshotMirrorAdmin to be used for the
|
||||
// administration of snapshot mirroring features.
|
||||
func (fsa *FSAdmin) SnapshotMirror() *SnapshotMirrorAdmin {
|
||||
return &SnapshotMirrorAdmin{conn: fsa.conn}
|
||||
}
|
||||
|
||||
// Enable snapshot mirroring for the given file system.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs snapshot mirror enable <fs_name>
|
||||
func (sma *SnapshotMirrorAdmin) Enable(fsname string) error {
|
||||
m := map[string]string{
|
||||
"prefix": "fs snapshot mirror enable",
|
||||
"fs_name": fsname,
|
||||
"format": "json",
|
||||
}
|
||||
return commands.MarshalMgrCommand(sma.conn, m).NoStatus().EmptyBody().End()
|
||||
}
|
||||
|
||||
// Disable snapshot mirroring for the given file system.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs snapshot mirror disable <fs_name>
|
||||
func (sma *SnapshotMirrorAdmin) Disable(fsname string) error {
|
||||
m := map[string]string{
|
||||
"prefix": "fs snapshot mirror disable",
|
||||
"fs_name": fsname,
|
||||
"format": "json",
|
||||
}
|
||||
return commands.MarshalMgrCommand(sma.conn, m).NoStatus().EmptyBody().End()
|
||||
}
|
||||
|
||||
// Add a path in the file system to be mirrored.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs snapshot mirror add <fs_name> <path>
|
||||
func (sma *SnapshotMirrorAdmin) Add(fsname, path string) error {
|
||||
m := map[string]string{
|
||||
"prefix": "fs snapshot mirror add",
|
||||
"fs_name": fsname,
|
||||
"path": path,
|
||||
"format": "json",
|
||||
}
|
||||
return commands.MarshalMgrCommand(sma.conn, m).NoStatus().EmptyBody().End()
|
||||
}
|
||||
|
||||
// Remove a path in the file system from mirroring.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs snapshot mirror remove <fs_name> <path>
|
||||
func (sma *SnapshotMirrorAdmin) Remove(fsname, path string) error {
|
||||
m := map[string]string{
|
||||
"prefix": "fs snapshot mirror remove",
|
||||
"fs_name": fsname,
|
||||
"path": path,
|
||||
"format": "json",
|
||||
}
|
||||
return commands.MarshalMgrCommand(sma.conn, m).NoStatus().EmptyBody().End()
|
||||
}
|
||||
|
||||
type bootstrapTokenResponse struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
// CreatePeerBootstrapToken returns a token that can be used to create
|
||||
// a peering association between this site and another site.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs snapshot mirror peer_bootstrap create <fs_name> <client_entity> <site-name>
|
||||
func (sma *SnapshotMirrorAdmin) CreatePeerBootstrapToken(
|
||||
fsname, client, site string) (string, error) {
|
||||
m := map[string]string{
|
||||
"prefix": "fs snapshot mirror peer_bootstrap create",
|
||||
"fs_name": fsname,
|
||||
"client_name": client,
|
||||
"format": "json",
|
||||
}
|
||||
if site != "" {
|
||||
m["site_name"] = site
|
||||
}
|
||||
var bt bootstrapTokenResponse
|
||||
err := commands.MarshalMgrCommand(sma.conn, m).NoStatus().Unmarshal(&bt).End()
|
||||
return bt.Token, err
|
||||
}
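A hedged sketch of the peer bootstrap flow the comment above describes, matching the sequence used later in TestMirroring: the token is created on the peer (secondary) cluster and imported on the primary. Both SnapshotMirrorAdmin values and the site name are assumptions for illustration.

func bootstrapPeers(primary, secondary *SnapshotMirrorAdmin) error {
	// create the token on the peer cluster...
	token, err := secondary.CreatePeerBootstrapToken("cephfs", "client.mirror_remote", "site-b")
	if err != nil {
		return err
	}
	// ...and import it on the primary cluster
	return primary.ImportPeerBoostrapToken("cephfs", token)
}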
|
||||
|
||||
// ImportPeerBoostrapToken creates an association between another site, one
|
||||
// that has provided a token, with the current site.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs snapshot mirror peer_bootstrap import <fs_name> <token>
|
||||
func (sma *SnapshotMirrorAdmin) ImportPeerBoostrapToken(fsname, token string) error {
|
||||
m := map[string]string{
|
||||
"prefix": "fs snapshot mirror peer_bootstrap import",
|
||||
"fs_name": fsname,
|
||||
"token": token,
|
||||
"format": "json",
|
||||
}
|
||||
return commands.MarshalMgrCommand(sma.conn, m).NoStatus().EmptyBody().End()
|
||||
}
|
||||
|
||||
// DaemonID represents the ID of a cephfs mirroring daemon.
|
||||
type DaemonID uint
|
||||
|
||||
// FileSystemID represents the ID of a cephfs file system.
|
||||
type FileSystemID uint
|
||||
|
||||
// PeerUUID represents the UUID of a cephfs mirroring peer.
|
||||
type PeerUUID string
|
||||
|
||||
// DaemonStatusPeer contains fields detailing a remote peer.
|
||||
type DaemonStatusPeer struct {
|
||||
ClientName string `json:"client_name"`
|
||||
ClusterName string `json:"cluster_name"`
|
||||
FSName string `json:"fs_name"`
|
||||
}
|
||||
|
||||
// DaemonStatusPeerStats contains fields detailing a remote peer's stats.
|
||||
type DaemonStatusPeerStats struct {
|
||||
FailureCount uint64 `json:"failure_count"`
|
||||
RecoveryCount uint64 `json:"recovery_count"`
|
||||
}
|
||||
|
||||
// DaemonStatusPeerInfo contains fields representing information about a remote peer.
|
||||
type DaemonStatusPeerInfo struct {
|
||||
UUID PeerUUID `json:"uuid"`
|
||||
Remote DaemonStatusPeer `json:"remote"`
|
||||
Stats DaemonStatusPeerStats `json:"stats"`
|
||||
}
|
||||
|
||||
// DaemonStatusFileSystemInfo represents information about a mirrored file system.
|
||||
type DaemonStatusFileSystemInfo struct {
|
||||
FileSystemID FileSystemID `json:"filesystem_id"`
|
||||
Name string `json:"name"`
|
||||
DirectoryCount int64 `json:"directory_count"`
|
||||
Peers []DaemonStatusPeerInfo `json:"peers"`
|
||||
}
|
||||
|
||||
// DaemonStatusInfo maps file system IDs to information about that file system.
|
||||
type DaemonStatusInfo struct {
|
||||
DaemonID DaemonID `json:"daemon_id"`
|
||||
FileSystems []DaemonStatusFileSystemInfo `json:"filesystems"`
|
||||
}
|
||||
|
||||
// DaemonStatusResults maps mirroring daemon IDs to information about that
|
||||
// mirroring daemon.
|
||||
type DaemonStatusResults []DaemonStatusInfo
|
||||
|
||||
func parseDaemonStatus(res response) (DaemonStatusResults, error) {
|
||||
var dsr DaemonStatusResults
|
||||
if err := res.NoStatus().Unmarshal(&dsr).End(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dsr, nil
|
||||
}
|
||||
|
||||
// DaemonStatus returns information on the status of cephfs mirroring daemons
|
||||
// associated with the given file system.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs snapshot mirror daemon status <fs_name>
|
||||
func (sma *SnapshotMirrorAdmin) DaemonStatus(fsname string) (
|
||||
DaemonStatusResults, error) {
|
||||
// ---
|
||||
m := map[string]string{
|
||||
"prefix": "fs snapshot mirror daemon status",
|
||||
"fs_name": fsname,
|
||||
"format": "json",
|
||||
}
|
||||
return parseDaemonStatus(commands.MarshalMgrCommand(sma.conn, m))
|
||||
}
|
||||
|
||||
// PeerInfo includes information about a cephfs mirroring peer.
|
||||
type PeerInfo struct {
|
||||
ClientName string `json:"client_name"`
|
||||
SiteName string `json:"site_name"`
|
||||
FSName string `json:"fs_name"`
|
||||
MonHost string `json:"mon_host"`
|
||||
}
|
||||
|
||||
// PeerListResults maps a peer's UUID to information about that peer.
|
||||
type PeerListResults map[PeerUUID]PeerInfo
|
||||
|
||||
func parsePeerList(res response) (PeerListResults, error) {
|
||||
var plr PeerListResults
|
||||
if err := res.NoStatus().Unmarshal(&plr).End(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return plr, nil
|
||||
}
|
||||
|
||||
// PeerList returns information about peers associated with the given file system.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs snapshot mirror peer_list <fs_name>
|
||||
func (sma *SnapshotMirrorAdmin) PeerList(fsname string) (
|
||||
PeerListResults, error) {
|
||||
// ---
|
||||
m := map[string]string{
|
||||
"prefix": "fs snapshot mirror peer_list",
|
||||
"fs_name": fsname,
|
||||
"format": "json",
|
||||
}
|
||||
return parsePeerList(commands.MarshalMgrCommand(sma.conn, m))
|
||||
}
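A short reporting sketch using the two queries above (assumes "fmt" is imported; the output formatting is illustrative):

func reportMirrorState(sma *SnapshotMirrorAdmin, fsname string) error {
	ds, err := sma.DaemonStatus(fsname)
	if err != nil {
		return err
	}
	for _, d := range ds {
		for _, fs := range d.FileSystems {
			fmt.Printf("daemon %d mirrors %q (%d dirs, %d peers)\n",
				d.DaemonID, fs.Name, fs.DirectoryCount, len(fs.Peers))
		}
	}
	peers, err := sma.PeerList(fsname)
	if err != nil {
		return err
	}
	for uuid, p := range peers {
		fmt.Printf("peer %s -> %s@%s (%s)\n", uuid, p.ClientName, p.SiteName, p.FSName)
	}
	return nil
}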
|
||||
|
||||
/*
|
||||
DirMap - figure out what last_shuffled is supposed to mean and, if it is a time
|
||||
like it seems to be, how best to represent in Go.
|
||||
|
||||
DirMap TODO
|
||||
ceph fs snapshot mirror dirmap
|
||||
func (sma *SnapshotMirrorAdmin) DirMap(fsname, path string) error {
|
||||
m := map[string]string{
|
||||
"prefix": "fs snapshot mirror dirmap",
|
||||
"fs_name": fsname,
|
||||
"path": path,
|
||||
"format": "json",
|
||||
}
|
||||
return commands.MarshalMgrCommand(sma.conn, m).NoStatus().EmptyBody().End()
|
||||
}
|
||||
*/
|
||||
@@ -0,0 +1,168 @@
package admin
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// sampleOldDaemonStatus1 and sampleOldDaemonStatus2 are examples of the json
|
||||
// returned for daemon status before Ceph v16.2.5. They are not used by the tests
|
||||
// as the code now only supports the format of v16.2.5 and later. This is
|
||||
// retained for reference and the off chance that someone asks go-ceph to
|
||||
// support the older format.
|
||||
|
||||
var sampleOldDaemonStatus1 = `
|
||||
{"4157": {"1": {"name": "cephfs", "directory_count": 0, "peers": {}}}}
|
||||
`
|
||||
|
||||
var sampleOldDaemonStatus2 = `
|
||||
{
|
||||
"4154": {
|
||||
"1": {
|
||||
"name": "cephfs",
|
||||
"directory_count": 1,
|
||||
"peers": {
|
||||
"d284fccd-6110-4e94-843c-78ecda3aef38": {
|
||||
"remote": {"client_name": "client.mirror_remote", "cluster_name": "ceph_b", "fs_name": "cephfs"},
|
||||
"stats": {"failure_count": 1, "recovery_count": 0}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
var sampleDaemonStatus1 = `
|
||||
[
|
||||
{
|
||||
"daemon_id": 4115,
|
||||
"filesystems": [
|
||||
{
|
||||
"filesystem_id": 1,
|
||||
"name": "cephfs",
|
||||
"directory_count": 0,
|
||||
"peers": []
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
`
|
||||
|
||||
var sampleDaemonStatus2 = `
|
||||
[
|
||||
{
|
||||
"daemon_id": 4143,
|
||||
"filesystems": [
|
||||
{
|
||||
"filesystem_id": 1,
|
||||
"name": "cephfs",
|
||||
"directory_count": 1,
|
||||
"peers": [
|
||||
{
|
||||
"uuid": "43c50942-9dba-4f66-8f9b-102378fa863e",
|
||||
"remote": {
|
||||
"client_name": "client.mirror_remote",
|
||||
"cluster_name": "ceph_b",
|
||||
"fs_name": "cephfs"
|
||||
},
|
||||
"stats": {
|
||||
"failure_count": 1,
|
||||
"recovery_count": 0
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
`
|
||||
|
||||
var samplePeerList1 = `
|
||||
{
|
||||
"f138660d-7b22-4036-95ba-0fda727bff40": {
|
||||
"client_name": "client.mirror_remote",
|
||||
"site_name": "ceph_b",
|
||||
"fs_name": "cephfs",
|
||||
"mon_host": "test_ceph_b"
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
func TestParseDaemonStatus(t *testing.T) {
|
||||
t.Run("error", func(t *testing.T) {
|
||||
r := newResponse(nil, "", errors.New("snark"))
|
||||
_, err := parseDaemonStatus(r)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "snark", err.Error())
|
||||
})
|
||||
t.Run("statusSet", func(t *testing.T) {
|
||||
r := newResponse(nil, "oopsie", nil)
|
||||
_, err := parseDaemonStatus(r)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
r := newResponse([]byte(sampleDaemonStatus1), "", nil)
|
||||
ds, err := parseDaemonStatus(r)
|
||||
assert.NoError(t, err)
|
||||
if assert.NotNil(t, ds) && assert.Len(t, ds, 1) {
|
||||
dsi := ds[0]
|
||||
assert.Equal(t, DaemonID(4115), dsi.DaemonID)
|
||||
if assert.Len(t, dsi.FileSystems, 1) {
|
||||
fs := dsi.FileSystems[0]
|
||||
assert.Len(t, fs.Peers, 0)
|
||||
assert.Equal(t, "cephfs", fs.Name)
|
||||
assert.Equal(t, int64(0), fs.DirectoryCount)
|
||||
}
|
||||
}
|
||||
})
|
||||
t.Run("ok2", func(t *testing.T) {
|
||||
r := newResponse([]byte(sampleDaemonStatus2), "", nil)
|
||||
ds, err := parseDaemonStatus(r)
|
||||
assert.NoError(t, err)
|
||||
if assert.NotNil(t, ds) && assert.Len(t, ds, 1) {
|
||||
dsi := ds[0]
|
||||
assert.Equal(t, DaemonID(4143), dsi.DaemonID)
|
||||
if assert.Len(t, dsi.FileSystems, 1) {
|
||||
fs := dsi.FileSystems[0]
|
||||
assert.Equal(t, "cephfs", fs.Name)
|
||||
assert.Equal(t, int64(1), fs.DirectoryCount)
|
||||
if assert.Len(t, fs.Peers, 1) {
|
||||
p := fs.Peers[0]
|
||||
assert.Equal(t, "ceph_b", p.Remote.ClusterName)
|
||||
assert.Equal(t, "cephfs", p.Remote.FSName)
|
||||
assert.Equal(t, uint64(1), p.Stats.FailureCount)
|
||||
assert.Equal(t, uint64(0), p.Stats.RecoveryCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestParsePeerList(t *testing.T) {
|
||||
t.Run("error", func(t *testing.T) {
|
||||
r := newResponse(nil, "", errors.New("snark"))
|
||||
_, err := parsePeerList(r)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "snark", err.Error())
|
||||
})
|
||||
t.Run("statusSet", func(t *testing.T) {
|
||||
r := newResponse(nil, "oopsie", nil)
|
||||
_, err := parsePeerList(r)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
r := newResponse([]byte(samplePeerList1), "", nil)
|
||||
plr, err := parsePeerList(r)
|
||||
assert.NoError(t, err)
|
||||
if assert.NotNil(t, plr) && assert.Len(t, plr, 1) {
|
||||
p, ok := plr[PeerUUID("f138660d-7b22-4036-95ba-0fda727bff40")]
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, "client.mirror_remote", p.ClientName)
|
||||
assert.Equal(t, "ceph_b", p.SiteName)
|
||||
assert.Equal(t, "cephfs", p.FSName)
|
||||
assert.Equal(t, "test_ceph_b", p.MonHost)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,255 @@
//go:build !nautilus && !octopus
|
||||
// +build !nautilus,!octopus
|
||||
|
||||
package admin
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
pth "path"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/ceph/go-ceph/cephfs"
|
||||
"github.com/ceph/go-ceph/common/admin/manager"
|
||||
)
|
||||
|
||||
func mirrorConfig() string {
|
||||
return os.Getenv("MIRROR_CONF")
|
||||
}
|
||||
|
||||
const (
|
||||
noForce = false
|
||||
mirrorClient = "client.mirror_remote"
|
||||
)
|
||||
|
||||
func waitForMirroring(t *testing.T, fsa *FSAdmin) {
|
||||
mgradmin := manager.NewFromConn(fsa.conn)
|
||||
for i := 0; i < 30; i++ {
|
||||
modinfo, err := mgradmin.ListModules()
|
||||
require.NoError(t, err)
|
||||
for _, emod := range modinfo.EnabledModules {
|
||||
if emod == "mirroring" {
|
||||
// give additional time for mgr to restart(?)
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
return
|
||||
}
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
t.Fatalf("timed out waiting for mirroring module")
|
||||
}
|
||||
|
||||
func TestMirroring(t *testing.T) {
|
||||
if mirrorConfig() == "" {
|
||||
t.Skip("no mirror config available")
|
||||
}
|
||||
|
||||
fsa1 := getFSAdmin(t)
|
||||
fsname := "cephfs"
|
||||
|
||||
require.NotNil(t, fsa1.conn)
|
||||
err := fsa1.EnableMirroringModule(noForce)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa1.DisableMirroringModule()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
require.NoError(t, err)
|
||||
waitForMirroring(t, fsa1)
|
||||
|
||||
smadmin1 := fsa1.SnapshotMirror()
|
||||
err = smadmin1.Enable(fsname)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := smadmin1.Disable(fsname)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
fsa2 := newFSAdmin(t, mirrorConfig())
|
||||
err = fsa2.EnableMirroringModule(noForce)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa2.DisableMirroringModule()
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
waitForMirroring(t, fsa2)
|
||||
|
||||
smadmin2 := fsa2.SnapshotMirror()
|
||||
err = smadmin2.Enable(fsname)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := smadmin2.Disable(fsname)
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
// from https://docs.ceph.com/en/pacific/dev/cephfs-mirroring/
|
||||
// "Peer bootstrap involves creating a bootstrap token on the peer cluster"
|
||||
// and "Import the bootstrap token in the primary cluster"
|
||||
token, err := smadmin2.CreatePeerBootstrapToken(fsname, mirrorClient, "ceph_b")
|
||||
require.NoError(t, err)
|
||||
err = smadmin1.ImportPeerBoostrapToken(fsname, token)
|
||||
require.NoError(t, err)
|
||||
|
||||
// we need a path to mirror
|
||||
path := "/wonderland"
|
||||
|
||||
mount1 := fsConnect(t, "")
|
||||
defer func(mount *cephfs.MountInfo) {
|
||||
assert.NoError(t, mount.Unmount())
|
||||
assert.NoError(t, mount.Release())
|
||||
}(mount1)
|
||||
|
||||
mount2 := fsConnect(t, mirrorConfig())
|
||||
defer func(mount *cephfs.MountInfo) {
|
||||
assert.NoError(t, mount.Unmount())
|
||||
assert.NoError(t, mount.Release())
|
||||
}(mount2)
|
||||
|
||||
err = mount1.MakeDir(path, 0770)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err = mount2.ChangeDir("/")
|
||||
assert.NoError(t, err)
|
||||
err = mount1.RemoveDir(path)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
err = mount2.MakeDir(path, 0770)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err = mount2.ChangeDir("/")
|
||||
assert.NoError(t, err)
|
||||
err = mount2.RemoveDir(path)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = smadmin1.Add(fsname, path)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = mount1.ChangeDir(path)
|
||||
require.NoError(t, err)
|
||||
|
||||
// write some dirs & files
|
||||
err = mount1.MakeDir("drink_me", 0770)
|
||||
require.NoError(t, err)
|
||||
err = mount1.MakeDir("eat_me", 0770)
|
||||
require.NoError(t, err)
|
||||
writeFile(t, mount1, "drink_me/bottle1.txt",
|
||||
[]byte("magic potions #1\n"))
|
||||
|
||||
snapname1 := "alice"
|
||||
err = mount1.MakeDir(pth.Join(snapDir, snapname1), 0700)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := mount1.RemoveDir(pth.Join(snapDir, snapname1))
|
||||
assert.NoError(t, err)
|
||||
err = mount2.RemoveDir(pth.Join(snapDir, snapname1))
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = mount2.ChangeDir(path)
|
||||
require.NoError(t, err)
|
||||
|
||||
// wait a bit for the snapshot to propagate and the dirs to be created on
|
||||
// the remote fs.
|
||||
for i := 0; i < 60; i++ {
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
_, err1 := mount2.Statx("drink_me", cephfs.StatxBasicStats, 0)
|
||||
_, err2 := mount2.Statx("eat_me", cephfs.StatxBasicStats, 0)
|
||||
if err1 == nil && err2 == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
waitforpeers:
|
||||
for i := 0; i < 60; i++ {
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
dstatus, err := smadmin1.DaemonStatus(fsname)
|
||||
assert.NoError(t, err)
|
||||
for _, dsinfo := range dstatus {
|
||||
for _, fsinfo := range dsinfo.FileSystems {
|
||||
if len(fsinfo.Peers) > 0 {
|
||||
break waitforpeers
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
p, err := smadmin1.PeerList(fsname)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, p, 1)
|
||||
for _, peer := range p {
|
||||
assert.Equal(t, "cephfs", peer.FSName)
|
||||
}
|
||||
|
||||
stx, err := mount2.Statx("drink_me", cephfs.StatxBasicStats, 0)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, uint16(0040000), stx.Mode&0040000) // is dir?
|
||||
}
|
||||
|
||||
stx, err = mount2.Statx("eat_me", cephfs.StatxBasicStats, 0)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, uint16(0040000), stx.Mode&0040000) // is dir?
|
||||
}
|
||||
|
||||
stx, err = mount2.Statx("drink_me/bottle1.txt", cephfs.StatxBasicStats, 0)
|
||||
if assert.NoError(t, err) {
|
||||
assert.Equal(t, uint16(0100000), stx.Mode&0100000) // is reg?
|
||||
assert.Equal(t, uint64(17), stx.Size)
|
||||
}
|
||||
data := readFile(t, mount2, "drink_me/bottle1.txt")
|
||||
assert.Equal(t, "magic potions #1\n", string(data))
|
||||
|
||||
err = mount1.Unlink("drink_me/bottle1.txt")
|
||||
require.NoError(t, err)
|
||||
err = mount1.RemoveDir("drink_me")
|
||||
require.NoError(t, err)
|
||||
err = mount1.RemoveDir("eat_me")
|
||||
require.NoError(t, err)
|
||||
|
||||
snapname2 := "rabbit"
|
||||
err = mount1.MakeDir(pth.Join(snapDir, snapname2), 0700)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := mount1.RemoveDir(pth.Join(snapDir, snapname2))
|
||||
assert.NoError(t, err)
|
||||
err = mount2.RemoveDir(pth.Join(snapDir, snapname2))
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
// wait a bit for the snapshot to propagate and the dirs to be removed on
|
||||
// the remote fs.
|
||||
for i := 0; i < 60; i++ {
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
_, err1 := mount2.Statx("drink_me", cephfs.StatxBasicStats, 0)
|
||||
_, err2 := mount2.Statx("eat_me", cephfs.StatxBasicStats, 0)
|
||||
if err1 != nil && err2 != nil {
|
||||
break
|
||||
}
|
||||
|
||||
}
|
||||
_, err = mount2.Statx("drink_me", cephfs.StatxBasicStats, 0)
|
||||
if assert.Error(t, err) {
|
||||
var ec errorWithCode
|
||||
if assert.True(t, errors.As(err, &ec)) {
|
||||
assert.Equal(t, -2, ec.ErrorCode())
|
||||
}
|
||||
}
|
||||
_, err = mount2.Statx("eat_me", cephfs.StatxBasicStats, 0)
|
||||
if assert.Error(t, err) {
|
||||
var ec errorWithCode
|
||||
if assert.True(t, errors.As(err, &ec)) {
|
||||
assert.Equal(t, -2, ec.ErrorCode())
|
||||
}
|
||||
}
|
||||
|
||||
err = smadmin1.Remove(fsname, path)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
type errorWithCode interface {
|
||||
ErrorCode() int
|
||||
}
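
The test above exercises the full mirroring setup by hand. The sketch below only condenses that flow into a single helper for illustration; it assumes fsa1 (primary) and fsa2 (peer) are already-connected *FSAdmin handles, and that fsname, path and siteName match the caller's clusters (the "client.mirror_remote" name mirrors the constant used in the test).

// setupMirroring sketches the workflow from TestMirroring: enable the mgr
// mirroring module and snapshot mirroring on both clusters, bootstrap the
// peer, and register a directory path on the primary.
func setupMirroring(fsa1, fsa2 *FSAdmin, fsname, path, siteName string) error {
	for _, fsa := range []*FSAdmin{fsa1, fsa2} {
		if err := fsa.EnableMirroringModule(false); err != nil {
			return err
		}
		if err := fsa.SnapshotMirror().Enable(fsname); err != nil {
			return err
		}
	}
	// create the bootstrap token on the peer cluster ...
	token, err := fsa2.SnapshotMirror().CreatePeerBootstrapToken(fsname, "client.mirror_remote", siteName)
	if err != nil {
		return err
	}
	// ... and import it on the primary cluster
	if err := fsa1.SnapshotMirror().ImportPeerBoostrapToken(fsname, token); err != nil {
		return err
	}
	// register the directory whose snapshots should be mirrored
	return fsa1.SnapshotMirror().Add(fsname, path)
}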
|
||||
46
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/admin/pin.go
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
//go:build !nautilus
|
||||
// +build !nautilus
|
||||
|
||||
package admin
|
||||
|
||||
// PinSubVolume pins subvolume to ranks according to policies. A valid pin
|
||||
// setting value depends on the type of pin as described in the docs from
|
||||
// https://docs.ceph.com/en/latest/cephfs/multimds/#cephfs-pinning and
|
||||
// https://docs.ceph.com/en/latest/cephfs/multimds/#setting-subtree-partitioning-policies
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume pin <vol_name> <sub_name> <pin_type> <pin_setting>
|
||||
func (fsa *FSAdmin) PinSubVolume(volume, subvolume, pintype, pinsetting string) (string, error) {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume pin",
|
||||
"format": "json",
|
||||
"vol_name": volume,
|
||||
"sub_name": subvolume,
|
||||
"pin_type": pintype,
|
||||
"pin_setting": pinsetting,
|
||||
}
|
||||
|
||||
return parsePathResponse(fsa.marshalMgrCommand(m))
|
||||
}
|
||||
|
||||
// PinSubVolumeGroup pins a subvolume group to ranks according to policies. A valid pin
|
||||
// setting value depends on the type of pin as described in the docs from
|
||||
// https://docs.ceph.com/en/latest/cephfs/multimds/#cephfs-pinning and
|
||||
// https://docs.ceph.com/en/latest/cephfs/multimds/#setting-subtree-partitioning-policies
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolumegroup pin <vol_name> <group_name> <pin_type> <pin_setting>
|
||||
func (fsa *FSAdmin) PinSubVolumeGroup(volume, group, pintype, pinsetting string) (string, error) {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolumegroup pin",
|
||||
"format": "json",
|
||||
"vol_name": volume,
|
||||
"group_name": group,
|
||||
"pin_type": pintype,
|
||||
"pin_setting": pinsetting,
|
||||
}
|
||||
|
||||
return parsePathResponse(fsa.marshalMgrCommand(m))
|
||||
}
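
As a usage illustration only (not part of the file above): assuming fsa is a connected *FSAdmin and the volume, subvolume and group named below exist, the pin calls look like this. Valid pin_type/pin_setting pairs are described in the multimds docs linked in the comments.

func pinExample(fsa *FSAdmin) error {
	// pin a subvolume with the "distributed" policy (setting 0 or 1)
	if _, err := fsa.PinSubVolume("cephfs", "subvol1", "distributed", "1"); err != nil {
		return err
	}
	// pin a subvolume group with the "random" ephemeral policy
	_, err := fsa.PinSubVolumeGroup("cephfs", "group1", "random", "0.01")
	return err
}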
|
||||
|
|
@ -0,0 +1,59 @@
|
|||
//go:build !nautilus
|
||||
// +build !nautilus
|
||||
|
||||
package admin
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPinSubVolume(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
|
||||
subvolname := "cephfs_subvol"
|
||||
err := fsa.CreateSubVolume(volume, NoGroup, subvolname, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, NoGroup, subvolname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
var ec errorCode
|
||||
_, err = fsa.PinSubVolume(volume, subvolname, "distributed", "2")
|
||||
assert.True(t, errors.As(err, &ec))
|
||||
assert.Equal(t, -22, ec.ErrorCode())
|
||||
|
||||
_, err = fsa.PinSubVolume(volume, subvolname, "distributed", "1")
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestPinSubVolumeGroup(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
|
||||
subvolumegroup := "cephfs_subvol_group"
|
||||
err := fsa.CreateSubVolumeGroup(volume, subvolumegroup, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeGroup(volume, subvolumegroup)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
// mds_export_ephemeral_random_max has a default value of 0.01. EINVAL
|
||||
// is returned for an attempt to set a value beyond this config.
|
||||
var ec errorCode
|
||||
_, err = fsa.PinSubVolumeGroup(volume, subvolumegroup, "random", "0.5")
|
||||
assert.True(t, errors.As(err, &ec))
|
||||
assert.Equal(t, -22, ec.ErrorCode())
|
||||
|
||||
_, err = fsa.PinSubVolumeGroup(volume, subvolumegroup, "random", "0.01")
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
type errorCode interface {
|
||||
ErrorCode() int
|
||||
}
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
package admin
|
||||
|
||||
import (
|
||||
"github.com/ceph/go-ceph/internal/commands"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrStatusNotEmpty is an alias for commands.ErrStatusNotEmpty
|
||||
ErrStatusNotEmpty = commands.ErrStatusNotEmpty
|
||||
// ErrBodyNotEmpty is an alias for commands.ErrBodyNotEmpty
|
||||
ErrBodyNotEmpty = commands.ErrBodyNotEmpty
|
||||
)
|
||||
|
||||
type response = commands.Response
|
||||
|
||||
// NotImplementedError is an alias for commands.NotImplementedError.
|
||||
type NotImplementedError = commands.NotImplementedError
|
||||
|
||||
// newResponse returns a response.
|
||||
func newResponse(b []byte, s string, e error) response {
|
||||
return commands.NewResponse(b, s, e)
|
||||
}
|
||||
|
|
@ -0,0 +1,112 @@
|
|||
//go:build !(nautilus || octopus || pacific)
|
||||
// +build !nautilus,!octopus,!pacific
|
||||
|
||||
package admin
|
||||
|
||||
// GetSnapshotMetadata gets custom metadata on the subvolume snapshot in a
|
||||
// volume belonging to an optional subvolume group based on provided key name.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume snapshot metadata get <vol_name> <sub_name> <snap_name> <key_name> [--group_name <subvol_group_name>]
|
||||
func (fsa *FSAdmin) GetSnapshotMetadata(volume, group, subvolume, snapname, key string) (string, error) {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume snapshot metadata get",
|
||||
"format": "json",
|
||||
"vol_name": volume,
|
||||
"sub_name": subvolume,
|
||||
"snap_name": snapname,
|
||||
"key_name": key,
|
||||
}
|
||||
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
|
||||
return parsePathResponse(fsa.marshalMgrCommand(m))
|
||||
}
|
||||
|
||||
// SetSnapshotMetadata sets custom metadata on the subvolume snapshot in a
|
||||
// volume belonging to an optional subvolume group as a key-value pair.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume snapshot metadata set <vol_name> <sub_name> <snap_name> <key_name> <value> [--group_name <subvol_group_name>]
|
||||
func (fsa *FSAdmin) SetSnapshotMetadata(volume, group, subvolume, snapname, key, value string) error {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume snapshot metadata set",
|
||||
"format": "json",
|
||||
"vol_name": volume,
|
||||
"sub_name": subvolume,
|
||||
"snap_name": snapname,
|
||||
"key_name": key,
|
||||
"value": value,
|
||||
}
|
||||
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
|
||||
return fsa.marshalMgrCommand(m).NoData().End()
|
||||
}
|
||||
|
||||
// RemoveSnapshotMetadata removes custom metadata set on the subvolume
|
||||
// snapshot in a volume belonging to an optional subvolume group using the
|
||||
// metadata key.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume snapshot metadata rm <vol_name> <sub_name> <snap_name> <key_name> [--group_name <subvol_group_name>]
|
||||
func (fsa *FSAdmin) RemoveSnapshotMetadata(volume, group, subvolume, snapname, key string) error {
|
||||
return fsa.rmSubVolumeSnapShotMetadata(volume, group, subvolume, snapname, key, commonRmFlags{})
|
||||
}
|
||||
|
||||
// ForceRemoveSnapshotMetadata attempts to forcefully remove custom metadata
|
||||
// set on the subvolume snapshot in a volume belonging to an optional
|
||||
// subvolume group using the metadata key.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume snapshot metadata rm <vol_name> <sub_name> <snap_name> <key_name> [--group_name <subvol_group_name>] --force
|
||||
func (fsa *FSAdmin) ForceRemoveSnapshotMetadata(volume, group, subvolume, snapname, key string) error {
|
||||
return fsa.rmSubVolumeSnapShotMetadata(volume, group, subvolume, snapname, key, commonRmFlags{force: true})
|
||||
}
|
||||
|
||||
func (fsa *FSAdmin) rmSubVolumeSnapShotMetadata(volume, group, subvolume, snapname, key string, o commonRmFlags) error {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume snapshot metadata rm",
|
||||
"format": "json",
|
||||
"vol_name": volume,
|
||||
"sub_name": subvolume,
|
||||
"snap_name": snapname,
|
||||
"key_name": key,
|
||||
}
|
||||
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
|
||||
return fsa.marshalMgrCommand(mergeFlags(m, o)).NoData().End()
|
||||
}
|
||||
|
||||
// ListSnapshotMetadata lists custom metadata (key-value pairs) set on the subvolume
|
||||
// snapshot in a volume belonging to an optional subvolume group.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume snapshot metadata ls <vol_name> <sub_name> <snap_name> [--group_name <subvol_group_name>]
|
||||
func (fsa *FSAdmin) ListSnapshotMetadata(volume, group, subvolume, snapname string) (map[string]string, error) {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume snapshot metadata ls",
|
||||
"format": "json",
|
||||
"vol_name": volume,
|
||||
"sub_name": subvolume,
|
||||
"snap_name": snapname,
|
||||
}
|
||||
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
|
||||
return parseListKeyValues(fsa.marshalMgrCommand(m))
|
||||
}
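
A short round-trip sketch for these calls, assuming fsa is a connected *FSAdmin and that the volume, group, subvolume and snapshot named below already exist (all names are placeholders):

func snapshotMetadataExample(fsa *FSAdmin) error {
	const vol, grp, sub, snap = "cephfs", "group", "subVol", "snap1"
	if err := fsa.SetSnapshotMetadata(vol, grp, sub, snap, "owner", "alice"); err != nil {
		return err
	}
	owner, err := fsa.GetSnapshotMetadata(vol, grp, sub, snap, "owner")
	if err != nil {
		return err
	}
	_ = owner // "alice"
	kv, err := fsa.ListSnapshotMetadata(vol, grp, sub, snap)
	if err != nil {
		return err
	}
	_ = kv // map of all key-value pairs, contains the "owner" key
	return fsa.RemoveSnapshotMetadata(vol, grp, sub, snap, "owner")
}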
|
||||
|
|
@ -0,0 +1,179 @@
|
|||
//go:build !(nautilus || octopus || pacific)
|
||||
// +build !nautilus,!octopus,!pacific
|
||||
|
||||
package admin
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSetSnapshotMetadata(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "group"
|
||||
subname := "subVol"
|
||||
snapname := "snap1"
|
||||
key := "hi"
|
||||
value := "hello"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.CreateSubVolume(volume, group, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = fsa.SetSnapshotMetadata(volume, group, subname, snapname, key, value)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = fsa.RemoveSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGetSnapshotMetadata(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "group"
|
||||
subname := "subVol"
|
||||
snapname := "snap1"
|
||||
key := "hi"
|
||||
value := "hello"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.CreateSubVolume(volume, group, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = fsa.SetSnapshotMetadata(volume, group, subname, snapname, key, value)
|
||||
assert.NoError(t, err)
|
||||
|
||||
metaValue, err := fsa.GetSnapshotMetadata(volume, group, subname, snapname, key)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, metaValue, value)
|
||||
|
||||
err = fsa.RemoveSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestRemoveSnapshotMetadata(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "group"
|
||||
subname := "subVol"
|
||||
snapname := "snap1"
|
||||
key := "hi"
|
||||
value := "hello"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.CreateSubVolume(volume, group, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = fsa.SetSnapshotMetadata(volume, group, subname, snapname, key, value)
|
||||
assert.NoError(t, err)
|
||||
|
||||
metaValue, err := fsa.GetSnapshotMetadata(volume, group, subname, snapname, key)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, metaValue, value)
|
||||
|
||||
err = fsa.RemoveSnapshotMetadata(volume, group, subname, snapname, key)
|
||||
assert.NoError(t, err)
|
||||
|
||||
metaValue, err = fsa.GetSnapshotMetadata(volume, group, subname, snapname, key)
|
||||
assert.Error(t, err)
|
||||
|
||||
err = fsa.RemoveSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestForceRemoveSnapshotMetadata(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "group"
|
||||
subname := "subVol"
|
||||
snapname := "snap1"
|
||||
key := "hi"
|
||||
value := "hello"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.CreateSubVolume(volume, group, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = fsa.SetSnapshotMetadata(volume, group, subname, snapname, key, value)
|
||||
assert.NoError(t, err)
|
||||
|
||||
metaValue, err := fsa.GetSnapshotMetadata(volume, group, subname, snapname, key)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, metaValue, value)
|
||||
|
||||
err = fsa.ForceRemoveSnapshotMetadata(volume, group, subname, snapname, key)
|
||||
assert.NoError(t, err)
|
||||
|
||||
metaValue, err = fsa.GetSnapshotMetadata(volume, group, subname, snapname, key)
|
||||
assert.Error(t, err)
|
||||
|
||||
err = fsa.RemoveSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestListSnapshotMetadata(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "group"
|
||||
subname := "subVol"
|
||||
snapname := "snap1"
|
||||
key1 := "hi1"
|
||||
value1 := "hello1"
|
||||
key2 := "hi2"
|
||||
value2 := "hello2"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.CreateSubVolume(volume, group, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = fsa.SetSnapshotMetadata(volume, group, subname, snapname, key1, value1)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.SetSnapshotMetadata(volume, group, subname, snapname, key2, value2)
|
||||
assert.NoError(t, err)
|
||||
|
||||
metaList, err := fsa.ListSnapshotMetadata(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, len(metaList), 2)
|
||||
assert.Contains(t, metaList, key1)
|
||||
assert.Contains(t, metaList, key2)
|
||||
|
||||
err = fsa.RemoveSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
err = fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
|
@ -0,0 +1,425 @@
|
|||
package admin
|
||||
|
||||
// this is the internal type used to create JSON for ceph.
|
||||
// See SubVolumeOptions for the type that users of the library
|
||||
// interact with.
|
||||
// note that the ceph json takes mode as a string.
|
||||
type subVolumeFields struct {
|
||||
Prefix string `json:"prefix"`
|
||||
Format string `json:"format"`
|
||||
VolName string `json:"vol_name"`
|
||||
GroupName string `json:"group_name,omitempty"`
|
||||
SubName string `json:"sub_name"`
|
||||
Size ByteCount `json:"size,omitempty"`
|
||||
Uid int `json:"uid,omitempty"`
|
||||
Gid int `json:"gid,omitempty"`
|
||||
Mode string `json:"mode,omitempty"`
|
||||
PoolLayout string `json:"pool_layout,omitempty"`
|
||||
NamespaceIsolated bool `json:"namespace_isolated"`
|
||||
}
|
||||
|
||||
// SubVolumeOptions are used to specify optional, non-identifying, values
|
||||
// to be used when creating a new subvolume.
|
||||
type SubVolumeOptions struct {
|
||||
Size ByteCount
|
||||
Uid int
|
||||
Gid int
|
||||
Mode int
|
||||
PoolLayout string
|
||||
NamespaceIsolated bool
|
||||
}
|
||||
|
||||
func (s *SubVolumeOptions) toFields(v, g, n string) *subVolumeFields {
|
||||
return &subVolumeFields{
|
||||
Prefix: "fs subvolume create",
|
||||
Format: "json",
|
||||
VolName: v,
|
||||
GroupName: g,
|
||||
SubName: n,
|
||||
Size: s.Size,
|
||||
Uid: s.Uid,
|
||||
Gid: s.Gid,
|
||||
Mode: modeString(s.Mode, false),
|
||||
PoolLayout: s.PoolLayout,
|
||||
NamespaceIsolated: s.NamespaceIsolated,
|
||||
}
|
||||
}
|
||||
|
||||
// NoGroup should be used when an optional subvolume group name is not
|
||||
// specified.
|
||||
const NoGroup = ""
|
||||
|
||||
// CreateSubVolume sends a request to create a CephFS subvolume in a volume,
|
||||
// belonging to an optional subvolume group.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume create <volume> --group-name=<group> <name> ...
|
||||
func (fsa *FSAdmin) CreateSubVolume(volume, group, name string, o *SubVolumeOptions) error {
|
||||
if o == nil {
|
||||
o = &SubVolumeOptions{}
|
||||
}
|
||||
f := o.toFields(volume, group, name)
|
||||
return fsa.marshalMgrCommand(f).NoData().End()
|
||||
}
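
For illustration, a minimal create call with options, assuming fsa is a connected *FSAdmin and a volume named "cephfs" exists:

func createSubVolumeExample(fsa *FSAdmin) error {
	opts := &SubVolumeOptions{
		Size: 1 << 30, // 1 GiB quota, in bytes
		Mode: 0750,
		Uid:  1000,
		Gid:  1000,
	}
	// NoGroup places the subvolume outside any subvolume group
	return fsa.CreateSubVolume("cephfs", NoGroup, "subvol1", opts)
}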
|
||||
|
||||
// ListSubVolumes returns a list of subvolumes belonging to the volume and
|
||||
// optional subvolume group.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume ls <volume> --group-name=<group>
|
||||
func (fsa *FSAdmin) ListSubVolumes(volume, group string) ([]string, error) {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume ls",
|
||||
"vol_name": volume,
|
||||
"format": "json",
|
||||
}
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
return parseListNames(fsa.marshalMgrCommand(m))
|
||||
}
|
||||
|
||||
// RemoveSubVolume will delete a CephFS subvolume in a volume and optional
|
||||
// subvolume group.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume rm <volume> --group-name=<group> <name>
|
||||
func (fsa *FSAdmin) RemoveSubVolume(volume, group, name string) error {
|
||||
return fsa.RemoveSubVolumeWithFlags(volume, group, name, SubVolRmFlags{})
|
||||
}
|
||||
|
||||
// ForceRemoveSubVolume will delete a CephFS subvolume in a volume and optional
|
||||
// subvolume group.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume rm <volume> --group-name=<group> <name> --force
|
||||
func (fsa *FSAdmin) ForceRemoveSubVolume(volume, group, name string) error {
|
||||
return fsa.RemoveSubVolumeWithFlags(volume, group, name, SubVolRmFlags{Force: true})
|
||||
}
|
||||
|
||||
// RemoveSubVolumeWithFlags will delete a CephFS subvolume in a volume and
|
||||
// optional subvolume group. This function accepts a SubVolRmFlags type that
|
||||
// can be used to specify flags that modify the operation's behavior.
|
||||
// Equivalent to RemoveSubVolume with no flags set.
|
||||
// Equivalent to ForceRemoveSubVolume if only the "Force" flag is set.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume rm <volume> --group-name=<group> <name> [...flags...]
|
||||
func (fsa *FSAdmin) RemoveSubVolumeWithFlags(volume, group, name string, o SubVolRmFlags) error {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume rm",
|
||||
"vol_name": volume,
|
||||
"sub_name": name,
|
||||
"format": "json",
|
||||
}
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
return fsa.marshalMgrCommand(mergeFlags(m, o)).NoData().End()
|
||||
}
|
||||
|
||||
type subVolumeResizeFields struct {
|
||||
Prefix string `json:"prefix"`
|
||||
Format string `json:"format"`
|
||||
VolName string `json:"vol_name"`
|
||||
GroupName string `json:"group_name,omitempty"`
|
||||
SubName string `json:"sub_name"`
|
||||
NewSize string `json:"new_size"`
|
||||
NoShrink bool `json:"no_shrink"`
|
||||
}
|
||||
|
||||
// SubVolumeResizeResult reports the size values returned by the
|
||||
// ResizeSubVolume function, as reported by Ceph.
|
||||
type SubVolumeResizeResult struct {
|
||||
BytesUsed ByteCount `json:"bytes_used"`
|
||||
BytesQuota ByteCount `json:"bytes_quota"`
|
||||
BytesPercent string `json:"bytes_pcent"`
|
||||
}
|
||||
|
||||
// ResizeSubVolume will resize a CephFS subvolume. The newSize value may be a
|
||||
// ByteCount or the special Infinite constant. Setting noShrink to true will
|
||||
// prevent reducing the size of the volume below the current used size.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume resize <volume> --group-name=<group> <name> ...
|
||||
func (fsa *FSAdmin) ResizeSubVolume(
|
||||
volume, group, name string,
|
||||
newSize QuotaSize, noShrink bool) (*SubVolumeResizeResult, error) {
|
||||
|
||||
f := &subVolumeResizeFields{
|
||||
Prefix: "fs subvolume resize",
|
||||
Format: "json",
|
||||
VolName: volume,
|
||||
GroupName: group,
|
||||
SubName: name,
|
||||
NewSize: newSize.resizeValue(),
|
||||
NoShrink: noShrink,
|
||||
}
|
||||
var result []*SubVolumeResizeResult
|
||||
res := fsa.marshalMgrCommand(f)
|
||||
if err := res.NoStatus().Unmarshal(&result).End(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result[0], nil
|
||||
}
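
A resize sketch, assuming fsa is a connected *FSAdmin and the subvolume exists; ByteCount values are plain byte counts and Infinite lifts the quota entirely:

func resizeExample(fsa *FSAdmin) error {
	// grow the quota to 2 GiB
	if _, err := fsa.ResizeSubVolume("cephfs", NoGroup, "subvol1", ByteCount(2<<30), false); err != nil {
		return err
	}
	// shrink to 1 GiB, refusing to go below the space already used
	if _, err := fsa.ResizeSubVolume("cephfs", NoGroup, "subvol1", ByteCount(1<<30), true); err != nil {
		return err
	}
	// remove the quota
	res, err := fsa.ResizeSubVolume("cephfs", NoGroup, "subvol1", Infinite, true)
	if err != nil {
		return err
	}
	_ = res.BytesQuota // quota as reported back by ceph
	return nil
}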
|
||||
|
||||
// SubVolumePath returns the path to the subvolume from the root of the file system.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume getpath <volume> --group-name=<group> <name>
|
||||
func (fsa *FSAdmin) SubVolumePath(volume, group, name string) (string, error) {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume getpath",
|
||||
"vol_name": volume,
|
||||
"sub_name": name,
|
||||
// ceph doesn't respond in json for this cmd (even if you ask)
|
||||
}
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
return parsePathResponse(fsa.marshalMgrCommand(m))
|
||||
}
|
||||
|
||||
// Feature is used to define constant values for optional features on
|
||||
// subvolumes.
|
||||
type Feature string
|
||||
|
||||
const (
|
||||
// SnapshotCloneFeature indicates a subvolume supports cloning.
|
||||
SnapshotCloneFeature = Feature("snapshot-clone")
|
||||
// SnapshotAutoprotectFeature indicates a subvolume does not require
|
||||
// manually protecting a subvolume before cloning.
|
||||
SnapshotAutoprotectFeature = Feature("snapshot-autoprotect")
|
||||
// SnapshotRetentionFeature indicates a subvolume supports retaining
|
||||
// snapshots on subvolume removal.
|
||||
SnapshotRetentionFeature = Feature("snapshot-retention")
|
||||
)
|
||||
|
||||
// SubVolumeState is used to define constant value for the state of
|
||||
// a subvolume.
|
||||
type SubVolumeState string
|
||||
|
||||
const (
|
||||
// StateUnset indicates a subvolume without any state.
|
||||
StateUnset = SubVolumeState("")
|
||||
// StateInit indicates that the subvolume is in initializing state.
|
||||
StateInit = SubVolumeState("init")
|
||||
// StatePending indicates that the subvolume is in pending state.
|
||||
StatePending = SubVolumeState("pending")
|
||||
// StateInProgress indicates that the subvolume is in in-progress state.
|
||||
StateInProgress = SubVolumeState("in-progress")
|
||||
// StateFailed indicates that the subvolume is in failed state.
|
||||
StateFailed = SubVolumeState("failed")
|
||||
// StateComplete indicates that the subvolume is in complete state.
|
||||
StateComplete = SubVolumeState("complete")
|
||||
// StateCanceled indicates that the subvolume is in canceled state.
|
||||
StateCanceled = SubVolumeState("canceled")
|
||||
// StateSnapRetained indicates that the subvolume is in
|
||||
// snapshot-retained state.
|
||||
StateSnapRetained = SubVolumeState("snapshot-retained")
|
||||
)
|
||||
|
||||
// SubVolumeInfo reports various informational values about a subvolume.
|
||||
type SubVolumeInfo struct {
|
||||
Type string `json:"type"`
|
||||
Path string `json:"path"`
|
||||
State SubVolumeState `json:"state"`
|
||||
Uid int `json:"uid"`
|
||||
Gid int `json:"gid"`
|
||||
Mode int `json:"mode"`
|
||||
BytesPercent string `json:"bytes_pcent"`
|
||||
BytesUsed ByteCount `json:"bytes_used"`
|
||||
BytesQuota QuotaSize `json:"-"`
|
||||
DataPool string `json:"data_pool"`
|
||||
PoolNamespace string `json:"pool_namespace"`
|
||||
Atime TimeStamp `json:"atime"`
|
||||
Mtime TimeStamp `json:"mtime"`
|
||||
Ctime TimeStamp `json:"ctime"`
|
||||
CreatedAt TimeStamp `json:"created_at"`
|
||||
Features []Feature `json:"features"`
|
||||
}
|
||||
|
||||
type subVolumeInfoWrapper struct {
|
||||
SubVolumeInfo
|
||||
VBytesQuota *quotaSizePlaceholder `json:"bytes_quota"`
|
||||
}
|
||||
|
||||
func parseSubVolumeInfo(res response) (*SubVolumeInfo, error) {
|
||||
var info subVolumeInfoWrapper
|
||||
if err := res.NoStatus().Unmarshal(&info).End(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if info.VBytesQuota != nil {
|
||||
info.BytesQuota = info.VBytesQuota.Value
|
||||
}
|
||||
return &info.SubVolumeInfo, nil
|
||||
}
|
||||
|
||||
// SubVolumeInfo returns information about the specified subvolume.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume info <volume> --group-name=<group> <name>
|
||||
func (fsa *FSAdmin) SubVolumeInfo(volume, group, name string) (*SubVolumeInfo, error) {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume info",
|
||||
"vol_name": volume,
|
||||
"sub_name": name,
|
||||
"format": "json",
|
||||
}
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
return parseSubVolumeInfo(fsa.marshalMgrCommand(m))
|
||||
}
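
To illustrate reading the result, assuming fsa is a connected *FSAdmin and the subvolume exists:

func subVolumeInfoExample(fsa *FSAdmin) error {
	info, err := fsa.SubVolumeInfo("cephfs", NoGroup, "subvol1")
	if err != nil {
		return err
	}
	// BytesQuota is either a ByteCount or the Infinite sentinel; Mode carries
	// the full unix mode bits (e.g. 040755 for a directory created with 0755).
	_ = info.BytesQuota
	_ = info.Mode
	return nil
}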
|
||||
|
||||
// CreateSubVolumeSnapshot creates a new snapshot from the source subvolume.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume snapshot create <volume> --group-name=<group> <source> <name>
|
||||
func (fsa *FSAdmin) CreateSubVolumeSnapshot(volume, group, source, name string) error {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume snapshot create",
|
||||
"vol_name": volume,
|
||||
"sub_name": source,
|
||||
"snap_name": name,
|
||||
"format": "json",
|
||||
}
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
return fsa.marshalMgrCommand(m).NoData().End()
|
||||
}
|
||||
|
||||
// RemoveSubVolumeSnapshot removes the specified snapshot from the subvolume.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume snapshot rm <volume> --group-name=<group> <subvolume> <name>
|
||||
func (fsa *FSAdmin) RemoveSubVolumeSnapshot(volume, group, subvolume, name string) error {
|
||||
return fsa.rmSubVolumeSnapshot(volume, group, subvolume, name, commonRmFlags{})
|
||||
}
|
||||
|
||||
// ForceRemoveSubVolumeSnapshot removes the specified snapshot from the subvolume.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume snapshot rm <volume> --group-name=<group> <subvolume> <name> --force
|
||||
func (fsa *FSAdmin) ForceRemoveSubVolumeSnapshot(volume, group, subvolume, name string) error {
|
||||
return fsa.rmSubVolumeSnapshot(volume, group, subvolume, name, commonRmFlags{force: true})
|
||||
}
|
||||
|
||||
func (fsa *FSAdmin) rmSubVolumeSnapshot(volume, group, subvolume, name string, o commonRmFlags) error {
|
||||
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume snapshot rm",
|
||||
"vol_name": volume,
|
||||
"sub_name": subvolume,
|
||||
"snap_name": name,
|
||||
"format": "json",
|
||||
}
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
return fsa.marshalMgrCommand(mergeFlags(m, o)).NoData().End()
|
||||
}
|
||||
|
||||
// ListSubVolumeSnapshots returns a listing of snapshots for a given subvolume.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume snapshot ls <volume> --group-name=<group> <name>
|
||||
func (fsa *FSAdmin) ListSubVolumeSnapshots(volume, group, name string) ([]string, error) {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume snapshot ls",
|
||||
"vol_name": volume,
|
||||
"sub_name": name,
|
||||
"format": "json",
|
||||
}
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
return parseListNames(fsa.marshalMgrCommand(m))
|
||||
}
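
A snapshot lifecycle sketch tying the calls above together, assuming fsa is a connected *FSAdmin and "cephfs"/"subvol1" exist:

func snapshotLifecycleExample(fsa *FSAdmin) error {
	if err := fsa.CreateSubVolumeSnapshot("cephfs", NoGroup, "subvol1", "snap1"); err != nil {
		return err
	}
	snaps, err := fsa.ListSubVolumeSnapshots("cephfs", NoGroup, "subvol1")
	if err != nil {
		return err
	}
	_ = snaps // includes "snap1"
	return fsa.RemoveSubVolumeSnapshot("cephfs", NoGroup, "subvol1", "snap1")
}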
|
||||
|
||||
// SubVolumeSnapshotInfo reports various informational values about a subvolume snapshot.
|
||||
type SubVolumeSnapshotInfo struct {
|
||||
CreatedAt TimeStamp `json:"created_at"`
|
||||
DataPool string `json:"data_pool"`
|
||||
HasPendingClones string `json:"has_pending_clones"`
|
||||
Protected string `json:"protected"`
|
||||
Size ByteCount `json:"size"`
|
||||
}
|
||||
|
||||
func parseSubVolumeSnapshotInfo(res response) (*SubVolumeSnapshotInfo, error) {
|
||||
var info SubVolumeSnapshotInfo
|
||||
if err := res.NoStatus().Unmarshal(&info).End(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &info, nil
|
||||
}
|
||||
|
||||
// SubVolumeSnapshotInfo returns information about the specified subvolume snapshot.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume snapshot info <volume> --group-name=<group> <subvolume> <name>
|
||||
func (fsa *FSAdmin) SubVolumeSnapshotInfo(volume, group, subvolume, name string) (*SubVolumeSnapshotInfo, error) {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume snapshot info",
|
||||
"vol_name": volume,
|
||||
"sub_name": subvolume,
|
||||
"snap_name": name,
|
||||
"format": "json",
|
||||
}
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
return parseSubVolumeSnapshotInfo(fsa.marshalMgrCommand(m))
|
||||
}
|
||||
|
||||
// ProtectSubVolumeSnapshot protects the specified snapshot.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume snapshot protect <volume> --group-name=<group> <subvolume> <name>
|
||||
func (fsa *FSAdmin) ProtectSubVolumeSnapshot(volume, group, subvolume, name string) error {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume snapshot protect",
|
||||
"vol_name": volume,
|
||||
"sub_name": subvolume,
|
||||
"snap_name": name,
|
||||
"format": "json",
|
||||
}
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
return fsa.marshalMgrCommand(m).FilterDeprecated().NoData().End()
|
||||
}
|
||||
|
||||
// UnprotectSubVolumeSnapshot removes protection from the specified snapshot.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume snapshot unprotect <volume> --group-name=<group> <subvolume> <name>
|
||||
func (fsa *FSAdmin) UnprotectSubVolumeSnapshot(volume, group, subvolume, name string) error {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume snapshot unprotect",
|
||||
"vol_name": volume,
|
||||
"sub_name": subvolume,
|
||||
"snap_name": name,
|
||||
"format": "json",
|
||||
}
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
return fsa.marshalMgrCommand(m).FilterDeprecated().NoData().End()
|
||||
}
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
//go:build !(octopus || pacific || quincy || reef || squid) && ceph_preview
|
||||
|
||||
package admin
|
||||
|
||||
// SubVolumeSnapshotPath returns the path for a snapshot from the source subvolume.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolume snapshot getpath <volume> --group-name=<group> <source> <name>
|
||||
func (fsa *FSAdmin) SubVolumeSnapshotPath(volume, group, source, name string) (string, error) {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolume snapshot getpath",
|
||||
"vol_name": volume,
|
||||
"sub_name": source,
|
||||
"snap_name": name,
|
||||
"format": "json",
|
||||
}
|
||||
if group != NoGroup {
|
||||
m["group_name"] = group
|
||||
}
|
||||
return parsePathResponse(fsa.marshalMgrCommand(m))
|
||||
}
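
For illustration (note this file is gated on the ceph_preview build tag), assuming fsa is a connected *FSAdmin and the snapshot exists:

func snapshotPathExample(fsa *FSAdmin) (string, error) {
	// returns something like "/volumes/_nogroup/subvol1/.snap/snap1/<uuid>",
	// matching what the test below reconstructs with path.Join
	return fsa.SubVolumeSnapshotPath("cephfs", NoGroup, "subvol1", "snap1")
}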
|
||||
|
|
@ -0,0 +1,93 @@
|
|||
//go:build !(octopus || pacific || quincy || reef || squid) && ceph_preview
|
||||
|
||||
package admin
|
||||
|
||||
import (
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSubVolumeSnapshotPath(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
|
||||
t.Run("non existing snapshot", func(t *testing.T) {
|
||||
subname := "subvol"
|
||||
|
||||
err := fsa.CreateSubVolume(volume, NoGroup, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, NoGroup, subname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
_, err = fsa.SubVolumeSnapshotPath(volume, NoGroup, subname, "nosnap")
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("without group", func(t *testing.T) {
|
||||
subname := "subvol1"
|
||||
snapname := "snap1"
|
||||
|
||||
err := fsa.CreateSubVolume(volume, NoGroup, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, NoGroup, subname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
svpath, err := fsa.SubVolumePath(volume, NoGroup, subname)
|
||||
assert.NoError(t, err)
|
||||
svuuid := path.Base(svpath)
|
||||
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, NoGroup, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeSnapshot(volume, NoGroup, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
expSnappath := path.Join("/volumes", "_nogroup", subname, ".snap", snapname, svuuid)
|
||||
snappath, err := fsa.SubVolumeSnapshotPath(volume, NoGroup, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expSnappath, snappath)
|
||||
})
|
||||
|
||||
t.Run("with group", func(t *testing.T) {
|
||||
group := "subvolgroup"
|
||||
subname := "subvol2"
|
||||
snapname := "snap2"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = fsa.CreateSubVolume(volume, group, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
svpath, err := fsa.SubVolumePath(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
svuuid := path.Base(svpath)
|
||||
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeSnapshot(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
expSnappath := path.Join("/volumes", group, subname, ".snap", snapname, svuuid)
|
||||
snappath, err := fsa.SubVolumeSnapshotPath(volume, group, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expSnappath, snappath)
|
||||
})
|
||||
}
|
||||
|
|
@ -0,0 +1,690 @@
|
|||
package admin
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var shortDuration = 50 * time.Millisecond
|
||||
|
||||
func delay() {
|
||||
// ceph seems to do this (partly?) async. So for now, we cheat
|
||||
// and sleep a little to make subsequent tests more reliable
|
||||
time.Sleep(shortDuration)
|
||||
}
|
||||
|
||||
func TestCreateSubVolume(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
type gn struct {
|
||||
group string
|
||||
name string
|
||||
}
|
||||
created := []gn{}
|
||||
defer func() {
|
||||
for _, c := range created {
|
||||
err := fsa.RemoveSubVolume(volume, c.group, c.name)
|
||||
assert.NoError(t, err)
|
||||
delay()
|
||||
if c.group != NoGroup {
|
||||
err := fsa.RemoveSubVolumeGroup(volume, c.group)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
t.Run("simple", func(t *testing.T) {
|
||||
subname := "SubVol1"
|
||||
err := fsa.CreateSubVolume(volume, NoGroup, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
created = append(created, gn{NoGroup, subname})
|
||||
|
||||
lsv, err := fsa.ListSubVolumes(volume, NoGroup)
|
||||
assert.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, len(lsv), 1)
|
||||
assert.Contains(t, lsv, subname)
|
||||
})
|
||||
|
||||
t.Run("options", func(t *testing.T) {
|
||||
subname := "SubVol2"
|
||||
o := &SubVolumeOptions{
|
||||
Mode: 0777,
|
||||
Uid: 200,
|
||||
Gid: 200,
|
||||
}
|
||||
err := fsa.CreateSubVolume(volume, NoGroup, subname, o)
|
||||
assert.NoError(t, err)
|
||||
created = append(created, gn{NoGroup, subname})
|
||||
|
||||
lsv, err := fsa.ListSubVolumes(volume, NoGroup)
|
||||
assert.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, len(lsv), 1)
|
||||
assert.Contains(t, lsv, subname)
|
||||
})
|
||||
|
||||
t.Run("withGroup", func(t *testing.T) {
|
||||
group := "withGroup1"
|
||||
subname := "SubVol3"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = fsa.CreateSubVolume(volume, group, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
created = append(created, gn{group, subname})
|
||||
|
||||
lsv, err := fsa.ListSubVolumes(volume, group)
|
||||
assert.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, len(lsv), 1)
|
||||
assert.Contains(t, lsv, subname)
|
||||
})
|
||||
|
||||
t.Run("groupAndOptions", func(t *testing.T) {
|
||||
group := "withGroup2"
|
||||
subname := "SubVol4"
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
o := &SubVolumeOptions{
|
||||
Size: 5 * gibiByte,
|
||||
Mode: 0777,
|
||||
Uid: 200,
|
||||
Gid: 200,
|
||||
}
|
||||
err = fsa.CreateSubVolume(volume, group, subname, o)
|
||||
assert.NoError(t, err)
|
||||
created = append(created, gn{group, subname})
|
||||
|
||||
lsv, err := fsa.ListSubVolumes(volume, group)
|
||||
assert.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, len(lsv), 1)
|
||||
assert.Contains(t, lsv, subname)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRemoveSubVolume(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
|
||||
lsv, err := fsa.ListSubVolumes(volume, NoGroup)
|
||||
assert.NoError(t, err)
|
||||
beforeCount := len(lsv)
|
||||
|
||||
removeTest := func(t *testing.T, rm func(string, string, string) error) {
|
||||
err = fsa.CreateSubVolume(volume, NoGroup, "deletemev1", nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
lsv, err = fsa.ListSubVolumes(volume, NoGroup)
|
||||
assert.NoError(t, err)
|
||||
afterCount := len(lsv)
|
||||
assert.Equal(t, beforeCount, afterCount-1)
|
||||
|
||||
err = rm(volume, NoGroup, "deletemev1")
|
||||
assert.NoError(t, err)
|
||||
|
||||
delay()
|
||||
lsv, err = fsa.ListSubVolumes(volume, NoGroup)
|
||||
assert.NoError(t, err)
|
||||
nowCount := len(lsv)
|
||||
if !assert.Equal(t, beforeCount, nowCount) {
|
||||
// this is a hack for debugging a flapping test
|
||||
assert.Equal(t, []string{}, lsv)
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("standard", func(t *testing.T) {
|
||||
removeTest(t, fsa.RemoveSubVolume)
|
||||
})
|
||||
t.Run("force", func(t *testing.T) {
|
||||
removeTest(t, fsa.ForceRemoveSubVolume)
|
||||
})
|
||||
t.Run("withFlagsEmpty", func(t *testing.T) {
|
||||
removeTest(t, func(v, g, n string) error {
|
||||
return fsa.RemoveSubVolumeWithFlags(v, g, n, SubVolRmFlags{})
|
||||
})
|
||||
})
|
||||
t.Run("withFlagsForce", func(t *testing.T) {
|
||||
removeTest(t, func(v, g, n string) error {
|
||||
return fsa.RemoveSubVolumeWithFlags(v, g, n, SubVolRmFlags{Force: true})
|
||||
})
|
||||
})
|
||||
t.Run("withFlagsRetainSnaps", func(t *testing.T) {
|
||||
removeTest(t, func(v, g, n string) error {
|
||||
return fsa.RemoveSubVolumeWithFlags(v, g, n, SubVolRmFlags{RetainSnapshots: true})
|
||||
})
|
||||
})
|
||||
t.Run("retainedSnapshotsTest", func(t *testing.T) {
|
||||
subname := "retsnap1"
|
||||
snapname := "s1"
|
||||
err = fsa.CreateSubVolume(volume, NoGroup, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
vinfo, err := fsa.SubVolumeInfo(volume, NoGroup, subname)
|
||||
assert.NoError(t, err)
|
||||
|
||||
canRetain := false
|
||||
for _, f := range vinfo.Features {
|
||||
if f == SnapshotRetentionFeature {
|
||||
canRetain = true
|
||||
}
|
||||
}
|
||||
if !canRetain {
|
||||
err = fsa.RemoveSubVolumeWithFlags(
|
||||
volume, NoGroup, subname, SubVolRmFlags{Force: true})
|
||||
assert.NoError(t, err)
|
||||
t.Skipf("the rest of this test requires snapshot retention on the server side")
|
||||
}
|
||||
|
||||
lsv, err = fsa.ListSubVolumes(volume, NoGroup)
|
||||
assert.NoError(t, err)
|
||||
afterCount := len(lsv)
|
||||
assert.Equal(t, beforeCount, afterCount-1)
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, NoGroup, subname, snapname)
assert.NoError(t, err)
|
||||
|
||||
err = fsa.RemoveSubVolumeWithFlags(
|
||||
volume, NoGroup, subname, SubVolRmFlags{Force: true})
|
||||
assert.Error(t, err)
|
||||
|
||||
err = fsa.RemoveSubVolumeWithFlags(
|
||||
volume, NoGroup, subname, SubVolRmFlags{RetainSnapshots: true})
|
||||
assert.NoError(t, err)
|
||||
|
||||
delay()
|
||||
subInfo, err := fsa.SubVolumeInfo(volume, NoGroup, subname)
|
||||
assert.NoError(t, err)
|
||||
// If the subvolume is deleted with snapshots retained, then
|
||||
// it must have snapshot-retained state.
|
||||
assert.Equal(t, subInfo.State, StateSnapRetained)
|
||||
|
||||
err = fsa.RemoveSubVolumeSnapshot(volume, NoGroup, subname, snapname)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// The deletion of a subvolume in snapshot-retained state is triggered
|
||||
// by the deletion of the last snapshot. It does not need to be
|
||||
// explicitly deleted.
|
||||
// This may also be why we need to wait longer for the subvolume
|
||||
// to be removed from the listing.
|
||||
// See also: https://tracker.ceph.com/issues/54625
|
||||
|
||||
assert.Eventually(t,
|
||||
func() bool {
|
||||
lsv, err := fsa.ListSubVolumes(volume, NoGroup)
|
||||
if !assert.NoError(t, err) {
|
||||
return false
|
||||
}
|
||||
return len(lsv) == beforeCount
|
||||
},
|
||||
2*time.Minute,
|
||||
shortDuration,
|
||||
"subvolume count did not return to previous value")
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
func TestResizeSubVolume(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "sizedGroup"
|
||||
subname := "sizeMe1"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
svopts := &SubVolumeOptions{
|
||||
Mode: 0777,
|
||||
Size: 20 * gibiByte,
|
||||
}
|
||||
err = fsa.CreateSubVolume(volume, group, subname, svopts)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
lsv, err := fsa.ListSubVolumes(volume, group)
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, lsv, subname)
|
||||
|
||||
rr, err := fsa.ResizeSubVolume(volume, group, subname, 30*gibiByte, false)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, rr)
|
||||
|
||||
rr, err = fsa.ResizeSubVolume(volume, group, subname, 10*gibiByte, true)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, rr)
|
||||
|
||||
rr, err = fsa.ResizeSubVolume(volume, group, subname, Infinite, true)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, rr)
|
||||
}
|
||||
|
||||
func TestSubVolumePath(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "svpGroup"
|
||||
subname := "svp1"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = fsa.CreateSubVolume(volume, group, subname, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
path, err := fsa.SubVolumePath(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, path, group)
|
||||
assert.Contains(t, path, subname)
|
||||
assert.NotContains(t, path, "\n")
|
||||
|
||||
// invalid subname
|
||||
path, err = fsa.SubVolumePath(volume, group, "oops")
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "", path)
|
||||
}
|
||||
|
||||
var sampleSubVolumeInfo1 = []byte(`
|
||||
{
|
||||
"atime": "2020-08-31 19:53:43",
|
||||
"bytes_pcent": "undefined",
|
||||
"bytes_quota": "infinite",
|
||||
"bytes_used": 0,
|
||||
"created_at": "2020-08-31 19:53:43",
|
||||
"ctime": "2020-08-31 19:57:15",
|
||||
"data_pool": "cephfs_data",
|
||||
"gid": 0,
|
||||
"mode": 16877,
|
||||
"mon_addrs": [
|
||||
"127.0.0.1:6789"
|
||||
],
|
||||
"mtime": "2020-08-31 19:53:43",
|
||||
"path": "/volumes/_nogroup/nibbles/df11be81-a648-4a7b-8549-f28306e3ad93",
|
||||
"pool_namespace": "",
|
||||
"type": "subvolume",
|
||||
"uid": 0
|
||||
}
|
||||
`)
|
||||
|
||||
var sampleSubVolumeInfo2 = []byte(`
|
||||
{
|
||||
"atime": "2020-09-01 17:49:25",
|
||||
"bytes_pcent": "0.00",
|
||||
"bytes_quota": 444444,
|
||||
"bytes_used": 0,
|
||||
"created_at": "2020-09-01 17:49:25",
|
||||
"ctime": "2020-09-01 23:49:22",
|
||||
"data_pool": "cephfs_data",
|
||||
"gid": 0,
|
||||
"mode": 16877,
|
||||
"mon_addrs": [
|
||||
"127.0.0.1:6789"
|
||||
],
|
||||
"mtime": "2020-09-01 17:49:25",
|
||||
"path": "/volumes/_nogroup/nibbles/d6e062df-7fa0-46ca-872a-9adf728e0e00",
|
||||
"pool_namespace": "",
|
||||
"type": "subvolume",
|
||||
"uid": 0
|
||||
}
|
||||
`)
|
||||
|
||||
var sampleSubVolumeInfo3 = []byte(`
|
||||
{
|
||||
"atime": "2020-10-02 13:48:17",
|
||||
"bytes_pcent": "undefined",
|
||||
"bytes_quota": "infinite",
|
||||
"bytes_used": 0,
|
||||
"created_at": "2020-10-02 13:48:17",
|
||||
"ctime": "2020-10-02 13:48:17",
|
||||
"data_pool": "cephfs_data",
|
||||
"features": [
|
||||
"snapshot-clone",
|
||||
"snapshot-autoprotect"
|
||||
],
|
||||
"gid": 0,
|
||||
"mode": 16877,
|
||||
"mon_addrs": [
|
||||
"127.0.0.1:6789"
|
||||
],
|
||||
"mtime": "2020-10-02 13:48:17",
|
||||
"path": "/volumes/igotta/boogie/0302e067-e7fb-4d9b-8388-aae46164d8b0",
|
||||
"pool_namespace": "",
|
||||
"type": "subvolume",
|
||||
"uid": 0
|
||||
}
|
||||
`)
|
||||
|
||||
var sampleSubVolumeInfo4 = []byte(`
|
||||
{
|
||||
"atime": "2020-10-02 13:48:17",
|
||||
"bytes_pcent": "undefined",
|
||||
"bytes_quota": "infinite",
|
||||
"bytes_used": 0,
|
||||
"created_at": "2020-10-02 13:48:17",
|
||||
"ctime": "2020-10-02 13:48:17",
|
||||
"data_pool": "cephfs_data",
|
||||
"features": [
|
||||
"snapshot-clone",
|
||||
"snapshot-autoprotect",
|
||||
"snapshot-retention"
|
||||
],
|
||||
"gid": 0,
|
||||
"mode": 16877,
|
||||
"mon_addrs": [
|
||||
"127.0.0.1:6789"
|
||||
],
|
||||
"mtime": "2020-10-02 13:48:17",
|
||||
"path": "/volumes/igotta/boogie/0302e067-e7fb-4d9b-8388-aae46164d8b0",
|
||||
"pool_namespace": "",
|
||||
"state": "complete",
|
||||
"type": "subvolume",
|
||||
"uid": 0
|
||||
}
|
||||
`)
|
||||
|
||||
var badSampleSubVolumeInfo1 = []byte(`
|
||||
{
|
||||
"bytes_quota": "fishy",
|
||||
"uid": 0
|
||||
}
|
||||
`)
|
||||
|
||||
var badSampleSubVolumeInfo2 = []byte(`
|
||||
{
|
||||
"bytes_quota": true,
|
||||
"uid": 0
|
||||
}
|
||||
`)
|
||||
|
||||
func TestParseSubVolumeInfo(t *testing.T) {
|
||||
R := newResponse
|
||||
t.Run("error", func(t *testing.T) {
|
||||
_, err := parseSubVolumeInfo(R(nil, "", errors.New("gleep glop")))
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "gleep glop", err.Error())
|
||||
})
|
||||
t.Run("statusSet", func(t *testing.T) {
|
||||
_, err := parseSubVolumeInfo(R(nil, "unexpected!", nil))
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
info, err := parseSubVolumeInfo(R(sampleSubVolumeInfo1, "", nil))
|
||||
assert.NoError(t, err)
|
||||
if assert.NotNil(t, info) {
|
||||
assert.Equal(t,
|
||||
"/volumes/_nogroup/nibbles/df11be81-a648-4a7b-8549-f28306e3ad93",
|
||||
info.Path)
|
||||
assert.Equal(t, 0, info.Uid)
|
||||
assert.Equal(t, Infinite, info.BytesQuota)
|
||||
assert.Equal(t, 040755, info.Mode)
|
||||
assert.Equal(t, 2020, info.Ctime.Year())
|
||||
assert.Equal(t, "2020-08-31 19:57:15", info.Ctime.String())
|
||||
}
|
||||
})
|
||||
t.Run("ok2", func(t *testing.T) {
|
||||
info, err := parseSubVolumeInfo(R(sampleSubVolumeInfo2, "", nil))
|
||||
assert.NoError(t, err)
|
||||
if assert.NotNil(t, info) {
|
||||
assert.Equal(t,
|
||||
"/volumes/_nogroup/nibbles/d6e062df-7fa0-46ca-872a-9adf728e0e00",
|
||||
info.Path)
|
||||
assert.Equal(t, 0, info.Uid)
|
||||
assert.Equal(t, ByteCount(444444), info.BytesQuota)
|
||||
assert.Equal(t, 040755, info.Mode)
|
||||
assert.Equal(t, 2020, info.Ctime.Year())
|
||||
assert.Equal(t, "2020-09-01 23:49:22", info.Ctime.String())
|
||||
}
|
||||
})
|
||||
t.Run("ok3", func(t *testing.T) {
|
||||
info, err := parseSubVolumeInfo(R(sampleSubVolumeInfo3, "", nil))
|
||||
assert.NoError(t, err)
|
||||
if assert.NotNil(t, info) {
|
||||
assert.Equal(t,
|
||||
"/volumes/igotta/boogie/0302e067-e7fb-4d9b-8388-aae46164d8b0",
|
||||
info.Path)
|
||||
assert.Equal(t, 0, info.Uid)
|
||||
assert.Equal(t, Infinite, info.BytesQuota)
|
||||
assert.Equal(t, 040755, info.Mode)
|
||||
assert.Equal(t, 2020, info.Ctime.Year())
|
||||
assert.Equal(t, "2020-10-02 13:48:17", info.Ctime.String())
|
||||
assert.Contains(t, info.Features, SnapshotCloneFeature)
|
||||
assert.Contains(t, info.Features, SnapshotAutoprotectFeature)
|
||||
}
|
||||
})
|
||||
t.Run("ok4", func(t *testing.T) {
|
||||
info, err := parseSubVolumeInfo(R(sampleSubVolumeInfo4, "", nil))
|
||||
assert.NoError(t, err)
|
||||
if assert.NotNil(t, info) {
|
||||
assert.Equal(t,
|
||||
"/volumes/igotta/boogie/0302e067-e7fb-4d9b-8388-aae46164d8b0",
|
||||
info.Path)
|
||||
assert.Equal(t, 0, info.Uid)
|
||||
assert.Contains(t, info.Features, SnapshotRetentionFeature)
|
||||
assert.Equal(t, info.State, StateComplete)
|
||||
}
|
||||
})
|
||||
t.Run("invalidBytesQuotaValue", func(t *testing.T) {
|
||||
info, err := parseSubVolumeInfo(R(badSampleSubVolumeInfo1, "", nil))
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, info)
|
||||
})
|
||||
t.Run("invalidBytesQuotaType", func(t *testing.T) {
|
||||
info, err := parseSubVolumeInfo(R(badSampleSubVolumeInfo2, "", nil))
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, info)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubVolumeInfo(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "hoagie"
|
||||
subname := "grinder"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
svopts := &SubVolumeOptions{
|
||||
Mode: 0750,
|
||||
Size: 20 * gibiByte,
|
||||
}
|
||||
err = fsa.CreateSubVolume(volume, group, subname, svopts)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
vinfo, err := fsa.SubVolumeInfo(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, vinfo)
|
||||
assert.Equal(t, 0, vinfo.Uid)
|
||||
assert.Equal(t, 20*gibiByte, vinfo.BytesQuota)
|
||||
assert.Equal(t, 040750, vinfo.Mode)
|
||||
assert.Equal(t, time.Now().Year(), vinfo.Ctime.Year())
|
||||
// state field was added with snapshot retention feature.
|
||||
canRetain := false
|
||||
for _, f := range vinfo.Features {
|
||||
if f == SnapshotRetentionFeature {
|
||||
canRetain = true
|
||||
}
|
||||
}
|
||||
if canRetain {
|
||||
assert.Equal(t, vinfo.State, StateComplete)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSubVolumeSnapshots(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "20000leagues"
|
||||
subname := "nautilus"
|
||||
snapname1 := "ne1"
|
||||
snapname2 := "mo2"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
svopts := &SubVolumeOptions{
|
||||
Mode: 0750,
|
||||
Size: 20 * gibiByte,
|
||||
}
|
||||
err = fsa.CreateSubVolume(volume, group, subname, svopts)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
t.Run("createAndRemove", func(t *testing.T) {
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname1)
|
||||
assert.NoError(t, err)
|
||||
err := fsa.RemoveSubVolumeSnapshot(volume, group, subname, snapname1)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("createAndForceRemove", func(t *testing.T) {
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname1)
|
||||
assert.NoError(t, err)
|
||||
err := fsa.ForceRemoveSubVolumeSnapshot(volume, group, subname, snapname1)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("listOne", func(t *testing.T) {
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname1)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeSnapshot(volume, group, subname, snapname1)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
snaps, err := fsa.ListSubVolumeSnapshots(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, snaps, 1)
|
||||
assert.Contains(t, snaps, snapname1)
|
||||
})
|
||||
|
||||
t.Run("listTwo", func(t *testing.T) {
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname1)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeSnapshot(volume, group, subname, snapname1)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname2)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeSnapshot(volume, group, subname, snapname2)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
snaps, err := fsa.ListSubVolumeSnapshots(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, snaps, 2)
|
||||
assert.Contains(t, snaps, snapname1)
|
||||
assert.Contains(t, snaps, snapname2)
|
||||
})
|
||||
}
|
||||
|
||||
var sampleSubVolumeSnapshoInfo1 = []byte(`
|
||||
{
|
||||
"created_at": "2020-09-11 17:40:12.035792",
|
||||
"data_pool": "cephfs_data",
|
||||
"has_pending_clones": "no",
|
||||
"protected": "yes",
|
||||
"size": 0
|
||||
}
|
||||
`)
|
||||
|
||||
func TestParseSubVolumeSnapshotInfo(t *testing.T) {
|
||||
R := newResponse
|
||||
t.Run("error", func(t *testing.T) {
|
||||
_, err := parseSubVolumeSnapshotInfo(R(nil, "", errors.New("flub")))
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "flub", err.Error())
|
||||
})
|
||||
t.Run("statusSet", func(t *testing.T) {
|
||||
_, err := parseSubVolumeSnapshotInfo(R(nil, "unexpected!", nil))
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("badJSON", func(t *testing.T) {
|
||||
_, err := parseSubVolumeSnapshotInfo(R([]byte("_XxXxX"), "", nil))
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
info, err := parseSubVolumeSnapshotInfo(R(sampleSubVolumeSnapshoInfo1, "", nil))
|
||||
assert.NoError(t, err)
|
||||
if assert.NotNil(t, info) {
|
||||
assert.Equal(t, "cephfs_data", info.DataPool)
|
||||
assert.EqualValues(t, 0, info.Size)
|
||||
assert.Equal(t, 2020, info.CreatedAt.Year())
|
||||
assert.Equal(t, "yes", info.Protected)
|
||||
assert.Equal(t, "no", info.HasPendingClones)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubVolumeSnapshotInfo(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "20000leagues"
|
||||
subname := "poulp"
|
||||
snapname1 := "t1"
|
||||
snapname2 := "nope"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
svopts := &SubVolumeOptions{
|
||||
Mode: 0750,
|
||||
Size: 20 * gibiByte,
|
||||
}
|
||||
err = fsa.CreateSubVolume(volume, group, subname, svopts)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname1)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeSnapshot(volume, group, subname, snapname1)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
sinfo, err := fsa.SubVolumeSnapshotInfo(volume, group, subname, snapname1)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, sinfo)
|
||||
assert.EqualValues(t, 0, sinfo.Size)
|
||||
assert.Equal(t, "cephfs_data", sinfo.DataPool)
|
||||
assert.Equal(t, time.Now().Year(), sinfo.CreatedAt.Year())
|
||||
|
||||
sinfo, err = fsa.SubVolumeSnapshotInfo(volume, group, subname, snapname2)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, sinfo)
|
||||
}
|
||||
|
|
@ -0,0 +1,108 @@
|
|||
package admin
|
||||
|
||||
// this is the internal type used to create JSON for ceph.
|
||||
// See SubVolumeGroupOptions for the type that users of the library
|
||||
// interact with.
|
||||
// note that the ceph json takes mode as a string.
|
||||
type subVolumeGroupFields struct {
|
||||
Prefix string `json:"prefix"`
|
||||
Format string `json:"format"`
|
||||
VolName string `json:"vol_name"`
|
||||
GroupName string `json:"group_name"`
|
||||
Uid int `json:"uid,omitempty"`
|
||||
Gid int `json:"gid,omitempty"`
|
||||
Mode string `json:"mode,omitempty"`
|
||||
PoolLayout string `json:"pool_layout,omitempty"`
|
||||
}
|
||||
|
||||
// SubVolumeGroupOptions are used to specify optional, non-identifying, values
|
||||
// to be used when creating a new subvolume group.
|
||||
type SubVolumeGroupOptions struct {
|
||||
Uid int
|
||||
Gid int
|
||||
Mode int
|
||||
PoolLayout string
|
||||
}
|
||||
|
||||
func (s *SubVolumeGroupOptions) toFields(v, g string) *subVolumeGroupFields {
|
||||
return &subVolumeGroupFields{
|
||||
Prefix: "fs subvolumegroup create",
|
||||
Format: "json",
|
||||
VolName: v,
|
||||
GroupName: g,
|
||||
Uid: s.Uid,
|
||||
Gid: s.Gid,
|
||||
Mode: modeString(s.Mode, false),
|
||||
PoolLayout: s.PoolLayout,
|
||||
}
|
||||
}
|
||||
|
||||
// CreateSubVolumeGroup sends a request to create a subvolume group in a volume.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolumegroup create <volume> <group_name> ...
|
||||
func (fsa *FSAdmin) CreateSubVolumeGroup(volume, name string, o *SubVolumeGroupOptions) error {
|
||||
if o == nil {
|
||||
o = &SubVolumeGroupOptions{}
|
||||
}
|
||||
res := fsa.marshalMgrCommand(o.toFields(volume, name))
|
||||
return res.NoData().End()
|
||||
}
|
||||
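// Illustrative usage sketch (not part of the original change): creating a
// subvolume group with explicit ownership and permissions, then listing the
// groups to confirm it exists. Assumes an *FSAdmin handle obtained elsewhere
// and a file-level "fmt" import; the uid/gid/group name values are
// hypothetical and error handling is simplified.
func exampleCreateSubVolumeGroup(fsa *FSAdmin) error {
	opts := &SubVolumeGroupOptions{
		Uid:  1000, // hypothetical owner uid
		Gid:  1000, // hypothetical owner gid
		Mode: 0770,
	}
	if err := fsa.CreateSubVolumeGroup("cephfs", "examplegroup", opts); err != nil {
		return err
	}
	groups, err := fsa.ListSubVolumeGroups("cephfs")
	if err != nil {
		return err
	}
	fmt.Println("subvolume groups:", groups)
	return nil
}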
|
||||
// ListSubVolumeGroups returns a list of subvolume groups belonging to the
|
||||
// specified volume.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolumegroup ls cephfs <volume>
|
||||
func (fsa *FSAdmin) ListSubVolumeGroups(volume string) ([]string, error) {
|
||||
res := fsa.marshalMgrCommand(map[string]string{
|
||||
"prefix": "fs subvolumegroup ls",
|
||||
"vol_name": volume,
|
||||
"format": "json",
|
||||
})
|
||||
return parseListNames(res)
|
||||
}
|
||||
|
||||
// RemoveSubVolumeGroup will delete a subvolume group in a volume.
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolumegroup rm <volume> <group_name>
|
||||
func (fsa *FSAdmin) RemoveSubVolumeGroup(volume, name string) error {
|
||||
return fsa.rmSubVolumeGroup(volume, name, commonRmFlags{})
|
||||
}
|
||||
|
||||
// ForceRemoveSubVolumeGroup will delete a subvolume group in a volume.
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolumegroup rm <volume> <group_name> --force
|
||||
func (fsa *FSAdmin) ForceRemoveSubVolumeGroup(volume, name string) error {
|
||||
return fsa.rmSubVolumeGroup(volume, name, commonRmFlags{force: true})
|
||||
}
|
||||
|
||||
func (fsa *FSAdmin) rmSubVolumeGroup(volume, name string, o commonRmFlags) error {
|
||||
res := fsa.marshalMgrCommand(mergeFlags(map[string]string{
|
||||
"prefix": "fs subvolumegroup rm",
|
||||
"vol_name": volume,
|
||||
"group_name": name,
|
||||
"format": "json",
|
||||
}, o))
|
||||
return res.NoData().End()
|
||||
}
|
||||
|
||||
// SubVolumeGroupPath returns the path to the subvolume from the root of the
|
||||
// file system.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs subvolumegroup getpath <volume> <group_name>
|
||||
func (fsa *FSAdmin) SubVolumeGroupPath(volume, name string) (string, error) {
|
||||
m := map[string]string{
|
||||
"prefix": "fs subvolumegroup getpath",
|
||||
"vol_name": volume,
|
||||
"group_name": name,
|
||||
// ceph doesn't respond in json for this cmd (even if you ask)
|
||||
}
|
||||
return parsePathResponse(fsa.marshalMgrCommand(m))
|
||||
}
|
||||
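// Illustrative sketch (not part of the original change): resolving a group's
// path and removing the group afterwards. Assumes an *FSAdmin handle, a
// file-level "fmt" import, and a previously created group named
// "examplegroup"; the path is typically found under /volumes/<group>.
func exampleSubVolumeGroupPath(fsa *FSAdmin) error {
	path, err := fsa.SubVolumeGroupPath("cephfs", "examplegroup")
	if err != nil {
		return err
	}
	fmt.Println("group path:", path)
	return fsa.RemoveSubVolumeGroup("cephfs", "examplegroup")
}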
|
|
@ -0,0 +1,119 @@
|
|||
package admin
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestCreateSubVolumeGroup(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
created := []string{}
|
||||
defer func() {
|
||||
for _, name := range created {
|
||||
err := fsa.RemoveSubVolumeGroup(volume, name)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}()
|
||||
|
||||
t.Run("simple", func(t *testing.T) {
|
||||
svgroup := "svg1"
|
||||
err := fsa.CreateSubVolumeGroup(volume, svgroup, nil)
|
||||
assert.NoError(t, err)
|
||||
created = append(created, svgroup)
|
||||
|
||||
lsvg, err := fsa.ListSubVolumeGroups(volume)
|
||||
assert.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, len(lsvg), 1)
|
||||
assert.Contains(t, lsvg, svgroup)
|
||||
})
|
||||
|
||||
t.Run("options1", func(t *testing.T) {
|
||||
svgroup := "svg2"
|
||||
err := fsa.CreateSubVolumeGroup(volume, svgroup, &SubVolumeGroupOptions{
|
||||
Mode: 0777,
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
created = append(created, svgroup)
|
||||
|
||||
lsvg, err := fsa.ListSubVolumeGroups(volume)
|
||||
assert.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, len(lsvg), 1)
|
||||
assert.Contains(t, lsvg, svgroup)
|
||||
})
|
||||
|
||||
t.Run("options2", func(t *testing.T) {
|
||||
svgroup := "anotherSVG"
|
||||
err := fsa.CreateSubVolumeGroup(volume, svgroup, &SubVolumeGroupOptions{
|
||||
Uid: 200,
|
||||
Gid: 200,
|
||||
Mode: 0771,
|
||||
// TODO: test pool_layout... I think it's a pool name
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
created = append(created, svgroup)
|
||||
|
||||
lsvg, err := fsa.ListSubVolumeGroups(volume)
|
||||
assert.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, len(lsvg), 1)
|
||||
assert.Contains(t, lsvg, svgroup)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRemoveSubVolumeGroup(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
|
||||
lsvg, err := fsa.ListSubVolumeGroups(volume)
|
||||
assert.NoError(t, err)
|
||||
beforeCount := len(lsvg)
|
||||
|
||||
removeTest := func(t *testing.T, rm func(string, string) error) {
|
||||
err = fsa.CreateSubVolumeGroup(volume, "deleteme1", nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
lsvg, err = fsa.ListSubVolumeGroups(volume)
|
||||
assert.NoError(t, err)
|
||||
afterCount := len(lsvg)
|
||||
assert.Equal(t, beforeCount, afterCount-1)
|
||||
|
||||
err = rm(volume, "deleteme1")
|
||||
assert.NoError(t, err)
|
||||
|
||||
lsvg, err = fsa.ListSubVolumeGroups(volume)
|
||||
assert.NoError(t, err)
|
||||
nowCount := len(lsvg)
|
||||
assert.Equal(t, beforeCount, nowCount)
|
||||
}
|
||||
|
||||
t.Run("standard", func(t *testing.T) {
|
||||
removeTest(t, fsa.RemoveSubVolumeGroup)
|
||||
})
|
||||
t.Run("force", func(t *testing.T) {
|
||||
removeTest(t, fsa.ForceRemoveSubVolumeGroup)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubVolumeGroupPath(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "grewp"
|
||||
|
||||
err := fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
path, err := fsa.SubVolumeGroupPath(volume, group)
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, path, "/volumes/"+group)
|
||||
assert.NotContains(t, path, "\n")
|
||||
|
||||
// invalid group name
|
||||
path, err = fsa.SubVolumeGroupPath(volume, "oops")
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "", path)
|
||||
}
|
||||
|
|
@ -0,0 +1,37 @@
package admin

import (
	"encoding/json"
	"time"
)

// golang's date parsing approach is rather bizarre
var cephTSLayout = "2006-01-02 15:04:05"

// TimeStamp abstracts some of the details about date+time stamps
// returned by ceph via JSON.
type TimeStamp struct {
	time.Time
}

// String returns a string representing the date+time as presented
// by ceph.
func (ts TimeStamp) String() string {
	return ts.Format(cephTSLayout)
}

// UnmarshalJSON implements the json Unmarshaler interface.
func (ts *TimeStamp) UnmarshalJSON(b []byte) error {
	var raw string
	if err := json.Unmarshal(b, &raw); err != nil {
		return err
	}
	// AFAICT, ceph always returns the time in UTC so Parse, as opposed to
	// ParseInLocation, is appropriate here.
	t, err := time.Parse(cephTSLayout, raw)
	if err != nil {
		return err
	}
	*ts = TimeStamp{t}
	return nil
}
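// Illustrative sketch (not part of the original change): TimeStamp can be
// embedded in any struct decoded from ceph's JSON; the UnmarshalJSON method
// above handles the "2006-01-02 15:04:05" layout. The struct and field below
// are hypothetical; assumes a file-level "fmt" import.
func exampleDecodeTimeStamp() error {
	var v struct {
		CreatedAt TimeStamp `json:"created_at"`
	}
	if err := json.Unmarshal([]byte(`{"created_at": "2020-10-02 13:48:17"}`), &v); err != nil {
		return err
	}
	fmt.Println("year:", v.CreatedAt.Year(), "as ceph string:", v.CreatedAt.String())
	return nil
}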
@ -0,0 +1,42 @@
|
|||
package admin
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestTimeStampUnmarshal(t *testing.T) {
|
||||
t.Run("valid", func(t *testing.T) {
|
||||
j1 := []byte(`"2020-01-03 18:03:21"`)
|
||||
var ts TimeStamp
|
||||
err := json.Unmarshal(j1, &ts)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 2020, ts.Year())
|
||||
assert.Equal(t, time.Month(1), ts.Month())
|
||||
assert.Equal(t, 3, ts.Day())
|
||||
})
|
||||
t.Run("badType", func(t *testing.T) {
|
||||
j1 := []byte(`["2020-01-03 18:03:21"]`)
|
||||
var ts TimeStamp
|
||||
err := json.Unmarshal(j1, &ts)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("badValue", func(t *testing.T) {
|
||||
j1 := []byte(`"just another manic monday"`)
|
||||
var ts TimeStamp
|
||||
err := json.Unmarshal(j1, &ts)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestTimeStampString(t *testing.T) {
|
||||
s := "2020-11-06 11:33:56"
|
||||
ti, err := time.Parse(cephTSLayout, s)
|
||||
if assert.NoError(t, err) {
|
||||
ts := TimeStamp{ti}
|
||||
assert.Equal(t, s, ts.String())
|
||||
}
|
||||
}
|
||||
186
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/admin/volume.go
Normal file
|
|
@ -0,0 +1,186 @@
|
|||
package admin
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
var (
|
||||
listVolumesCmd = []byte(`{"prefix":"fs volume ls"}`)
|
||||
dumpVolumesCmd = []byte(`{"prefix":"fs dump","format":"json"}`)
|
||||
listFsCmd = []byte(`{"prefix":"fs ls","format":"json"}`)
|
||||
)
|
||||
|
||||
// ListVolumes return a list of volumes in this Ceph cluster.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs volume ls
|
||||
func (fsa *FSAdmin) ListVolumes() ([]string, error) {
|
||||
res := fsa.rawMgrCommand(listVolumesCmd)
|
||||
return parseListNames(res)
|
||||
}
|
||||
|
||||
// FSPoolInfo contains the name of a file system as well as the metadata and
|
||||
// data pools. Pool information is available by ID or by name.
|
||||
type FSPoolInfo struct {
|
||||
Name string `json:"name"`
|
||||
MetadataPool string `json:"metadata_pool"`
|
||||
MetadataPoolID int `json:"metadata_pool_id"`
|
||||
DataPools []string `json:"data_pools"`
|
||||
DataPoolIDs []int `json:"data_pool_ids"`
|
||||
}
|
||||
|
||||
// ListFileSystems lists file systems along with the pools occupied by those
|
||||
// file systems.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs ls
|
||||
func (fsa *FSAdmin) ListFileSystems() ([]FSPoolInfo, error) {
|
||||
res := fsa.rawMonCommand(listFsCmd)
|
||||
return parseFsList(res)
|
||||
}
|
||||
|
||||
func parseFsList(res response) ([]FSPoolInfo, error) {
|
||||
var listing []FSPoolInfo
|
||||
if err := res.NoStatus().Unmarshal(&listing).End(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return listing, nil
|
||||
}
|
||||
|
||||
// VolumeIdent contains a pair of file system identifying values: the volume
|
||||
// name and the volume ID.
|
||||
type VolumeIdent struct {
|
||||
Name string
|
||||
ID int64
|
||||
}
|
||||
|
||||
type cephFileSystem struct {
|
||||
ID int64 `json:"id"`
|
||||
MDSMap struct {
|
||||
FSName string `json:"fs_name"`
|
||||
} `json:"mdsmap"`
|
||||
}
|
||||
|
||||
type fsDump struct {
|
||||
FileSystems []cephFileSystem `json:"filesystems"`
|
||||
}
|
||||
|
||||
const (
|
||||
dumpOkPrefix = "dumped fsmap epoch"
|
||||
dumpOkLen = len(dumpOkPrefix)
|
||||
|
||||
invalidTextualResponse = "this ceph version returns a non-parsable volume status response"
|
||||
)
|
||||
|
||||
func parseDumpToIdents(res response) ([]VolumeIdent, error) {
|
||||
if !res.Ok() {
|
||||
return nil, res.End()
|
||||
}
|
||||
var dump fsDump
|
||||
if err := res.FilterPrefix(dumpOkPrefix).NoStatus().Unmarshal(&dump).End(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// copy the dump json into the simpler enumeration list
|
||||
idents := make([]VolumeIdent, len(dump.FileSystems))
|
||||
for i := range dump.FileSystems {
|
||||
idents[i].ID = dump.FileSystems[i].ID
|
||||
idents[i].Name = dump.FileSystems[i].MDSMap.FSName
|
||||
}
|
||||
return idents, nil
|
||||
}
|
||||
|
||||
// EnumerateVolumes returns a list of volume-name volume-id pairs.
|
||||
func (fsa *FSAdmin) EnumerateVolumes() ([]VolumeIdent, error) {
|
||||
// We base our enumeration on the ceph fs dump json. This may not be the
|
||||
// only way to do it, but it's the only one I know of currently. Because of
|
||||
// this and to keep our initial implementation simple, we expose our own
|
||||
// simplified type only, rather than do a partial implementation of dump.
|
||||
return parseDumpToIdents(fsa.rawMonCommand(dumpVolumesCmd))
|
||||
}
|
||||
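// Illustrative sketch (not part of the original change): enumerating volumes
// and printing the name/ID pairs derived from the fs dump. Assumes an
// *FSAdmin handle and a file-level "fmt" import.
func exampleEnumerateVolumes(fsa *FSAdmin) error {
	idents, err := fsa.EnumerateVolumes()
	if err != nil {
		return err
	}
	for _, vi := range idents {
		fmt.Printf("volume %q has id %d\n", vi.Name, vi.ID)
	}
	return nil
}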
|
||||
// VolumePool reports on the pool status for a CephFS volume.
|
||||
type VolumePool struct {
|
||||
ID int `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type"`
|
||||
Available uint64 `json:"avail"`
|
||||
Used uint64 `json:"used"`
|
||||
}
|
||||
|
||||
// VolumeStatus reports various properties of a CephFS volume.
|
||||
// TODO: Fill in.
|
||||
type VolumeStatus struct {
|
||||
MDSVersion string `json:"mds_version"`
|
||||
Pools []VolumePool `json:"pools"`
|
||||
}
|
||||
|
||||
type mdsVersionField struct {
|
||||
Version string
|
||||
Items []struct {
|
||||
Version string `json:"version"`
|
||||
}
|
||||
}
|
||||
|
||||
func (m *mdsVersionField) UnmarshalJSON(data []byte) (err error) {
|
||||
if err = json.Unmarshal(data, &m.Version); err == nil {
|
||||
return
|
||||
}
|
||||
return json.Unmarshal(data, &m.Items)
|
||||
}
|
||||
|
||||
// volumeStatusResponse deals with the changing output of the mgr
|
||||
// api json
|
||||
type volumeStatusResponse struct {
|
||||
Pools []VolumePool `json:"pools"`
|
||||
MDSVersion mdsVersionField `json:"mds_version"`
|
||||
}
|
||||
|
||||
func (v *volumeStatusResponse) volumeStatus() *VolumeStatus {
|
||||
vstatus := &VolumeStatus{}
|
||||
vstatus.Pools = v.Pools
|
||||
if v.MDSVersion.Version != "" {
|
||||
vstatus.MDSVersion = v.MDSVersion.Version
|
||||
} else if len(v.MDSVersion.Items) > 0 {
|
||||
vstatus.MDSVersion = v.MDSVersion.Items[0].Version
|
||||
}
|
||||
return vstatus
|
||||
}
|
||||
|
||||
func parseVolumeStatus(res response) (*volumeStatusResponse, error) {
|
||||
var vs volumeStatusResponse
|
||||
res = res.NoStatus()
|
||||
if !res.Ok() {
|
||||
return nil, res.End()
|
||||
}
|
||||
res = res.Unmarshal(&vs)
|
||||
if !res.Ok() {
|
||||
if bytes.HasPrefix(res.Body(), []byte("ceph")) {
|
||||
return nil, NotImplementedError{
|
||||
Response: newResponse(res.Body(), invalidTextualResponse, res.Unwrap()),
|
||||
}
|
||||
}
|
||||
return nil, res.End()
|
||||
}
|
||||
return &vs, nil
|
||||
}
|
||||
|
||||
// VolumeStatus returns a VolumeStatus object for the given volume name.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs status cephfs <name>
|
||||
func (fsa *FSAdmin) VolumeStatus(name string) (*VolumeStatus, error) {
|
||||
res := fsa.marshalMgrCommand(map[string]string{
|
||||
"fs": name,
|
||||
"prefix": "fs status",
|
||||
"format": "json",
|
||||
})
|
||||
v, err := parseVolumeStatus(res)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v.volumeStatus(), nil
|
||||
}
|
||||
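// Illustrative sketch (not part of the original change): fetching the status
// of a volume and reporting the MDS version plus per-pool usage. Assumes an
// *FSAdmin handle and a file-level "fmt" import; servers that only emit a
// plain-text status may return a NotImplementedError here.
func exampleVolumeStatus(fsa *FSAdmin) error {
	vs, err := fsa.VolumeStatus("cephfs")
	if err != nil {
		return err
	}
	fmt.Println("MDS version:", vs.MDSVersion)
	for _, p := range vs.Pools {
		fmt.Printf("pool %s (%s): used=%d avail=%d\n", p.Name, p.Type, p.Used, p.Available)
	}
	return nil
}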
|
|
@ -0,0 +1,48 @@
|
|||
//go:build !(nautilus || octopus)
|
||||
// +build !nautilus,!octopus
|
||||
|
||||
package admin
|
||||
|
||||
// PoolInfo reports various properties of a pool.
|
||||
type PoolInfo struct {
|
||||
Available int `json:"avail"`
|
||||
Name string `json:"name"`
|
||||
Used int `json:"used"`
|
||||
}
|
||||
|
||||
// PoolType indicates the type of pool related to a volume.
|
||||
type PoolType struct {
|
||||
DataPool []PoolInfo `json:"data"`
|
||||
MetadataPool []PoolInfo `json:"metadata"`
|
||||
}
|
||||
|
||||
// VolInfo holds various informational values about a volume.
|
||||
type VolInfo struct {
|
||||
MonAddrs []string `json:"mon_addrs"`
|
||||
PendingSubvolDels int `json:"pending_subvolume_deletions"`
|
||||
Pools PoolType `json:"pools"`
|
||||
UsedSize int `json:"used_size"`
|
||||
}
|
||||
|
||||
func parseVolumeInfo(res response) (*VolInfo, error) {
|
||||
var info VolInfo
|
||||
if err := res.NoStatus().Unmarshal(&info).End(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &info, nil
|
||||
}
|
||||
|
||||
// FetchVolumeInfo fetches the information of a CephFS volume.
|
||||
//
|
||||
// Similar To:
|
||||
//
|
||||
// ceph fs volume info <vol_name>
|
||||
func (fsa *FSAdmin) FetchVolumeInfo(volume string) (*VolInfo, error) {
|
||||
m := map[string]string{
|
||||
"prefix": "fs volume info",
|
||||
"vol_name": volume,
|
||||
"format": "json",
|
||||
}
|
||||
|
||||
return parseVolumeInfo(fsa.marshalMgrCommand(m))
|
||||
}
|
||||
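// Illustrative sketch (not part of the original change): fetching volume info
// and printing the monitor addresses and data pool usage. Assumes an *FSAdmin
// handle and a file-level "fmt" import.
func exampleFetchVolumeInfo(fsa *FSAdmin) error {
	vinfo, err := fsa.FetchVolumeInfo("cephfs")
	if err != nil {
		return err
	}
	fmt.Println("monitors:", vinfo.MonAddrs)
	for _, p := range vinfo.Pools.DataPool {
		fmt.Printf("data pool %s: used=%d avail=%d\n", p.Name, p.Used, p.Available)
	}
	return nil
}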
|
|
@ -0,0 +1,65 @@
|
|||
//go:build !(nautilus || octopus)
|
||||
// +build !nautilus,!octopus
|
||||
|
||||
package admin
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestFetchVolumeInfo(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
|
||||
t.Run("SimpleVolume", func(t *testing.T) {
|
||||
volume := "cephfs"
|
||||
vinfo, err := fsa.FetchVolumeInfo(volume)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, vinfo)
|
||||
assert.Contains(t, vinfo.MonAddrs[0], "6789")
|
||||
assert.Equal(t, "cephfs_data", vinfo.Pools.DataPool[0].Name)
|
||||
})
|
||||
|
||||
t.Run("InvalidVolume", func(t *testing.T) {
|
||||
volume := "blah"
|
||||
var ec ErrCode
|
||||
_, err := fsa.FetchVolumeInfo(volume)
|
||||
assert.True(t, errors.As(err, &ec))
|
||||
assert.Equal(t, -2, ec.ErrorCode())
|
||||
})
|
||||
|
||||
t.Run("WithSubvolume", func(t *testing.T) {
|
||||
volume := "altfs"
|
||||
subvolname := "altfs_subvol"
|
||||
|
||||
err := fsa.CreateSubVolume(volume, NoGroup, subvolname, nil)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, NoGroup, subvolname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
vinfo, err := fsa.FetchVolumeInfo(volume)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, vinfo)
|
||||
assert.EqualValues(t, 0, vinfo.PendingSubvolDels)
|
||||
assert.Eventually(t,
|
||||
func() bool {
|
||||
vinfo, err := fsa.FetchVolumeInfo(volume)
|
||||
if !assert.NoError(t, err) {
|
||||
return false
|
||||
}
|
||||
return vinfo.Pools.DataPool[0].Used != 0
|
||||
},
|
||||
10*time.Second,
|
||||
100*time.Millisecond,
|
||||
"Data pool size not updated")
|
||||
})
|
||||
}
|
||||
|
||||
type ErrCode interface {
|
||||
ErrorCode() int
|
||||
}
|
||||
|
|
@ -0,0 +1,410 @@
|
|||
package admin
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/ceph/go-ceph/internal/util"
|
||||
)
|
||||
|
||||
func TestListVolumes(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
|
||||
vl, err := fsa.ListVolumes()
|
||||
assert.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, len(vl), 1)
|
||||
assert.Contains(t, vl, "cephfs")
|
||||
}
|
||||
|
||||
func TestEnumerateVolumes(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
|
||||
ve, err := fsa.EnumerateVolumes()
|
||||
assert.NoError(t, err)
|
||||
|
||||
found := false
|
||||
for i := range ve {
|
||||
if ve[i].Name == "cephfs" {
|
||||
assert.Equal(t, int64(1), ve[i].ID)
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "never found a cephfs entry in enumerated volumes")
|
||||
}
|
||||
|
||||
// note: some of these dumps are simplified for testing purposes; if we add
|
||||
// general dump support, these samples may need to be expanded upon.
|
||||
var sampleDump1 = []byte(`
|
||||
{
|
||||
"epoch": 5,
|
||||
"default_fscid": 1,
|
||||
"filesystems": [
|
||||
{
|
||||
"mdsmap": {
|
||||
"epoch": 5,
|
||||
"flags": 18,
|
||||
"ever_allowed_features": 0,
|
||||
"explicitly_allowed_features": 0,
|
||||
"created": "2020-08-31T18:37:34.657633+0000",
|
||||
"modified": "2020-08-31T18:37:36.700989+0000",
|
||||
"tableserver": 0,
|
||||
"root": 0,
|
||||
"session_timeout": 60,
|
||||
"session_autoclose": 300,
|
||||
"min_compat_client": "0 (unknown)",
|
||||
"max_file_size": 1099511627776,
|
||||
"last_failure": 0,
|
||||
"last_failure_osd_epoch": 0,
|
||||
"compat": {
|
||||
"compat": {},
|
||||
"ro_compat": {},
|
||||
"incompat": {
|
||||
"feature_1": "base v0.20",
|
||||
"feature_2": "client writeable ranges",
|
||||
"feature_3": "default file layouts on dirs",
|
||||
"feature_4": "dir inode in separate object",
|
||||
"feature_5": "mds uses versioned encoding",
|
||||
"feature_6": "dirfrag is stored in omap",
|
||||
"feature_8": "no anchor table",
|
||||
"feature_9": "file layout v2",
|
||||
"feature_10": "snaprealm v2"
|
||||
}
|
||||
},
|
||||
"max_mds": 1,
|
||||
"in": [
|
||||
0
|
||||
],
|
||||
"up": {
|
||||
"mds_0": 4115
|
||||
},
|
||||
"failed": [],
|
||||
"damaged": [],
|
||||
"stopped": [],
|
||||
"info": {
|
||||
"gid_4115": {
|
||||
"gid": 4115,
|
||||
"name": "Z",
|
||||
"rank": 0,
|
||||
"incarnation": 4,
|
||||
"state": "up:active",
|
||||
"state_seq": 2,
|
||||
"addr": "127.0.0.1:6809/2568111595",
|
||||
"addrs": {
|
||||
"addrvec": [
|
||||
{
|
||||
"type": "v1",
|
||||
"addr": "127.0.0.1:6809",
|
||||
"nonce": 2568111595
|
||||
}
|
||||
]
|
||||
},
|
||||
"join_fscid": -1,
|
||||
"export_targets": [],
|
||||
"features": 4540138292836696000,
|
||||
"flags": 0
|
||||
}
|
||||
},
|
||||
"data_pools": [
|
||||
1
|
||||
],
|
||||
"metadata_pool": 2,
|
||||
"enabled": true,
|
||||
"fs_name": "cephfs",
|
||||
"balancer": "",
|
||||
"standby_count_wanted": 0
|
||||
},
|
||||
"id": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
`)
|
||||
|
||||
var sampleDump2 = []byte(`
|
||||
{
|
||||
"epoch": 5,
|
||||
"default_fscid": 1,
|
||||
"filesystems": [
|
||||
{
|
||||
"mdsmap": {
|
||||
"fs_name": "wiffleball",
|
||||
"standby_count_wanted": 0
|
||||
},
|
||||
"id": 1
|
||||
},
|
||||
{
|
||||
"mdsmap": {
|
||||
"fs_name": "beanbag",
|
||||
"standby_count_wanted": 0
|
||||
},
|
||||
"id": 2
|
||||
}
|
||||
]
|
||||
}
|
||||
`)
|
||||
|
||||
func TestParseDumpToIdents(t *testing.T) {
|
||||
R := newResponse
|
||||
fakePrefix := dumpOkPrefix + " 5"
|
||||
t.Run("error", func(t *testing.T) {
|
||||
idents, err := parseDumpToIdents(R(nil, "", errors.New("boop")))
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "boop", err.Error())
|
||||
assert.Nil(t, idents)
|
||||
})
|
||||
t.Run("badStatus", func(t *testing.T) {
|
||||
_, err := parseDumpToIdents(R(sampleDump1, "unexpected!", nil))
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("oneVolOk", func(t *testing.T) {
|
||||
idents, err := parseDumpToIdents(R(sampleDump1, fakePrefix, nil))
|
||||
assert.NoError(t, err)
|
||||
if assert.Len(t, idents, 1) {
|
||||
assert.Equal(t, "cephfs", idents[0].Name)
|
||||
assert.Equal(t, int64(1), idents[0].ID)
|
||||
}
|
||||
})
|
||||
t.Run("twoVolOk", func(t *testing.T) {
|
||||
idents, err := parseDumpToIdents(R(sampleDump2, fakePrefix, nil))
|
||||
assert.NoError(t, err)
|
||||
if assert.Len(t, idents, 2) {
|
||||
assert.Equal(t, "wiffleball", idents[0].Name)
|
||||
assert.Equal(t, int64(1), idents[0].ID)
|
||||
assert.Equal(t, "beanbag", idents[1].Name)
|
||||
assert.Equal(t, int64(2), idents[1].ID)
|
||||
}
|
||||
})
|
||||
t.Run("unexpectedStatus", func(t *testing.T) {
|
||||
idents, err := parseDumpToIdents(R(sampleDump1, "slip-up", nil))
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, idents)
|
||||
})
|
||||
}
|
||||
|
||||
func TestVolumeStatus(t *testing.T) {
|
||||
if serverVersion == util.Nautilus {
|
||||
t.Skipf("can only execute on octopus/pacific servers")
|
||||
}
|
||||
fsa := getFSAdmin(t)
|
||||
|
||||
vs, err := fsa.VolumeStatus("cephfs")
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, vs.MDSVersion, "version")
|
||||
}
|
||||
|
||||
func TestVolumeStatusInvalid(t *testing.T) {
|
||||
if serverVersion != util.Nautilus {
|
||||
t.Skipf("can only execute on nautilus servers")
|
||||
}
|
||||
fsa := getFSAdmin(t)
|
||||
|
||||
vs, err := fsa.VolumeStatus("cephfs")
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, vs)
|
||||
var notImpl NotImplementedError
|
||||
assert.True(t, errors.As(err, ¬Impl))
|
||||
}
|
||||
|
||||
var sampleVolumeStatus1 = []byte(`
|
||||
{
|
||||
"clients": [{"clients": 1, "fs": "cephfs"}],
|
||||
"mds_version": "ceph version 15.2.4 (7447c15c6ff58d7fce91843b705a268a1917325c) octopus (stable)",
|
||||
"mdsmap": [{"dns": 76, "inos": 19, "name": "Z", "rank": 0, "rate": 0.0, "state": "active"}],
|
||||
"pools": [{"avail": 1017799872, "id": 2, "name": "cephfs_metadata", "type": "metadata", "used": 2204126}, {"avail": 1017799872, "id": 1, "name": "cephfs_data", "type": "data", "used": 0}]
|
||||
}
|
||||
`)
|
||||
|
||||
var sampleVolumeStatusQ = []byte(`
|
||||
{"clients": [{"clients": 3, "fs": "cephfs"}], "mds_version": [{"daemon": ["Z"], "version": "ceph version 17.1.0 (c675060073a05d40ef404d5921c81178a52af6e0) quincy (dev)"}], "mdsmap": [{"caps": 11, "dirs": 26, "dns": 49, "inos": 30, "name": "Z", "rank": 0, "rate": 0, "state": "active"}], "pools": [{"avail": 1018405056, "id": 2, "name": "cephfs_metadata", "type": "metadata", "used": 467690}, {"avail": 1018405056, "id": 1, "name": "cephfs_data", "type": "data", "used": 8}]}
|
||||
`)
|
||||
|
||||
var sampleVolumeStatusTextJunk = []byte(`cephfs - 2 clients
|
||||
======
|
||||
+------+--------+-----+---------------+-------+-------+
|
||||
| Rank | State | MDS | Activity | dns | inos |
|
||||
+------+--------+-----+---------------+-------+-------+
|
||||
| 0 | active | Z | Reqs: 98 /s | 254 | 192 |
|
||||
+------+--------+-----+---------------+-------+-------+
|
||||
+-----------------+----------+-------+-------+
|
||||
| Pool | type | used | avail |
|
||||
+-----------------+----------+-------+-------+
|
||||
| cephfs_metadata | metadata | 62.1M | 910M |
|
||||
| cephfs_data | data | 0 | 910M |
|
||||
+-----------------+----------+-------+-------+
|
||||
+-------------+
|
||||
| Standby MDS |
|
||||
+-------------+
|
||||
+-------------+
|
||||
MDS version: ceph version 14.2.11 (f7fdb2f52131f54b891a2ec99d8205561242cdaf) nautilus (stable)
|
||||
`)
|
||||
|
||||
func TestParseVolumeStatus(t *testing.T) {
|
||||
R := newResponse
|
||||
t.Run("error", func(t *testing.T) {
|
||||
_, err := parseVolumeStatus(R(nil, "", errors.New("bonk")))
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "bonk", err.Error())
|
||||
})
|
||||
t.Run("statusSet", func(t *testing.T) {
|
||||
_, err := parseVolumeStatus(R(nil, "unexpected!", nil))
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("badJSON", func(t *testing.T) {
|
||||
_, err := parseVolumeStatus(R([]byte("_XxXxX"), "", nil))
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
v, err := parseVolumeStatus(R(sampleVolumeStatus1, "", nil))
|
||||
assert.NoError(t, err)
|
||||
s := v.volumeStatus()
|
||||
if assert.NotNil(t, s) {
|
||||
assert.Contains(t, s.MDSVersion, "ceph version 15.2.4")
|
||||
assert.Contains(t, s.MDSVersion, "octopus")
|
||||
}
|
||||
})
|
||||
t.Run("notJSONfromServer", func(t *testing.T) {
|
||||
_, err := parseVolumeStatus(R(sampleVolumeStatusTextJunk, "", nil))
|
||||
if assert.Error(t, err) {
|
||||
var notImpl NotImplementedError
|
||||
assert.True(t, errors.As(err, ¬Impl))
|
||||
}
|
||||
})
|
||||
t.Run("quincy", func(t *testing.T) {
|
||||
v, err := parseVolumeStatus(R(sampleVolumeStatusQ, "", nil))
|
||||
assert.NoError(t, err)
|
||||
s := v.volumeStatus()
|
||||
if assert.NotNil(t, s) {
|
||||
assert.Contains(t, s.MDSVersion, "ceph version 17.1.0")
|
||||
assert.Contains(t, s.MDSVersion, "quincy")
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
var sampleFsLs1 = []byte(`
|
||||
[
|
||||
{
|
||||
"name": "cephfs",
|
||||
"metadata_pool": "cephfs_metadata",
|
||||
"metadata_pool_id": 2,
|
||||
"data_pool_ids": [
|
||||
1
|
||||
],
|
||||
"data_pools": [
|
||||
"cephfs_data"
|
||||
]
|
||||
}
|
||||
]
|
||||
`)
|
||||
|
||||
var sampleFsLs2 = []byte(`
|
||||
[
|
||||
{
|
||||
"name": "cephfs",
|
||||
"metadata_pool": "cephfs_metadata",
|
||||
"metadata_pool_id": 2,
|
||||
"data_pool_ids": [
|
||||
1
|
||||
],
|
||||
"data_pools": [
|
||||
"cephfs_data"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "archivefs",
|
||||
"metadata_pool": "archivefs_metadata",
|
||||
"metadata_pool_id": 6,
|
||||
"data_pool_ids": [
|
||||
4,
|
||||
5
|
||||
],
|
||||
"data_pools": [
|
||||
"archivefs_data1",
|
||||
"archivefs_data2"
|
||||
]
|
||||
}
|
||||
]
|
||||
`)
|
||||
|
||||
func TestParseFsList(t *testing.T) {
|
||||
t.Run("error", func(t *testing.T) {
|
||||
_, err := parseFsList(
|
||||
newResponse(nil, "", errors.New("eek")))
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "eek", err.Error())
|
||||
})
|
||||
t.Run("statusSet", func(t *testing.T) {
|
||||
_, err := parseFsList(
|
||||
newResponse(nil, "oof", nil))
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("badJSON", func(t *testing.T) {
|
||||
_, err := parseFsList(
|
||||
newResponse([]byte("______"), "", nil))
|
||||
assert.Error(t, err)
|
||||
})
|
||||
t.Run("ok1", func(t *testing.T) {
|
||||
l, err := parseFsList(
|
||||
newResponse(sampleFsLs1, "", nil))
|
||||
assert.NoError(t, err)
|
||||
if assert.NotNil(t, l) && assert.Len(t, l, 1) {
|
||||
fs := l[0]
|
||||
assert.Equal(t, "cephfs", fs.Name)
|
||||
assert.Equal(t, "cephfs_metadata", fs.MetadataPool)
|
||||
assert.Equal(t, 2, fs.MetadataPoolID)
|
||||
assert.Len(t, fs.DataPools, 1)
|
||||
assert.Contains(t, fs.DataPools, "cephfs_data")
|
||||
assert.Len(t, fs.DataPoolIDs, 1)
|
||||
assert.Contains(t, fs.DataPoolIDs, 1)
|
||||
}
|
||||
})
|
||||
t.Run("ok2", func(t *testing.T) {
|
||||
l, err := parseFsList(
|
||||
newResponse(sampleFsLs2, "", nil))
|
||||
assert.NoError(t, err)
|
||||
if assert.NotNil(t, l) && assert.Len(t, l, 2) {
|
||||
fs := l[0]
|
||||
assert.Equal(t, "cephfs", fs.Name)
|
||||
assert.Equal(t, "cephfs_metadata", fs.MetadataPool)
|
||||
assert.Equal(t, 2, fs.MetadataPoolID)
|
||||
assert.Len(t, fs.DataPools, 1)
|
||||
assert.Contains(t, fs.DataPools, "cephfs_data")
|
||||
assert.Len(t, fs.DataPoolIDs, 1)
|
||||
assert.Contains(t, fs.DataPoolIDs, 1)
|
||||
fs = l[1]
|
||||
assert.Equal(t, "archivefs", fs.Name)
|
||||
assert.Equal(t, "archivefs_metadata", fs.MetadataPool)
|
||||
assert.Equal(t, 6, fs.MetadataPoolID)
|
||||
assert.Len(t, fs.DataPools, 2)
|
||||
assert.Contains(t, fs.DataPools, "archivefs_data1")
|
||||
assert.Contains(t, fs.DataPools, "archivefs_data2")
|
||||
assert.Len(t, fs.DataPoolIDs, 2)
|
||||
assert.Contains(t, fs.DataPoolIDs, 4)
|
||||
assert.Contains(t, fs.DataPoolIDs, 5)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestListFileSystems(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
|
||||
l, err := fsa.ListFileSystems()
|
||||
assert.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, len(l), 1)
|
||||
|
||||
idx := -1
|
||||
for i := range l {
|
||||
if l[i].Name == "cephfs" {
|
||||
idx = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if assert.NotEqual(t, -1, idx) {
|
||||
assert.Equal(t, "cephfs", l[idx].Name)
|
||||
assert.Equal(t, "cephfs_metadata", l[idx].MetadataPool)
|
||||
assert.Len(t, l[idx].DataPools, 1)
|
||||
assert.Contains(t, l[idx].DataPools, "cephfs_data")
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,270 @@
|
|||
package admin
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
pathpkg "path"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ceph/go-ceph/cephfs"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var snapDir = ".snapshots"
|
||||
|
||||
func fsConnect(t *testing.T, configFile string) *cephfs.MountInfo {
|
||||
mount, err := cephfs.CreateMount()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, mount)
|
||||
|
||||
if configFile == "" {
|
||||
err = mount.ReadDefaultConfigFile()
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
err = mount.ReadConfigFile(configFile)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
err = mount.SetConfigOption("client_snapdir", snapDir)
|
||||
require.NoError(t, err)
|
||||
|
||||
timeout := time.After(time.Second * 5)
|
||||
ch := make(chan error)
|
||||
go func(mount *cephfs.MountInfo) {
|
||||
ch <- mount.Mount()
|
||||
}(mount)
|
||||
select {
|
||||
case err = <-ch:
|
||||
case <-timeout:
|
||||
err = fmt.Errorf("timed out waiting for connect")
|
||||
}
|
||||
require.NoError(t, err)
|
||||
return mount
|
||||
}
|
||||
|
||||
func writeFile(t *testing.T, mount *cephfs.MountInfo, path string, content []byte) {
|
||||
f1, err := mount.Open(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0700)
|
||||
require.NoError(t, err)
|
||||
defer f1.Close()
|
||||
f1.WriteAt(content, 0)
|
||||
}
|
||||
|
||||
func readFile(t *testing.T, mount *cephfs.MountInfo, path string) []byte {
|
||||
f1, err := mount.Open(path, os.O_RDONLY, 0)
|
||||
require.NoError(t, err)
|
||||
defer f1.Close()
|
||||
b, err := io.ReadAll(f1)
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}
|
||||
|
||||
func getSnapPath(t *testing.T, mount *cephfs.MountInfo, subvol, snapname string) string {
|
||||
// I wish there was a nicer way to do this
|
||||
snapPath := pathpkg.Join(subvol, snapDir, snapname)
|
||||
_, err := mount.Statx(snapPath, cephfs.StatxBasicStats, 0)
|
||||
if err == nil {
|
||||
return snapPath
|
||||
}
|
||||
snapPath = pathpkg.Join(
|
||||
pathpkg.Dir(subvol),
|
||||
snapDir,
|
||||
snapname,
|
||||
pathpkg.Base(subvol))
|
||||
_, err = mount.Statx(snapPath, cephfs.StatxBasicStats, 0)
|
||||
if err == nil {
|
||||
return snapPath
|
||||
}
|
||||
t.Fatalf("did not find a snap path for %s", snapname)
|
||||
return ""
|
||||
}
|
||||
|
||||
// TestWorkflow aims to do more than just exercise the API calls, but rather to
|
||||
// also check that they do what they say on the tin. This means importing the
|
||||
// cephfs lib in addition to the admin lib and reading and writing to the
|
||||
// subvolume, snapshot, and clone as appropriate.
|
||||
func TestWorkflow(t *testing.T) {
|
||||
fsa := getFSAdmin(t)
|
||||
volume := "cephfs"
|
||||
group := "workflow1"
|
||||
|
||||
// verify the volume we want to use
|
||||
l, err := fsa.ListVolumes()
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, l, volume)
|
||||
|
||||
err = fsa.CreateSubVolumeGroup(volume, group, nil)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeGroup(volume, group)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
subname := "files1"
|
||||
svopts := &SubVolumeOptions{
|
||||
Mode: 0777,
|
||||
Size: 2 * gibiByte,
|
||||
}
|
||||
err = fsa.CreateSubVolume(volume, group, subname, svopts)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolume(volume, group, subname)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
// getpath
|
||||
subPath, err := fsa.SubVolumePath(volume, group, subname)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, "", subPath)
|
||||
|
||||
// connect to volume, cd to path (?)
|
||||
mount := fsConnect(t, "")
|
||||
defer func(mount *cephfs.MountInfo) {
|
||||
assert.NoError(t, mount.Unmount())
|
||||
assert.NoError(t, mount.Release())
|
||||
}(mount)
|
||||
|
||||
err = mount.ChangeDir(subPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
// write some dirs & files
|
||||
err = mount.MakeDir("content1", 0770)
|
||||
require.NoError(t, err)
|
||||
|
||||
writeFile(t, mount, "content1/robots.txt",
|
||||
[]byte("robbie\nr2\nbender\nclaptrap\n"))
|
||||
writeFile(t, mount, "content1/songs.txt",
|
||||
[]byte("none of them knew they were robots\n"))
|
||||
assert.NoError(t, mount.MakeDir("content1/emptyDir1", 0770))
|
||||
|
||||
err = mount.MakeDir("content2", 0770)
|
||||
require.NoError(t, err)
|
||||
|
||||
writeFile(t, mount, "content2/androids.txt",
|
||||
[]byte("data\nmarvin\n"))
|
||||
assert.NoError(t, mount.MakeDir("content2/docs", 0770))
|
||||
writeFile(t, mount, "content2/docs/lore.txt",
|
||||
[]byte("Compendium\nLegend\nLore\nDeadweight\nSpirit at Aphelion\n"))
|
||||
|
||||
assert.NoError(t, mount.SyncFs())
|
||||
|
||||
// take a snapshot
|
||||
|
||||
snapname1 := "hotSpans1"
|
||||
err = fsa.CreateSubVolumeSnapshot(volume, group, subname, snapname1)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.RemoveSubVolumeSnapshot(volume, group, subname, snapname1)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
sinfo, err := fsa.SubVolumeSnapshotInfo(volume, group, subname, snapname1)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sinfo)
|
||||
|
||||
time.Sleep(500 * time.Millisecond) // is there a race?
|
||||
|
||||
// examine the snapshot
|
||||
snapPath := getSnapPath(t, mount, subPath, snapname1)
|
||||
require.NotEqual(t, "", snapPath)
|
||||
|
||||
tempPath := pathpkg.Join(snapPath, "content1/robots.txt")
|
||||
txt := readFile(t, mount, tempPath)
|
||||
assert.Contains(t, string(txt), "robbie")
|
||||
|
||||
// original subvol can be manipulated
|
||||
err = mount.Rename("content2/docs/lore.txt", "content1/lore.txt")
|
||||
assert.NoError(t, err)
|
||||
writeFile(t, mount, "content1/songs.txt",
|
||||
[]byte("none of them knew they were robots\nars moriendi\n"))
|
||||
|
||||
// snapshot may not be modified
|
||||
err = mount.Rename(
|
||||
pathpkg.Join(snapPath, "content2/docs/lore.txt"),
|
||||
pathpkg.Join(snapPath, "content1/lore.txt"))
|
||||
assert.Error(t, err)
|
||||
txt = readFile(t, mount, pathpkg.Join(snapPath, "content2/docs/lore.txt"))
|
||||
assert.Contains(t, string(txt), "Spirit")
|
||||
|
||||
// make a clone
|
||||
|
||||
clonename := "files2"
|
||||
err = fsa.CloneSubVolumeSnapshot(
|
||||
volume, group, subname, snapname1, clonename,
|
||||
&CloneOptions{TargetGroup: group})
|
||||
var x NotProtectedError
|
||||
if errors.As(err, &x) {
|
||||
err = fsa.ProtectSubVolumeSnapshot(volume, group, subname, snapname1)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.UnprotectSubVolumeSnapshot(volume, group, subname, snapname1)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
err = fsa.CloneSubVolumeSnapshot(
|
||||
volume, group, subname, snapname1, clonename,
|
||||
&CloneOptions{TargetGroup: group})
|
||||
}
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
err := fsa.ForceRemoveSubVolume(volume, group, clonename)
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
// wait for cloning to complete
|
||||
for done := false; !done; {
|
||||
status, err := fsa.CloneStatus(volume, group, clonename)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, status)
|
||||
switch status.State {
|
||||
case ClonePending, CloneInProgress:
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
case CloneComplete:
|
||||
done = true
|
||||
case CloneFailed:
|
||||
t.Fatal("clone failed")
|
||||
default:
|
||||
t.Fatalf("invalid status.State: %q", status.State)
|
||||
}
|
||||
}
|
||||
|
||||
// examine the clone
|
||||
clonePath, err := fsa.SubVolumePath(volume, group, clonename)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, "", clonePath)
|
||||
|
||||
txt = readFile(t, mount, pathpkg.Join(clonePath, "content1/robots.txt"))
|
||||
assert.Contains(t, string(txt), "robbie")
|
||||
|
||||
// clones are r/w
|
||||
err = mount.Rename(
|
||||
pathpkg.Join(clonePath, "content2/docs/lore.txt"),
|
||||
pathpkg.Join(clonePath, "content1/lore.txt"))
|
||||
assert.NoError(t, err)
|
||||
txt = readFile(t, mount, pathpkg.Join(clonePath, "content1/lore.txt"))
|
||||
assert.Contains(t, string(txt), "Spirit")
|
||||
|
||||
// it reflects what was in the snapshot
|
||||
txt = readFile(t, mount, pathpkg.Join(clonePath, "content1/songs.txt"))
|
||||
assert.Contains(t, string(txt), "robots")
|
||||
assert.NotContains(t, string(txt), "moriendi")
|
||||
|
||||
// ... with its own independent data
|
||||
writeFile(t, mount, pathpkg.Join(clonePath, "content1/songs.txt"),
|
||||
[]byte("none of them knew they were robots\nsweet charity\n"))
|
||||
|
||||
// (orig)
|
||||
txt = readFile(t, mount, "content1/songs.txt")
|
||||
assert.Contains(t, string(txt), "robots")
|
||||
assert.Contains(t, string(txt), "moriendi")
|
||||
assert.NotContains(t, string(txt), "charity")
|
||||
|
||||
// (clone)
|
||||
txt = readFile(t, mount, pathpkg.Join(clonePath, "content1/songs.txt"))
|
||||
assert.Contains(t, string(txt), "robots")
|
||||
assert.NotContains(t, string(txt), "moriendi")
|
||||
assert.Contains(t, string(txt), "charity")
|
||||
}
|
||||
248
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/cephfs.go
Normal file
|
|
@ -0,0 +1,248 @@
|
|||
package cephfs
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lcephfs
|
||||
#cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64
|
||||
#include <stdlib.h>
|
||||
#include <cephfs/libcephfs.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"github.com/ceph/go-ceph/internal/retry"
|
||||
"github.com/ceph/go-ceph/rados"
|
||||
)
|
||||
|
||||
// MountInfo exports ceph's ceph_mount_info from libcephfs.cc
|
||||
type MountInfo struct {
|
||||
mount *C.struct_ceph_mount_info
|
||||
}
|
||||
|
||||
func createMount(id *C.char) (*MountInfo, error) {
|
||||
mount := &MountInfo{}
|
||||
ret := C.ceph_create(&mount.mount, id)
|
||||
if ret != 0 {
|
||||
return nil, getError(ret)
|
||||
}
|
||||
return mount, nil
|
||||
}
|
||||
|
||||
// validate checks whether mount.mount is ready to use or not.
|
||||
func (mount *MountInfo) validate() error {
|
||||
if mount.mount == nil {
|
||||
return ErrNotConnected
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Version returns the major, minor, and patch level of the libcephfs library.
|
||||
func Version() (int, int, int) {
|
||||
var cMajor, cMinor, cPatch C.int
|
||||
C.ceph_version(&cMajor, &cMinor, &cPatch)
|
||||
return int(cMajor), int(cMinor), int(cPatch)
|
||||
}
|
||||
|
||||
// CreateMount creates a mount handle for interacting with Ceph.
|
||||
func CreateMount() (*MountInfo, error) {
|
||||
return createMount(nil)
|
||||
}
|
||||
|
||||
// CreateMountWithId creates a mount handle for interacting with Ceph.
|
||||
// The caller can specify a unique id that will identify this client.
|
||||
func CreateMountWithId(id string) (*MountInfo, error) {
|
||||
cid := C.CString(id)
|
||||
defer C.free(unsafe.Pointer(cid))
|
||||
return createMount(cid)
|
||||
}
|
||||
|
||||
// CreateFromRados creates a mount handle using an existing rados cluster
|
||||
// connection.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_create_from_rados(struct ceph_mount_info **cmount, rados_t cluster);
|
||||
func CreateFromRados(conn *rados.Conn) (*MountInfo, error) {
|
||||
mount := &MountInfo{}
|
||||
ret := C.ceph_create_from_rados(&mount.mount, C.rados_t(conn.Cluster()))
|
||||
if ret != 0 {
|
||||
return nil, getError(ret)
|
||||
}
|
||||
return mount, nil
|
||||
}
|
||||
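// Illustrative sketch (not part of the original change): reusing an existing
// rados cluster connection for a cephfs mount. Assumes the rados connection
// was configured and connected elsewhere (see the rados package); error
// handling is simplified.
func exampleCreateFromRados(conn *rados.Conn) (*MountInfo, error) {
	mount, err := CreateFromRados(conn)
	if err != nil {
		return nil, err
	}
	// mount at the file system root
	if err := mount.Mount(); err != nil {
		return nil, err
	}
	return mount, nil
}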
|
||||
// ReadDefaultConfigFile loads the ceph configuration from the default config file.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_conf_read_file(struct ceph_mount_info *cmount, const char *path_list);
|
||||
func (mount *MountInfo) ReadDefaultConfigFile() error {
|
||||
ret := C.ceph_conf_read_file(mount.mount, nil)
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// ReadConfigFile loads the ceph configuration from the specified config file.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_conf_read_file(struct ceph_mount_info *cmount, const char *path_list);
|
||||
func (mount *MountInfo) ReadConfigFile(path string) error {
|
||||
cPath := C.CString(path)
|
||||
defer C.free(unsafe.Pointer(cPath))
|
||||
ret := C.ceph_conf_read_file(mount.mount, cPath)
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// ParseConfigArgv configures the mount using a unix style command line
|
||||
// argument vector.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_conf_parse_argv(struct ceph_mount_info *cmount, int argc, const char **argv);
|
||||
func (mount *MountInfo) ParseConfigArgv(argv []string) error {
|
||||
if err := mount.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(argv) == 0 {
|
||||
return ErrEmptyArgument
|
||||
}
|
||||
cargv := make([]*C.char, len(argv))
|
||||
for i := range argv {
|
||||
cargv[i] = C.CString(argv[i])
|
||||
defer C.free(unsafe.Pointer(cargv[i]))
|
||||
}
|
||||
|
||||
ret := C.ceph_conf_parse_argv(mount.mount, C.int(len(cargv)), &cargv[0])
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// ParseDefaultConfigEnv configures the mount from the default Ceph
|
||||
// environment variable CEPH_ARGS.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_conf_parse_env(struct ceph_mount_info *cmount, const char *var);
|
||||
func (mount *MountInfo) ParseDefaultConfigEnv() error {
|
||||
if err := mount.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
ret := C.ceph_conf_parse_env(mount.mount, nil)
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// SetConfigOption sets the value of the configuration option identified by
|
||||
// the given name.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_conf_set(struct ceph_mount_info *cmount, const char *option, const char *value);
|
||||
func (mount *MountInfo) SetConfigOption(option, value string) error {
|
||||
cOption := C.CString(option)
|
||||
defer C.free(unsafe.Pointer(cOption))
|
||||
cValue := C.CString(value)
|
||||
defer C.free(unsafe.Pointer(cValue))
|
||||
return getError(C.ceph_conf_set(mount.mount, cOption, cValue))
|
||||
}
|
||||
|
||||
// GetConfigOption returns the value of the Ceph configuration option
|
||||
// identified by the given name.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_conf_get(struct ceph_mount_info *cmount, const char *option, char *buf, size_t len);
|
||||
func (mount *MountInfo) GetConfigOption(option string) (string, error) {
|
||||
cOption := C.CString(option)
|
||||
defer C.free(unsafe.Pointer(cOption))
|
||||
|
||||
var (
|
||||
err error
|
||||
buf []byte
|
||||
)
|
||||
// range from 4k to 256KiB
|
||||
retry.WithSizes(4096, 1<<18, func(size int) retry.Hint {
|
||||
buf = make([]byte, size)
|
||||
ret := C.ceph_conf_get(
|
||||
mount.mount,
|
||||
cOption,
|
||||
(*C.char)(unsafe.Pointer(&buf[0])),
|
||||
C.size_t(len(buf)))
|
||||
err = getError(ret)
|
||||
return retry.DoubleSize.If(err == errNameTooLong)
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
value := C.GoString((*C.char)(unsafe.Pointer(&buf[0])))
|
||||
return value, nil
|
||||
}
|
||||
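// Illustrative sketch (not part of the original change): setting a
// configuration option on a handle and reading it back. The option name is
// only an example; assumes a file-level "fmt" import.
func exampleConfigOption(mount *MountInfo) error {
	if err := mount.SetConfigOption("client_snapdir", ".snapshots"); err != nil {
		return err
	}
	value, err := mount.GetConfigOption("client_snapdir")
	if err != nil {
		return err
	}
	fmt.Println("client_snapdir =", value)
	return nil
}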
|
||||
// Init the file system client without actually mounting the file system.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_init(struct ceph_mount_info *cmount);
|
||||
func (mount *MountInfo) Init() error {
|
||||
return getError(C.ceph_init(mount.mount))
|
||||
}
|
||||
|
||||
// Mount the file system, establishing a connection capable of I/O.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_mount(struct ceph_mount_info *cmount, const char *root);
|
||||
func (mount *MountInfo) Mount() error {
|
||||
ret := C.ceph_mount(mount.mount, nil)
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// MountWithRoot mounts the file system using the path provided for the root of
|
||||
// the mount. This establishes a connection capable of I/O.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_mount(struct ceph_mount_info *cmount, const char *root);
|
||||
func (mount *MountInfo) MountWithRoot(root string) error {
|
||||
croot := C.CString(root)
|
||||
defer C.free(unsafe.Pointer(croot))
|
||||
return getError(C.ceph_mount(mount.mount, croot))
|
||||
}
|
||||
|
||||
// Unmount the file system.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_unmount(struct ceph_mount_info *cmount);
|
||||
func (mount *MountInfo) Unmount() error {
|
||||
ret := C.ceph_unmount(mount.mount)
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// Release destroys the mount handle.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_release(struct ceph_mount_info *cmount);
|
||||
func (mount *MountInfo) Release() error {
|
||||
if mount.mount == nil {
|
||||
return nil
|
||||
}
|
||||
ret := C.ceph_release(mount.mount)
|
||||
if err := getError(ret); err != nil {
|
||||
return err
|
||||
}
|
||||
mount.mount = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// SyncFs synchronizes all filesystem data to persistent media.
|
||||
func (mount *MountInfo) SyncFs() error {
|
||||
ret := C.ceph_sync_fs(mount.mount)
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// IsMounted checks mount status.
|
||||
func (mount *MountInfo) IsMounted() bool {
|
||||
ret := C.ceph_is_mounted(mount.mount)
|
||||
return ret == 1
|
||||
}
|
||||
365
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/cephfs_test.go
Normal file
|
|
@ -0,0 +1,365 @@
|
|||
package cephfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ceph/go-ceph/rados"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
testMdsName = "Z"
|
||||
)
|
||||
|
||||
func init() {
|
||||
mdsName := os.Getenv("GO_CEPH_TEST_MDS_NAME")
|
||||
if mdsName != "" {
|
||||
testMdsName = mdsName
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateMount(t *testing.T) {
|
||||
mount, err := CreateMount()
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, mount)
|
||||
assert.NoError(t, mount.Release())
|
||||
}
|
||||
|
||||
func fsConnect(t require.TestingT) *MountInfo {
|
||||
mount, err := CreateMount()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, mount)
|
||||
|
||||
err = mount.ReadDefaultConfigFile()
|
||||
require.NoError(t, err)
|
||||
|
||||
timeout := time.After(time.Second * 5)
|
||||
ch := make(chan error)
|
||||
go func(mount *MountInfo) {
|
||||
ch <- mount.Mount()
|
||||
}(mount)
|
||||
select {
|
||||
case err = <-ch:
|
||||
case <-timeout:
|
||||
err = fmt.Errorf("timed out waiting for connect")
|
||||
}
|
||||
require.NoError(t, err)
|
||||
return mount
|
||||
}
|
||||
|
||||
func fsDisconnect(t assert.TestingT, mount *MountInfo) {
|
||||
assert.NoError(t, mount.Unmount())
|
||||
assert.NoError(t, mount.Release())
|
||||
}
|
||||
|
||||
func TestMountRoot(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
fsDisconnect(t, mount)
|
||||
}
|
||||
|
||||
func TestSyncFs(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
err := mount.SyncFs()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestUnmountMount(t *testing.T) {
|
||||
t.Run("neverMounted", func(t *testing.T) {
|
||||
mount, err := CreateMount()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, mount)
|
||||
assert.False(t, mount.IsMounted())
|
||||
assert.NoError(t, mount.Release())
|
||||
})
|
||||
t.Run("mountUnmount", func(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer func() { assert.NoError(t, mount.Release()) }()
|
||||
assert.True(t, mount.IsMounted())
|
||||
|
||||
err := mount.Unmount()
|
||||
assert.NoError(t, err)
|
||||
assert.False(t, mount.IsMounted())
|
||||
})
|
||||
}
|
||||
|
||||
func TestReleaseMount(t *testing.T) {
|
||||
mount, err := CreateMount()
|
||||
assert.NoError(t, err)
|
||||
require.NotNil(t, mount)
|
||||
|
||||
assert.NoError(t, mount.Release())
|
||||
// call release again to ensure idempotency of the func
|
||||
assert.NoError(t, mount.Release())
|
||||
}
|
||||
|
||||
func radosConnect(t *testing.T) *rados.Conn {
|
||||
conn, err := rados.NewConn()
|
||||
require.NoError(t, err)
|
||||
err = conn.ReadDefaultConfigFile()
|
||||
require.NoError(t, err)
|
||||
|
||||
timeout := time.After(time.Second * 5)
|
||||
ch := make(chan error)
|
||||
go func(conn *rados.Conn) {
|
||||
ch <- conn.Connect()
|
||||
}(conn)
|
||||
select {
|
||||
case err = <-ch:
|
||||
case <-timeout:
|
||||
err = fmt.Errorf("timed out waiting for connect")
|
||||
}
|
||||
require.NoError(t, err)
|
||||
return conn
|
||||
}
|
||||
|
||||
func TestCreateFromRados(t *testing.T) {
|
||||
conn := radosConnect(t)
|
||||
mount, err := CreateFromRados(conn)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, mount)
|
||||
}
|
||||
|
||||
func TestCreateMountWithId(t *testing.T) {
|
||||
mount, err := CreateMountWithId("bobolink")
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, mount)
|
||||
defer func() { assert.NoError(t, mount.Release()) }()
|
||||
|
||||
err = mount.ReadDefaultConfigFile()
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = mount.Mount()
|
||||
assert.NoError(t, err)
|
||||
defer func() { assert.NoError(t, mount.Unmount()) }()
|
||||
|
||||
// verify the custom entity_id is visible in the 'session ls' output
|
||||
// of mds.
|
||||
cmd := []byte(`{"prefix": "session ls"}`)
|
||||
buf, info, err := mount.MdsCommand(
|
||||
testMdsName,
|
||||
[][]byte{cmd})
|
||||
assert.NoError(t, err)
|
||||
assert.NotEqual(t, "", string(buf))
|
||||
assert.Equal(t, "", string(info))
|
||||
assert.Contains(t, string(buf), `"bobolink"`)
|
||||
}
|
||||
|
||||
func TestMountWithRoot(t *testing.T) {
|
||||
bMount := fsConnect(t)
|
||||
defer fsDisconnect(t, bMount)
|
||||
|
||||
dir1 := "/test-mount-with-root"
|
||||
err := bMount.MakeDir(dir1, 0755)
|
||||
assert.NoError(t, err)
|
||||
defer bMount.RemoveDir(dir1)
|
||||
|
||||
sub1 := "/i.was.here"
|
||||
dir2 := dir1 + sub1
|
||||
err = bMount.MakeDir(dir2, 0755)
|
||||
assert.NoError(t, err)
|
||||
defer bMount.RemoveDir(dir2)
|
||||
|
||||
t.Run("withRoot", func(t *testing.T) {
|
||||
mount, err := CreateMount()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, mount)
|
||||
defer func() {
|
||||
assert.NoError(t, mount.Release())
|
||||
}()
|
||||
|
||||
err = mount.ReadDefaultConfigFile()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = mount.MountWithRoot(dir1)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, mount.Unmount())
|
||||
}()
|
||||
|
||||
err = mount.ChangeDir(sub1)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
t.Run("badRoot", func(t *testing.T) {
|
||||
mount, err := CreateMount()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, mount)
|
||||
defer func() {
|
||||
assert.NoError(t, mount.Release())
|
||||
}()
|
||||
|
||||
err = mount.ReadDefaultConfigFile()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = mount.MountWithRoot("/i-yam-what-i-yam")
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetSetConfigOption(t *testing.T) {
|
||||
// we don't need an active connection for this, just the handle
|
||||
mount, err := CreateMount()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, mount)
|
||||
defer func() { assert.NoError(t, mount.Release()) }()
|
||||
|
||||
err = mount.SetConfigOption("__dne__", "value")
|
||||
assert.Error(t, err)
|
||||
_, err = mount.GetConfigOption("__dne__")
|
||||
assert.Error(t, err)
|
||||
|
||||
origVal, err := mount.GetConfigOption("log_file")
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = mount.SetConfigOption("log_file", "/dev/null")
|
||||
assert.NoError(t, err)
|
||||
currVal, err := mount.GetConfigOption("log_file")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "/dev/null", currVal)
|
||||
|
||||
err = mount.SetConfigOption("log_file", origVal)
|
||||
assert.NoError(t, err)
|
||||
currVal, err = mount.GetConfigOption("log_file")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, origVal, currVal)
|
||||
}
|
||||
|
||||
func TestParseConfigArgv(t *testing.T) {
|
||||
mount, err := CreateMount()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, mount)
|
||||
defer func() { assert.NoError(t, mount.Release()) }()
|
||||
|
||||
origVal, err := mount.GetConfigOption("log_file")
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = mount.ParseConfigArgv(
|
||||
[]string{"cephfs.test", "--log_file", "/dev/null"})
|
||||
assert.NoError(t, err)
|
||||
|
||||
currVal, err := mount.GetConfigOption("log_file")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "/dev/null", currVal)
|
||||
assert.NotEqual(t, "/dev/null", origVal)
|
||||
|
||||
// ensure that an empty slice triggers an error (not a crash)
|
||||
err = mount.ParseConfigArgv([]string{})
|
||||
assert.Error(t, err)
|
||||
|
||||
// ensure we get an error for an invalid mount value
|
||||
badMount := &MountInfo{}
|
||||
err = badMount.ParseConfigArgv(
|
||||
[]string{"cephfs.test", "--log_file", "/dev/null"})
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestParseDefaultConfigEnv(t *testing.T) {
|
||||
mount, err := CreateMount()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, mount)
|
||||
defer func() { assert.NoError(t, mount.Release()) }()
|
||||
|
||||
origVal, err := mount.GetConfigOption("log_file")
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = os.Setenv("CEPH_ARGS", "--log_file /dev/null")
|
||||
assert.NoError(t, err)
|
||||
err = mount.ParseDefaultConfigEnv()
|
||||
assert.NoError(t, err)
|
||||
|
||||
currVal, err := mount.GetConfigOption("log_file")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "/dev/null", currVal)
|
||||
assert.NotEqual(t, "/dev/null", origVal)
|
||||
}
|
||||
|
||||
func TestValidate(t *testing.T) {
|
||||
mount, err := CreateMount()
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, mount)
|
||||
defer assert.NoError(t, mount.Release())
|
||||
|
||||
t.Run("mountCurrentDir", func(t *testing.T) {
|
||||
path := mount.CurrentDir()
|
||||
assert.Equal(t, path, "")
|
||||
})
|
||||
|
||||
t.Run("mountChangeDir", func(t *testing.T) {
|
||||
err := mount.ChangeDir("someDir")
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, err, ErrNotConnected)
|
||||
})
|
||||
|
||||
t.Run("mountMakeDir", func(t *testing.T) {
|
||||
err := mount.MakeDir("someName", 0444)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, err, ErrNotConnected)
|
||||
})
|
||||
|
||||
t.Run("mountRemoveDir", func(t *testing.T) {
|
||||
err := mount.RemoveDir("someDir")
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, err, ErrNotConnected)
|
||||
})
|
||||
|
||||
t.Run("mountLink", func(t *testing.T) {
|
||||
err := mount.Link("/", "/")
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, err, ErrNotConnected)
|
||||
})
|
||||
|
||||
t.Run("mountUnlink", func(t *testing.T) {
|
||||
err := mount.Unlink("someFile")
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, err, ErrNotConnected)
|
||||
})
|
||||
|
||||
t.Run("mountSymlink", func(t *testing.T) {
|
||||
err := mount.Symlink("/", "/")
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, err, ErrNotConnected)
|
||||
})
|
||||
|
||||
t.Run("mountReadlink", func(t *testing.T) {
|
||||
_, err := mount.Readlink("somePath")
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, err, ErrNotConnected)
|
||||
})
|
||||
}
|
||||
|
||||
func TestReadConfigFile(t *testing.T) {
|
||||
file, err := os.CreateTemp("/tmp", "cephfs.conf")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, file.Close())
|
||||
assert.NoError(t, os.Remove(file.Name()))
|
||||
}()
|
||||
_, err = io.WriteString(
|
||||
file, "[global]\nfsid = 04862775-14d5-46e0-a015-000000000000\n")
|
||||
require.NoError(t, err)
|
||||
|
||||
mount, err := CreateMount()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, mount)
|
||||
|
||||
err = mount.ReadConfigFile(file.Name())
|
||||
require.NoError(t, err)
|
||||
|
||||
v, err := mount.GetConfigOption("fsid")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "04862775-14d5-46e0-a015-000000000000", v)
|
||||
}
|
||||
|
||||
func TestVersion(t *testing.T) {
|
||||
var major, minor, patch = Version()
|
||||
assert.False(t, major < 0 || major > 1000, "invalid major")
|
||||
assert.False(t, minor < 0 || minor > 1000, "invalid minor")
|
||||
assert.False(t, patch < 0 || patch > 1000, "invalid patch")
|
||||
}
|
||||
74
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/command.go
Normal file
74
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/command.go
Normal file
|
|
@ -0,0 +1,74 @@
|
|||
package cephfs
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lcephfs
|
||||
#cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64
|
||||
#include <stdlib.h>
|
||||
#include <cephfs/libcephfs.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"github.com/ceph/go-ceph/internal/cutil"
|
||||
)
|
||||
|
||||
func cephBufferFree(p unsafe.Pointer) {
|
||||
C.ceph_buffer_free((*C.char)(p))
|
||||
}
|
||||
|
||||
// MdsCommand sends commands to the specified MDS.
|
||||
//
|
||||
// The args parameter takes a slice of byte slices but typically a single
|
||||
// slice element is sufficient. The use of two slices exists to best match
|
||||
// the structure of the underlying C call which is often a legacy interface
|
||||
// in Ceph.
|
||||
func (mount *MountInfo) MdsCommand(mdsSpec string, args [][]byte) ([]byte, string, error) {
|
||||
return mount.mdsCommand(mdsSpec, args, nil)
|
||||
}
|
||||
|
||||
// MdsCommandWithInputBuffer sends commands to the specified MDS, with an input
|
||||
// buffer.
|
||||
//
|
||||
// The args parameter takes a slice of byte slices but typically a single
|
||||
// slice element is sufficient. The use of two slices exists to best match
|
||||
// the structure of the underlying C call which is often a legacy interface
|
||||
// in Ceph.
|
||||
func (mount *MountInfo) MdsCommandWithInputBuffer(mdsSpec string, args [][]byte, inputBuffer []byte) ([]byte, string, error) {
|
||||
return mount.mdsCommand(mdsSpec, args, inputBuffer)
|
||||
}
|
||||
|
||||
// mdsCommand supports sending formatted commands to MDS.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_mds_command(struct ceph_mount_info *cmount,
|
||||
// const char *mds_spec,
|
||||
// const char **cmd,
|
||||
// size_t cmdlen,
|
||||
// const char *inbuf, size_t inbuflen,
|
||||
// char **outbuf, size_t *outbuflen,
|
||||
// char **outs, size_t *outslen);
|
||||
func (mount *MountInfo) mdsCommand(mdsSpec string, args [][]byte, inputBuffer []byte) (buffer []byte, info string, err error) {
|
||||
spec := C.CString(mdsSpec)
|
||||
defer C.free(unsafe.Pointer(spec))
|
||||
ci := cutil.NewCommandInput(args, inputBuffer)
|
||||
defer ci.Free()
|
||||
co := cutil.NewCommandOutput().SetFreeFunc(cephBufferFree)
|
||||
defer co.Free()
|
||||
|
||||
ret := C.ceph_mds_command(
|
||||
mount.mount, // cephfs mount ref
|
||||
spec, // mds spec
|
||||
(**C.char)(ci.Cmd()),
|
||||
C.size_t(ci.CmdLen()),
|
||||
(*C.char)(ci.InBuf()),
|
||||
C.size_t(ci.InBufLen()),
|
||||
(**C.char)(co.OutBuf()),
|
||||
(*C.size_t)(co.OutBufLen()),
|
||||
(**C.char)(co.Outs()),
|
||||
(*C.size_t)(co.OutsLen()))
|
||||
buf, status := co.GoValues()
|
||||
return buf, status, getError(ret)
|
||||
}
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
package cephfs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestMdsCommand(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
cmd := []byte(`{"prefix": "client ls"}`)
|
||||
buf, info, err := mount.MdsCommand(
|
||||
testMdsName,
|
||||
[][]byte{cmd})
|
||||
assert.NoError(t, err)
|
||||
assert.NotEqual(t, "", string(buf))
|
||||
assert.Equal(t, "", string(info))
|
||||
assert.Contains(t, string(buf), "ceph_version")
|
||||
// response should also be valid json
|
||||
var j []interface{}
|
||||
err = json.Unmarshal(buf, &j)
|
||||
assert.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, len(j), 1)
|
||||
}
|
||||
|
||||
func TestMdsCommandError(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
cmd := []byte("iAMinValId~~~")
|
||||
buf, info, err := mount.MdsCommand(
|
||||
testMdsName,
|
||||
[][]byte{cmd})
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, "", string(buf))
|
||||
assert.NotEqual(t, "", string(info))
|
||||
assert.Contains(t, string(info), "unparseable JSON")
|
||||
}
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
package cephfs
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lcephfs
|
||||
#cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64
|
||||
#include <cephfs/libcephfs.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// Some general connectivity and mounting functions are new in
|
||||
// Ceph Nautilus.
|
||||
|
||||
// GetFsCid returns the cluster ID for a mounted ceph file system.
|
||||
// If the object does not refer to a mounted file system, an error
|
||||
// will be returned.
|
||||
//
|
||||
// Note:
|
||||
//
|
||||
// Only supported in Ceph Nautilus and newer.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int64_t ceph_get_fs_cid(struct ceph_mount_info *cmount);
|
||||
func (mount *MountInfo) GetFsCid() (int64, error) {
|
||||
ret := C.ceph_get_fs_cid(mount.mount)
|
||||
if ret < 0 {
|
||||
return 0, getError(C.int(ret))
|
||||
}
|
||||
return int64(ret), nil
|
||||
}
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
package cephfs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGetFsCid(t *testing.T) {
|
||||
t.Run("unmounted", func(t *testing.T) {
|
||||
mount, err := CreateMount()
|
||||
defer func() { assert.NoError(t, mount.Release()) }()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, mount)
|
||||
|
||||
err = mount.ReadDefaultConfigFile()
|
||||
require.NoError(t, err)
|
||||
|
||||
cid, err := mount.GetFsCid()
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, cid, int64(0))
|
||||
})
|
||||
t.Run("mounted", func(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
cid, err := mount.GetFsCid()
|
||||
assert.NoError(t, err)
|
||||
assert.GreaterOrEqual(t, cid, int64(0))
|
||||
})
|
||||
}
|
||||
252
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/directory.go
Normal file
252
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/directory.go
Normal file
|
|
@ -0,0 +1,252 @@
|
|||
package cephfs
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lcephfs
|
||||
#cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64
|
||||
#include <stdlib.h>
|
||||
#include <dirent.h>
|
||||
#include <cephfs/libcephfs.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Directory represents an open directory handle.
|
||||
type Directory struct {
|
||||
mount *MountInfo
|
||||
dir *C.struct_ceph_dir_result
|
||||
}
|
||||
|
||||
// OpenDir returns a new Directory handle open for I/O.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_opendir(struct ceph_mount_info *cmount, const char *name, struct ceph_dir_result **dirpp);
|
||||
func (mount *MountInfo) OpenDir(path string) (*Directory, error) {
|
||||
var dir *C.struct_ceph_dir_result
|
||||
|
||||
cPath := C.CString(path)
|
||||
defer C.free(unsafe.Pointer(cPath))
|
||||
|
||||
ret := C.ceph_opendir(mount.mount, cPath, &dir)
|
||||
if ret != 0 {
|
||||
return nil, getError(ret)
|
||||
}
|
||||
|
||||
return &Directory{
|
||||
mount: mount,
|
||||
dir: dir,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close the open directory handle.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_closedir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp);
|
||||
func (dir *Directory) Close() error {
|
||||
if dir.dir == nil {
|
||||
return nil
|
||||
}
|
||||
if err := getError(C.ceph_closedir(dir.mount.mount, dir.dir)); err != nil {
|
||||
return err
|
||||
}
|
||||
dir.dir = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Inode represents an inode number in the file system.
|
||||
type Inode uint64
|
||||
|
||||
// DType values are used to determine, when possible, the file type
|
||||
// of a directory entry.
|
||||
type DType uint8
|
||||
|
||||
const (
|
||||
// DTypeBlk indicates a directory entry is a block device.
|
||||
DTypeBlk = DType(C.DT_BLK)
|
||||
// DTypeChr indicates a directory entry is a character device.
|
||||
DTypeChr = DType(C.DT_CHR)
|
||||
// DTypeDir indicates a directory entry is a directory.
|
||||
DTypeDir = DType(C.DT_DIR)
|
||||
// DTypeFIFO indicates a directory entry is a named pipe (FIFO).
|
||||
DTypeFIFO = DType(C.DT_FIFO)
|
||||
// DTypeLnk indicates a directory entry is a symbolic link.
|
||||
DTypeLnk = DType(C.DT_LNK)
|
||||
// DTypeReg indicates a directory entry is a regular file.
|
||||
DTypeReg = DType(C.DT_REG)
|
||||
// DTypeSock indicates a directory entry is a UNIX domain socket.
|
||||
DTypeSock = DType(C.DT_SOCK)
|
||||
// DTypeUnknown indicates that the file type could not be determined.
|
||||
DTypeUnknown = DType(C.DT_UNKNOWN)
|
||||
)
|
||||
|
||||
// DirEntry represents an entry within a directory.
|
||||
type DirEntry struct {
|
||||
inode Inode
|
||||
name string
|
||||
dtype DType
|
||||
}
|
||||
|
||||
// Name returns the directory entry's name.
|
||||
func (d *DirEntry) Name() string {
|
||||
return d.name
|
||||
}
|
||||
|
||||
// Inode returns the directory entry's inode number.
|
||||
func (d *DirEntry) Inode() Inode {
|
||||
return d.inode
|
||||
}
|
||||
|
||||
// DType returns the Directory-entry's Type, indicating if it
|
||||
// is a regular file, directory, etc.
|
||||
// DType may be unknown and thus require an additional call
|
||||
// (stat for example) if Unknown.
|
||||
func (d *DirEntry) DType() DType {
|
||||
return d.dtype
|
||||
}
|
||||
|
||||
// DirEntryPlus is a DirEntry plus additional data (stat) for an entry
|
||||
// within a directory.
|
||||
type DirEntryPlus struct {
|
||||
DirEntry
|
||||
// statx: the converted statx returned by ceph_readdirplus_r
|
||||
statx *CephStatx
|
||||
}
|
||||
|
||||
// Statx returns cached stat metadata for the directory entry.
|
||||
// This call does not incur an actual file system stat.
|
||||
func (d *DirEntryPlus) Statx() *CephStatx {
|
||||
return d.statx
|
||||
}
|
||||
|
||||
// toDirEntry converts a c struct dirent to our go wrapper.
|
||||
func toDirEntry(de *C.struct_dirent) *DirEntry {
|
||||
return &DirEntry{
|
||||
inode: Inode(de.d_ino),
|
||||
name: C.GoString(&de.d_name[0]),
|
||||
dtype: DType(de.d_type),
|
||||
}
|
||||
}
|
||||
|
||||
// toDirEntryPlus converts c structs set by ceph_readdirplus_r to our go
|
||||
// wrapper.
|
||||
func toDirEntryPlus(de *C.struct_dirent, s C.struct_ceph_statx) *DirEntryPlus {
|
||||
return &DirEntryPlus{
|
||||
DirEntry: *toDirEntry(de),
|
||||
statx: cStructToCephStatx(s),
|
||||
}
|
||||
}
|
||||
|
||||
// ReadDir reads a single directory entry from the open Directory.
|
||||
// A nil DirEntry pointer will be returned when the Directory stream has been
|
||||
// exhausted.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_readdir_r(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, struct dirent *de);
|
||||
func (dir *Directory) ReadDir() (*DirEntry, error) {
|
||||
if dir.dir == nil {
|
||||
return nil, errBadFile
|
||||
}
|
||||
var de C.struct_dirent
|
||||
ret := C.ceph_readdir_r(dir.mount.mount, dir.dir, &de)
|
||||
if ret < 0 {
|
||||
return nil, getError(ret)
|
||||
}
|
||||
if ret == 0 {
|
||||
return nil, nil // End-of-stream
|
||||
}
|
||||
return toDirEntry(&de), nil
|
||||
}
|
||||
|
||||
// ReadDirPlus reads a single directory entry and stat information from the
|
||||
// open Directory.
|
||||
// A nil DirEntryPlus pointer will be returned when the Directory stream has
|
||||
// been exhausted.
|
||||
// See Statx for a description of the wants and flags parameters.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_readdirplus_r(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp, struct dirent *de,
|
||||
// struct ceph_statx *stx, unsigned want, unsigned flags, struct Inode **out);
|
||||
func (dir *Directory) ReadDirPlus(
|
||||
want StatxMask, flags AtFlags) (*DirEntryPlus, error) {
|
||||
|
||||
if dir.dir == nil {
|
||||
return nil, errBadFile
|
||||
}
|
||||
var (
|
||||
de C.struct_dirent
|
||||
s C.struct_ceph_statx
|
||||
)
|
||||
ret := C.ceph_readdirplus_r(
|
||||
dir.mount.mount,
|
||||
dir.dir,
|
||||
&de,
|
||||
&s,
|
||||
C.uint(want),
|
||||
C.uint(flags),
|
||||
nil, // unused, internal Inode type not needed for high level api
|
||||
)
|
||||
if ret < 0 {
|
||||
return nil, getError(ret)
|
||||
}
|
||||
if ret == 0 {
|
||||
return nil, nil // End-of-stream
|
||||
}
|
||||
return toDirEntryPlus(&de, s), nil
|
||||
}
|
||||
|
||||
// RewindDir sets the directory stream to the beginning of the directory.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// void ceph_rewinddir(struct ceph_mount_info *cmount, struct ceph_dir_result *dirp);
|
||||
func (dir *Directory) RewindDir() {
|
||||
if dir.dir == nil {
|
||||
return
|
||||
}
|
||||
C.ceph_rewinddir(dir.mount.mount, dir.dir)
|
||||
}
|
||||
|
||||
// dirEntries provides a convenient wrapper around slices of DirEntry items.
|
||||
// For example, use the Names() call to easily get only the names from a
|
||||
// DirEntry slice.
|
||||
type dirEntries []*DirEntry
|
||||
|
||||
// list returns all the contents of a directory as a dirEntries slice.
|
||||
//
|
||||
// list is implemented using ReadDir. If any of the calls to ReadDir returns
|
||||
// an error List will return an error. However, all previous entries
|
||||
// collected will still be returned. Callers of this function may want to check
|
||||
// the entries return value even when an error is returned.
|
||||
// List rewinds the handle every time it is called to get a full
|
||||
// listing of directory contents.
|
||||
func (dir *Directory) list() (dirEntries, error) {
|
||||
var (
|
||||
err error
|
||||
entry *DirEntry
|
||||
entries = make(dirEntries, 0)
|
||||
)
|
||||
dir.RewindDir()
|
||||
for {
|
||||
entry, err = dir.ReadDir()
|
||||
if err != nil || entry == nil {
|
||||
break
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
return entries, err
|
||||
}
|
||||
|
||||
// names returns a slice of only the name fields from dir entries.
|
||||
func (entries dirEntries) names() []string {
|
||||
names := make([]string, len(entries))
|
||||
for i, v := range entries {
|
||||
names[i] = v.Name()
|
||||
}
|
||||
return names
|
||||
}
|
||||
248
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/directory_test.go
Normal file
248
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/directory_test.go
Normal file
|
|
@ -0,0 +1,248 @@
|
|||
package cephfs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestOpenCloseDir(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
dir1 := "/base"
|
||||
err := mount.MakeDir(dir1, 0755)
|
||||
assert.NoError(t, err)
|
||||
defer func() { assert.NoError(t, mount.RemoveDir(dir1)) }()
|
||||
|
||||
dir2 := dir1 + "/a"
|
||||
err = mount.MakeDir(dir2, 0755)
|
||||
assert.NoError(t, err)
|
||||
defer func() { assert.NoError(t, mount.RemoveDir(dir2)) }()
|
||||
|
||||
dir, err := mount.OpenDir(dir1)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, dir)
|
||||
err = dir.Close()
|
||||
assert.NoError(t, err)
|
||||
|
||||
dir, err = mount.OpenDir(dir2)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, dir)
|
||||
err = dir.Close()
|
||||
assert.NoError(t, err)
|
||||
|
||||
dir, err = mount.OpenDir("/no.such.dir")
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, dir)
|
||||
}
|
||||
|
||||
func TestReadDir(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
dir1 := "/base"
|
||||
err := mount.MakeDir(dir1, 0755)
|
||||
assert.NoError(t, err)
|
||||
defer func() { assert.NoError(t, mount.RemoveDir(dir1)) }()
|
||||
|
||||
subdirs := []string{"a", "bb", "ccc", "dddd"}
|
||||
for _, s := range subdirs {
|
||||
spath := dir1 + "/" + s
|
||||
err = mount.MakeDir(spath, 0755)
|
||||
assert.NoError(t, err)
|
||||
defer func(d string) {
|
||||
assert.NoError(t, mount.RemoveDir(d))
|
||||
}(spath)
|
||||
}
|
||||
|
||||
t.Run("root", func(t *testing.T) {
|
||||
dir, err := mount.OpenDir("/")
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, dir)
|
||||
defer func() { assert.NoError(t, dir.Close()) }()
|
||||
|
||||
found := []string{}
|
||||
for {
|
||||
entry, err := dir.ReadDir()
|
||||
assert.NoError(t, err)
|
||||
if entry == nil {
|
||||
break
|
||||
}
|
||||
assert.NotEqual(t, Inode(0), entry.Inode())
|
||||
assert.NotEqual(t, "", entry.Name())
|
||||
found = append(found, entry.Name())
|
||||
}
|
||||
assert.Contains(t, found, "base")
|
||||
})
|
||||
t.Run("dir1", func(t *testing.T) {
|
||||
dir, err := mount.OpenDir(dir1)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, dir)
|
||||
defer func() { assert.NoError(t, dir.Close()) }()
|
||||
|
||||
found := []string{}
|
||||
for {
|
||||
entry, err := dir.ReadDir()
|
||||
assert.NoError(t, err)
|
||||
if entry == nil {
|
||||
break
|
||||
}
|
||||
assert.NotEqual(t, Inode(0), entry.Inode())
|
||||
assert.NotEqual(t, "", entry.Name())
|
||||
// we have created all the contents of this dir and they are all
|
||||
// empty dirs.
|
||||
assert.Equal(t, DTypeDir, entry.DType())
|
||||
found = append(found, entry.Name())
|
||||
}
|
||||
assert.Subset(t, found, subdirs)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDirectoryList(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
dir1 := "/base"
|
||||
err := mount.MakeDir(dir1, 0755)
|
||||
assert.NoError(t, err)
|
||||
defer func() { assert.NoError(t, mount.RemoveDir(dir1)) }()
|
||||
|
||||
subdirs := []string{"a", "bb", "ccc", "dddd"}
|
||||
for _, s := range subdirs {
|
||||
spath := dir1 + "/" + s
|
||||
err = mount.MakeDir(spath, 0755)
|
||||
assert.NoError(t, err)
|
||||
defer func(d string) {
|
||||
assert.NoError(t, mount.RemoveDir(d))
|
||||
}(spath)
|
||||
}
|
||||
|
||||
t.Run("root", func(t *testing.T) {
|
||||
dir, err := mount.OpenDir("/")
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, dir)
|
||||
defer func() { assert.NoError(t, dir.Close()) }()
|
||||
|
||||
entries, err := dir.list()
|
||||
assert.NoError(t, err)
|
||||
assert.Greater(t, len(entries), 1)
|
||||
found := entries.names()
|
||||
assert.Contains(t, found, "base")
|
||||
})
|
||||
t.Run("dir1", func(t *testing.T) {
|
||||
dir, err := mount.OpenDir(dir1)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, dir)
|
||||
defer func() { assert.NoError(t, dir.Close()) }()
|
||||
|
||||
entries, err := dir.list()
|
||||
assert.NoError(t, err)
|
||||
assert.Greater(t, len(entries), 1)
|
||||
found := entries.names()
|
||||
assert.Subset(t, found, subdirs)
|
||||
})
|
||||
t.Run("dir1Twice", func(t *testing.T) {
|
||||
dir, err := mount.OpenDir(dir1)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, dir)
|
||||
defer func() { assert.NoError(t, dir.Close()) }()
|
||||
|
||||
entries, err := dir.list()
|
||||
assert.NoError(t, err)
|
||||
assert.Greater(t, len(entries), 1)
|
||||
found := entries.names()
|
||||
assert.Subset(t, found, subdirs)
|
||||
|
||||
// verify that calling list gives a complete list
|
||||
// even after being used for the same directory already
|
||||
entries, err = dir.list()
|
||||
assert.NoError(t, err)
|
||||
assert.Greater(t, len(entries), 1)
|
||||
found = entries.names()
|
||||
assert.Subset(t, found, subdirs)
|
||||
})
|
||||
}
|
||||
|
||||
func TestReadDirPlus(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
dir1 := "/base"
|
||||
err := mount.MakeDir(dir1, 0755)
|
||||
assert.NoError(t, err)
|
||||
defer func() { assert.NoError(t, mount.RemoveDir(dir1)) }()
|
||||
|
||||
subdirs := []string{"a", "bb", "ccc", "dddd"}
|
||||
for _, s := range subdirs {
|
||||
spath := dir1 + "/" + s
|
||||
err = mount.MakeDir(spath, 0755)
|
||||
assert.NoError(t, err)
|
||||
defer func(d string) {
|
||||
assert.NoError(t, mount.RemoveDir(d))
|
||||
}(spath)
|
||||
}
|
||||
|
||||
t.Run("root", func(t *testing.T) {
|
||||
dir, err := mount.OpenDir("/")
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, dir)
|
||||
defer func() { assert.NoError(t, dir.Close()) }()
|
||||
|
||||
found := []string{}
|
||||
for {
|
||||
entry, err := dir.ReadDirPlus(StatxBasicStats, AtSymlinkNofollow)
|
||||
assert.NoError(t, err)
|
||||
if entry == nil {
|
||||
break
|
||||
}
|
||||
assert.NotEqual(t, Inode(0), entry.Inode())
|
||||
assert.NotEqual(t, "", entry.Name())
|
||||
found = append(found, entry.Name())
|
||||
}
|
||||
assert.Contains(t, found, "base")
|
||||
})
|
||||
t.Run("dir1", func(t *testing.T) {
|
||||
dir, err := mount.OpenDir(dir1)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, dir)
|
||||
defer func() { assert.NoError(t, dir.Close()) }()
|
||||
|
||||
found := []string{}
|
||||
for {
|
||||
entry, err := dir.ReadDirPlus(StatxBasicStats, AtSymlinkNofollow)
|
||||
assert.NoError(t, err)
|
||||
if entry == nil {
|
||||
break
|
||||
}
|
||||
assert.NotEqual(t, Inode(0), entry.Inode())
|
||||
assert.NotEqual(t, "", entry.Name())
|
||||
// we have created all the contents of this dir and they are all
|
||||
// empty dirs.
|
||||
assert.Equal(t, DTypeDir, entry.DType())
|
||||
// get statx data from the entry, and check it
|
||||
st := entry.Statx()
|
||||
assert.Equal(t, StatxBasicStats, st.Mask&StatxBasicStats)
|
||||
assert.Equal(t, uint16(0755), st.Mode&0777)
|
||||
found = append(found, entry.Name())
|
||||
}
|
||||
assert.Subset(t, found, subdirs)
|
||||
})
|
||||
}
|
||||
|
||||
func TestReadDirPlusInvalid(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
dir, err := mount.OpenDir("/")
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, dir)
|
||||
defer func() { assert.NoError(t, dir.Close()) }()
|
||||
|
||||
// Feed it an invalid flag to trigger in EINVAL in libcephfs. This could
|
||||
// break in the future if it ever becomes a valid flag but it works well
|
||||
// enough for now, and the error suddenly changing to no error will be
|
||||
// kinda obvious.
|
||||
_, err = dir.ReadDirPlus(StatxBasicStats, AtFlags(1<<13))
|
||||
assert.Error(t, err)
|
||||
}
|
||||
4
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/doc.go
Normal file
4
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/doc.go
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
/*
|
||||
Package cephfs contains a set of wrappers around Ceph's libcephfs API.
|
||||
*/
|
||||
package cephfs
|
||||
51
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/errors.go
Normal file
51
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/errors.go
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
package cephfs
|
||||
|
||||
/*
|
||||
#include <errno.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/ceph/go-ceph/internal/errutil"
|
||||
)
|
||||
|
||||
func getError(e C.int) error {
|
||||
return errutil.GetError("cephfs", int(e))
|
||||
}
|
||||
|
||||
// getErrorIfNegative converts a ceph return code to error if negative.
|
||||
// This is useful for functions that return a usable positive value on
|
||||
// success but a negative error number on error.
|
||||
func getErrorIfNegative(ret C.int) error {
|
||||
if ret >= 0 {
|
||||
return nil
|
||||
}
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// Public go errors:
|
||||
|
||||
var (
|
||||
// ErrEmptyArgument may be returned if a function argument is passed
|
||||
// a zero-length slice or map.
|
||||
ErrEmptyArgument = errors.New("Argument must contain at least one item")
|
||||
|
||||
// ErrNotConnected may be returned when client is not connected
|
||||
// to a cluster.
|
||||
ErrNotConnected = getError(-C.ENOTCONN)
|
||||
// ErrNotExist indicates a non-specific missing resource.
|
||||
ErrNotExist = getError(-C.ENOENT)
|
||||
// ErrOpNotSupported is returned in general for operations that are not
|
||||
// supported.
|
||||
ErrOpNotSupported = getError(-C.EOPNOTSUPP)
|
||||
|
||||
// Private errors:
|
||||
|
||||
errInvalid = getError(-C.EINVAL)
|
||||
errNameTooLong = getError(-C.ENAMETOOLONG)
|
||||
errRange = getError(-C.ERANGE)
|
||||
errBadFile = getError(-C.EBADF)
|
||||
errNotDir = getError(-C.ENOTDIR)
|
||||
)
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
package cephfs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestCephFSError(t *testing.T) {
|
||||
err := getError(0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = getError(-5) // IO error
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, err.Error(), "cephfs: ret=-5, Input/output error")
|
||||
|
||||
errno, ok := err.(interface{ ErrorCode() int })
|
||||
assert.True(t, ok)
|
||||
require.NotNil(t, errno)
|
||||
assert.Equal(t, errno.ErrorCode(), -5)
|
||||
|
||||
err = getError(345) // no such errno
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, err.Error(), "cephfs: ret=345")
|
||||
}
|
||||
440
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/file.go
Normal file
440
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/file.go
Normal file
|
|
@ -0,0 +1,440 @@
|
|||
package cephfs
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lcephfs
|
||||
#cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64
|
||||
#define _GNU_SOURCE
|
||||
#include <stdlib.h>
|
||||
#include <fcntl.h>
|
||||
#include <cephfs/libcephfs.h>
|
||||
|
||||
int _go_ceph_fchown(struct ceph_mount_info *cmount, int fd, uid_t uid, gid_t gid) {
|
||||
return ceph_fchown(cmount, fd, uid, gid);
|
||||
}
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"io"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ceph/go-ceph/internal/cutil"
|
||||
)
|
||||
|
||||
const (
|
||||
// SeekSet is used with Seek to set the absolute position in the file.
|
||||
SeekSet = int(C.SEEK_SET)
|
||||
// SeekCur is used with Seek to position the file relative to the current
|
||||
// position.
|
||||
SeekCur = int(C.SEEK_CUR)
|
||||
// SeekEnd is used with Seek to position the file relative to the end.
|
||||
SeekEnd = int(C.SEEK_END)
|
||||
)
|
||||
|
||||
// SyncChoice is used to control how metadata and/or data is sync'ed to
|
||||
// the file system.
|
||||
type SyncChoice int
|
||||
|
||||
const (
|
||||
// SyncAll will synchronize both data and metadata.
|
||||
SyncAll = SyncChoice(0)
|
||||
// SyncDataOnly will synchronize only data.
|
||||
SyncDataOnly = SyncChoice(1)
|
||||
)
|
||||
|
||||
// File represents an open file descriptor in cephfs.
|
||||
type File struct {
|
||||
mount *MountInfo
|
||||
fd C.int
|
||||
}
|
||||
|
||||
// Open a file at the given path. The flags are the same os flags as
|
||||
// a local open call. Mode is the same mode bits as a local open call.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_open(struct ceph_mount_info *cmount, const char *path, int flags, mode_t mode);
|
||||
func (mount *MountInfo) Open(path string, flags int, mode uint32) (*File, error) {
|
||||
if mount.mount == nil {
|
||||
return nil, ErrNotConnected
|
||||
}
|
||||
cPath := C.CString(path)
|
||||
defer C.free(unsafe.Pointer(cPath))
|
||||
ret := C.ceph_open(mount.mount, cPath, C.int(flags), C.mode_t(mode))
|
||||
if ret < 0 {
|
||||
return nil, getError(ret)
|
||||
}
|
||||
return &File{mount: mount, fd: ret}, nil
|
||||
}
|
||||
|
||||
func (f *File) validate() error {
|
||||
if f.mount == nil {
|
||||
return ErrNotConnected
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close the file.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_close(struct ceph_mount_info *cmount, int fd);
|
||||
func (f *File) Close() error {
|
||||
if f.fd == -1 {
|
||||
// already closed
|
||||
return nil
|
||||
}
|
||||
if err := f.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := getError(C.ceph_close(f.mount.mount, f.fd)); err != nil {
|
||||
return err
|
||||
}
|
||||
f.fd = -1
|
||||
return nil
|
||||
}
|
||||
|
||||
// read directly wraps the ceph_read call. Because read is such a common
|
||||
// operation we deviate from the ceph naming and expose Read and ReadAt
|
||||
// wrappers for external callers of the library.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_read(struct ceph_mount_info *cmount, int fd, char *buf, int64_t size, int64_t offset);
|
||||
func (f *File) read(buf []byte, offset int64) (int, error) {
|
||||
if err := f.validate(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(buf) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
bufptr := (*C.char)(unsafe.Pointer(&buf[0]))
|
||||
ret := C.ceph_read(
|
||||
f.mount.mount, f.fd, bufptr, C.int64_t(len(buf)), C.int64_t(offset))
|
||||
switch {
|
||||
case ret < 0:
|
||||
return 0, getError(ret)
|
||||
case ret == 0:
|
||||
return 0, io.EOF
|
||||
}
|
||||
return int(ret), nil
|
||||
}
|
||||
|
||||
// Read data from file. Up to len(buf) bytes will be read from the file.
|
||||
// The number of bytes read will be returned.
|
||||
// When nothing is left to read from the file, Read returns, 0, io.EOF.
|
||||
func (f *File) Read(buf []byte) (int, error) {
|
||||
// to-consider: should we mimic Go's behavior of returning an
|
||||
// io.ErrShortWrite error if write length < buf size?
|
||||
return f.read(buf, -1)
|
||||
}
|
||||
|
||||
// ReadAt will read data from the file starting at the given offset.
|
||||
// Up to len(buf) bytes will be read from the file.
|
||||
// The number of bytes read will be returned.
|
||||
// When nothing is left to read from the file, ReadAt returns, 0, io.EOF.
|
||||
func (f *File) ReadAt(buf []byte, offset int64) (int, error) {
|
||||
if offset < 0 {
|
||||
return 0, errInvalid
|
||||
}
|
||||
return f.read(buf, offset)
|
||||
}
|
||||
|
||||
// Preadv will read data from the file, starting at the given offset,
|
||||
// into the byte-slice data buffers sequentially.
|
||||
// The number of bytes read will be returned.
|
||||
// When nothing is left to read from the file the return values will be:
|
||||
// 0, io.EOF.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_preadv(struct ceph_mount_info *cmount, int fd, const struct iovec *iov, int iovcnt,
|
||||
// int64_t offset);
|
||||
func (f *File) Preadv(data [][]byte, offset int64) (int, error) {
|
||||
if err := f.validate(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
iov := cutil.ByteSlicesToIovec(data)
|
||||
defer iov.Free()
|
||||
|
||||
ret := C.ceph_preadv(
|
||||
f.mount.mount,
|
||||
f.fd,
|
||||
(*C.struct_iovec)(iov.Pointer()),
|
||||
C.int(iov.Len()),
|
||||
C.int64_t(offset))
|
||||
switch {
|
||||
case ret < 0:
|
||||
return 0, getError(ret)
|
||||
case ret == 0:
|
||||
return 0, io.EOF
|
||||
}
|
||||
iov.Sync()
|
||||
return int(ret), nil
|
||||
}
|
||||
|
||||
// write directly wraps the ceph_write call. Because write is such a common
|
||||
// operation we deviate from the ceph naming and expose Write and WriteAt
|
||||
// wrappers for external callers of the library.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_write(struct ceph_mount_info *cmount, int fd, const char *buf,
|
||||
// int64_t size, int64_t offset);
|
||||
func (f *File) write(buf []byte, offset int64) (int, error) {
|
||||
if err := f.validate(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(buf) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
bufptr := (*C.char)(unsafe.Pointer(&buf[0]))
|
||||
ret := C.ceph_write(
|
||||
f.mount.mount, f.fd, bufptr, C.int64_t(len(buf)), C.int64_t(offset))
|
||||
if ret < 0 {
|
||||
return 0, getError(ret)
|
||||
}
|
||||
return int(ret), nil
|
||||
}
|
||||
|
||||
// Write data from buf to the file.
|
||||
// The number of bytes written is returned.
|
||||
func (f *File) Write(buf []byte) (int, error) {
|
||||
return f.write(buf, -1)
|
||||
}
|
||||
|
||||
// WriteAt writes data from buf to the file at the specified offset.
|
||||
// The number of bytes written is returned.
|
||||
func (f *File) WriteAt(buf []byte, offset int64) (int, error) {
|
||||
if offset < 0 {
|
||||
return 0, errInvalid
|
||||
}
|
||||
return f.write(buf, offset)
|
||||
}
|
||||
|
||||
// Pwritev writes data from the slice of byte-slice buffers to the file at the
|
||||
// specified offset.
|
||||
// The number of bytes written is returned.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_pwritev(struct ceph_mount_info *cmount, int fd, const struct iovec *iov, int iovcnt,
|
||||
// int64_t offset);
|
||||
func (f *File) Pwritev(data [][]byte, offset int64) (int, error) {
|
||||
if err := f.validate(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
iov := cutil.ByteSlicesToIovec(data)
|
||||
defer iov.Free()
|
||||
|
||||
ret := C.ceph_pwritev(
|
||||
f.mount.mount,
|
||||
f.fd,
|
||||
(*C.struct_iovec)(iov.Pointer()),
|
||||
C.int(iov.Len()),
|
||||
C.int64_t(offset))
|
||||
if ret < 0 {
|
||||
return 0, getError(ret)
|
||||
}
|
||||
return int(ret), nil
|
||||
}
|
||||
|
||||
// Seek will reposition the file stream based on the given offset.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int64_t ceph_lseek(struct ceph_mount_info *cmount, int fd, int64_t offset, int whence);
|
||||
func (f *File) Seek(offset int64, whence int) (int64, error) {
|
||||
if err := f.validate(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// validate the seek whence value in case the caller skews
|
||||
// from the seek values we technically support from C as documented.
|
||||
// TODO: need to support seek-(hole|data) in mimic and later.
|
||||
switch whence {
|
||||
case SeekSet, SeekCur, SeekEnd:
|
||||
default:
|
||||
return 0, errInvalid
|
||||
}
|
||||
|
||||
ret := C.ceph_lseek(f.mount.mount, f.fd, C.int64_t(offset), C.int(whence))
|
||||
if ret < 0 {
|
||||
return 0, getError(C.int(ret))
|
||||
}
|
||||
return int64(ret), nil
|
||||
}
|
||||
|
||||
// Fchmod changes the mode bits (permissions) of a file.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_fchmod(struct ceph_mount_info *cmount, int fd, mode_t mode);
|
||||
func (f *File) Fchmod(mode uint32) error {
|
||||
if err := f.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ret := C.ceph_fchmod(f.mount.mount, f.fd, C.mode_t(mode))
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// Fchown changes the ownership of a file.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_fchown(struct ceph_mount_info *cmount, int fd, uid_t uid, gid_t gid);
|
||||
func (f *File) Fchown(user uint32, group uint32) error {
|
||||
if err := f.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ret := C._go_ceph_fchown(f.mount.mount, f.fd, C.uid_t(user), C.gid_t(group))
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// Fstatx returns information about an open file.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_fstatx(struct ceph_mount_info *cmount, int fd, struct ceph_statx *stx,
|
||||
// unsigned int want, unsigned int flags);
|
||||
func (f *File) Fstatx(want StatxMask, flags AtFlags) (*CephStatx, error) {
|
||||
if err := f.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var stx C.struct_ceph_statx
|
||||
ret := C.ceph_fstatx(
|
||||
f.mount.mount,
|
||||
f.fd,
|
||||
&stx,
|
||||
C.uint(want),
|
||||
C.uint(flags),
|
||||
)
|
||||
if err := getError(ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cStructToCephStatx(stx), nil
|
||||
}
|
||||
|
||||
// FallocFlags represent flags which determine the operation to be
|
||||
// performed on the given range.
|
||||
// CephFS supports only following two flags.
|
||||
type FallocFlags int
|
||||
|
||||
const (
|
||||
// FallocNoFlag means default option.
|
||||
FallocNoFlag = FallocFlags(0)
|
||||
// FallocFlKeepSize specifies that the file size will not be changed.
|
||||
FallocFlKeepSize = FallocFlags(C.FALLOC_FL_KEEP_SIZE)
|
||||
// FallocFlPunchHole specifies that the operation is to deallocate
|
||||
// space and zero the byte range.
|
||||
FallocFlPunchHole = FallocFlags(C.FALLOC_FL_PUNCH_HOLE)
|
||||
)
|
||||
|
||||
// Fallocate preallocates or releases disk space for the file for the
|
||||
// given byte range, the flags determine the operation to be performed
|
||||
// on the given range.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_fallocate(struct ceph_mount_info *cmount, int fd, int mode,
|
||||
// int64_t offset, int64_t length);
|
||||
func (f *File) Fallocate(mode FallocFlags, offset, length int64) error {
|
||||
if err := f.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
ret := C.ceph_fallocate(f.mount.mount, f.fd, C.int(mode), C.int64_t(offset), C.int64_t(length))
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// LockOp determines operations/type of locks which can be applied on a file.
|
||||
type LockOp int
|
||||
|
||||
const (
|
||||
// LockSH places a shared lock.
|
||||
// More than one process may hold a shared lock for a given file at a given time.
|
||||
LockSH = LockOp(C.LOCK_SH)
|
||||
// LockEX places an exclusive lock.
|
||||
// Only one process may hold an exclusive lock for a given file at a given time.
|
||||
LockEX = LockOp(C.LOCK_EX)
|
||||
// LockUN removes an existing lock held by this process.
|
||||
LockUN = LockOp(C.LOCK_UN)
|
||||
// LockNB can be ORed with any of the above to make a nonblocking call.
|
||||
LockNB = LockOp(C.LOCK_NB)
|
||||
)
|
||||
|
||||
// Flock applies or removes an advisory lock on an open file.
|
||||
// Param owner is the user-supplied identifier for the owner of the
|
||||
// lock, must be an arbitrary integer.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_flock(struct ceph_mount_info *cmount, int fd, int operation, uint64_t owner);
|
||||
func (f *File) Flock(operation LockOp, owner uint64) error {
|
||||
if err := f.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// validate the operation values before passing it on.
|
||||
switch operation &^ LockNB {
|
||||
case LockSH, LockEX, LockUN:
|
||||
default:
|
||||
return errInvalid
|
||||
}
|
||||
|
||||
ret := C.ceph_flock(f.mount.mount, f.fd, C.int(operation), C.uint64_t(owner))
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// Fsync ensures the file content that may be cached is committed to stable
|
||||
// storage.
|
||||
// Pass SyncAll to have this call behave like standard fsync and synchronize
|
||||
// all data and metadata.
|
||||
// Pass SyncDataOnly to have this call behave more like fdatasync (on linux).
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_fsync(struct ceph_mount_info *cmount, int fd, int syncdataonly);
|
||||
func (f *File) Fsync(sync SyncChoice) error {
|
||||
if err := f.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ret := C.ceph_fsync(
|
||||
f.mount.mount,
|
||||
f.fd,
|
||||
C.int(sync),
|
||||
)
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// Sync ensures the file content that may be cached is committed to stable
|
||||
// storage.
|
||||
// Sync behaves like Go's os package File.Sync function.
|
||||
func (f *File) Sync() error {
|
||||
return f.Fsync(SyncAll)
|
||||
}
|
||||
|
||||
// Truncate sets the size of the open file.
|
||||
// NOTE: In some versions of ceph a bug exists where calling ftruncate on a
|
||||
// file open for read-only is permitted. The go-ceph wrapper does no additional
|
||||
// checking and will inherit the issue on affected versions of ceph. Please
|
||||
// refer to the following issue for details:
|
||||
// https://tracker.ceph.com/issues/48202
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_ftruncate(struct ceph_mount_info *cmount, int fd, int64_t size);
|
||||
func (f *File) Truncate(size int64) error {
|
||||
if err := f.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ret := C.ceph_ftruncate(
|
||||
f.mount.mount,
|
||||
f.fd,
|
||||
C.int64_t(size),
|
||||
)
|
||||
return getError(ret)
|
||||
}
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
//go:build main
|
||||
|
||||
package cephfs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestFallocateModeZeroUnsupported and this test file exists merely to track
|
||||
// the backports for https://tracker.ceph.com/issues/68026. Once they are
|
||||
// available with release versions this can probably vanish.
|
||||
func TestFallocateModeZeroUnsupported(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
fname := "file1.txt"
|
||||
f, err := mount.Open(fname, os.O_RDWR|os.O_CREATE, 0644)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, f)
|
||||
defer func() {
|
||||
assert.NoError(t, f.Close())
|
||||
assert.NoError(t, mount.Unlink(fname))
|
||||
}()
|
||||
|
||||
err = f.Fallocate(FallocNoFlag, 0, 10)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrOpNotSupported, err)
|
||||
}
|
||||
17
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/file_fd.go
Normal file
17
pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/file_fd.go
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
//go:build ceph_preview
|
||||
|
||||
package cephfs
|
||||
|
||||
// Fd returns the integer open file descriptor in cephfs.
|
||||
// NOTE: It doesn't make sense to consume the returned integer fd anywhere
|
||||
// outside CephFS and is recommended not to do so given the undefined behaviour.
|
||||
// Also, as seen with the Go standard library, the fd is only valid as long as
|
||||
// the corresponding File object is intact in the sense that an fd from a closed
|
||||
// File object is invalid.
|
||||
func (f *File) Fd() int {
|
||||
if f == nil || f.mount == nil {
|
||||
return -1
|
||||
}
|
||||
|
||||
return int(f.fd)
|
||||
}
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
//go:build ceph_preview
|
||||
|
||||
package cephfs
|
||||
|
||||
// Futime changes file/directory last access and modification times.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_futime(struct ceph_mount_info *cmount, int fd, struct utimbuf *buf);
|
||||
func (f *File) Futime(times *Utime) error {
|
||||
if err := f.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return f.mount.Futime(int(f.fd), times)
|
||||
}
|
||||
|
||||
// Futimens changes file/directory last access and modification times, here times param
|
||||
// is an array of Timespec struct having length 2, where times[0] represents the access time
|
||||
// and times[1] represents the modification time.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_futimens(struct ceph_mount_info *cmount, int fd, struct timespec times[2]);
|
||||
func (f *File) Futimens(times []Timespec) error {
|
||||
if err := f.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return f.mount.Futimens(int(f.fd), times)
|
||||
}
|
||||
|
||||
// Futimes changes file/directory last access and modification times, here times param
|
||||
// is an array of Timeval struct type having length 2, where times[0] represents the access time
|
||||
// and times[1] represents the modification time.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_futimes(struct ceph_mount_info *cmount, int fd, struct timeval times[2]);
|
||||
func (f *File) Futimes(times []Timeval) error {
|
||||
if err := f.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return f.mount.Futimes(int(f.fd), times)
|
||||
}
|
||||
|
|
@ -0,0 +1,160 @@
|
|||
//go:build ceph_preview
|
||||
|
||||
package cephfs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestFileFutime(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
fname := "futime_file.txt"
|
||||
f1, err := mount.Open(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, f1)
|
||||
assert.NotEqual(t, -1, f1.Fd())
|
||||
defer func() {
|
||||
assert.NoError(t, f1.Close())
|
||||
assert.NoError(t, mount.Unlink(fname))
|
||||
}()
|
||||
|
||||
currentTime := Timespec{int64(time.Now().Second()), 0}
|
||||
newTime := &Utime{
|
||||
AcTime: currentTime.Sec,
|
||||
ModTime: currentTime.Sec,
|
||||
}
|
||||
err = f1.Futime(newTime)
|
||||
assert.NoError(t, err)
|
||||
|
||||
sx, err := mount.Statx(fname, StatxBasicStats, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, currentTime, sx.Atime)
|
||||
assert.Equal(t, currentTime, sx.Mtime)
|
||||
|
||||
// Test invalid file object
|
||||
f2 := &File{}
|
||||
currentTime = Timespec{int64(time.Now().Second()), 0}
|
||||
newTime = &Utime{
|
||||
AcTime: currentTime.Sec,
|
||||
ModTime: currentTime.Sec,
|
||||
}
|
||||
err = f2.Futime(newTime)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestFileFutimens(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
fname := "futimens_file.txt"
|
||||
f1, err := mount.Open(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, f1)
|
||||
assert.NotEqual(t, -1, f1.Fd())
|
||||
defer func() {
|
||||
assert.NoError(t, f1.Close())
|
||||
assert.NoError(t, mount.Unlink(fname))
|
||||
}()
|
||||
|
||||
times := []Timespec{
|
||||
{int64(time.Now().Second()), 0},
|
||||
{int64(time.Now().Second()), 0},
|
||||
}
|
||||
err = f1.Futimens(times)
|
||||
assert.NoError(t, err)
|
||||
|
||||
sx, err := mount.Statx(fname, StatxBasicStats, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, times[0], sx.Atime)
|
||||
assert.Equal(t, times[1], sx.Mtime)
|
||||
|
||||
// Test invalid file object
|
||||
f2 := &File{}
|
||||
times = []Timespec{
|
||||
{int64(time.Now().Second()), 0},
|
||||
{int64(time.Now().Second()), 0},
|
||||
}
|
||||
err = f2.Futimens(times)
|
||||
assert.Error(t, err)
|
||||
|
||||
// Test times array length more than 2
|
||||
times = []Timespec{
|
||||
{int64(time.Now().Second()), 0},
|
||||
{int64(time.Now().Second()), 0},
|
||||
{int64(time.Now().Second()), 0},
|
||||
}
|
||||
err = f1.Futimens(times)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestFileFutimes(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
fname := "futimes_file.txt"
|
||||
f1, err := mount.Open(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, f1)
|
||||
assert.NotEqual(t, -1, f1.Fd())
|
||||
defer func() {
|
||||
assert.NoError(t, f1.Close())
|
||||
assert.NoError(t, mount.Unlink(fname))
|
||||
}()
|
||||
|
||||
times := []Timespec{
|
||||
{int64(time.Now().Second()), 0},
|
||||
{int64(time.Now().Second()), 0},
|
||||
}
|
||||
newTimes := []Timeval{}
|
||||
for _, val := range times {
|
||||
newTimes = append(newTimes, Timeval{
|
||||
Sec: val.Sec,
|
||||
USec: int64(val.Nsec / 1000),
|
||||
})
|
||||
}
|
||||
err = f1.Futimes(newTimes)
|
||||
assert.NoError(t, err)
|
||||
|
||||
sx, err := mount.Statx(fname, StatxBasicStats, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, times[0], sx.Atime)
|
||||
assert.Equal(t, times[1], sx.Mtime)
|
||||
|
||||
// Test invalid file object
|
||||
f2 := &File{}
|
||||
times = []Timespec{
|
||||
{int64(time.Now().Second()), 0},
|
||||
{int64(time.Now().Second()), 0},
|
||||
}
|
||||
newTimes = []Timeval{}
|
||||
for _, val := range times {
|
||||
newTimes = append(newTimes, Timeval{
|
||||
Sec: val.Sec,
|
||||
USec: int64(val.Nsec / 1000),
|
||||
})
|
||||
}
|
||||
err = f2.Futimes(newTimes)
|
||||
assert.Error(t, err)
|
||||
|
||||
// Test times array length more than 2
|
||||
times = []Timespec{
|
||||
{int64(time.Now().Second()), 0},
|
||||
{int64(time.Now().Second()), 0},
|
||||
{int64(time.Now().Second()), 0},
|
||||
}
|
||||
newTimes = []Timeval{}
|
||||
for _, val := range times {
|
||||
newTimes = append(newTimes, Timeval{
|
||||
Sec: val.Sec,
|
||||
USec: int64(val.Nsec / 1000),
|
||||
})
|
||||
}
|
||||
err = f1.Futimes(newTimes)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
132  pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/file_ops.go  Normal file
@@ -0,0 +1,132 @@
//go:build !nautilus
// +build !nautilus

package cephfs

/*
#cgo LDFLAGS: -lcephfs
#cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64
#include <errno.h>
#include <stdlib.h>
#include <cephfs/libcephfs.h>
*/
import "C"

import (
	ts "github.com/ceph/go-ceph/internal/timespec"
	"unsafe"
)

// Mknod creates a regular, block or character special file.
//
// Implements:
//
//  int ceph_mknod(struct ceph_mount_info *cmount, const char *path, mode_t mode,
//                 dev_t rdev);
func (mount *MountInfo) Mknod(path string, mode uint16, dev uint16) error {
	if err := mount.validate(); err != nil {
		return err
	}

	cPath := C.CString(path)
	defer C.free(unsafe.Pointer(cPath))

	ret := C.ceph_mknod(mount.mount, cPath, C.mode_t(mode), C.dev_t(dev))
	return getError(ret)
}

// Utime struct is the equivalent of C.struct_utimbuf
type Utime struct {
	// AcTime represents the file's access time in seconds since the Unix epoch.
	AcTime int64
	// ModTime represents the file's modification time in seconds since the Unix epoch.
	ModTime int64
}

// Futime changes file/directory last access and modification times.
//
// Implements:
//
//  int ceph_futime(struct ceph_mount_info *cmount, int fd, struct utimbuf *buf);
func (mount *MountInfo) Futime(fd int, times *Utime) error {
	if err := mount.validate(); err != nil {
		return err
	}

	cFd := C.int(fd)
	uTimeBuf := &C.struct_utimbuf{
		actime:  C.time_t(times.AcTime),
		modtime: C.time_t(times.ModTime),
	}

	ret := C.ceph_futime(mount.mount, cFd, uTimeBuf)
	return getError(ret)
}

// Timeval struct is the go equivalent of C.struct_timeval type
type Timeval struct {
	// Sec represents seconds
	Sec int64
	// USec represents microseconds
	USec int64
}

// Futimens changes file/directory last access and modification times, here times param
// is an array of Timespec struct having length 2, where times[0] represents the access time
// and times[1] represents the modification time.
//
// Implements:
//
//  int ceph_futimens(struct ceph_mount_info *cmount, int fd, struct timespec times[2]);
func (mount *MountInfo) Futimens(fd int, times []Timespec) error {
	if err := mount.validate(); err != nil {
		return err
	}

	if len(times) != 2 {
		return getError(-C.EINVAL)
	}

	cFd := C.int(fd)
	cTimes := []C.struct_timespec{}
	for _, val := range times {
		cTs := &C.struct_timespec{}
		ts.CopyToCStruct(
			ts.Timespec(val),
			ts.CTimespecPtr(cTs),
		)
		cTimes = append(cTimes, *cTs)
	}

	ret := C.ceph_futimens(mount.mount, cFd, &cTimes[0])
	return getError(ret)
}

// Futimes changes file/directory last access and modification times, here times param
// is an array of Timeval struct type having length 2, where times[0] represents the access time
// and times[1] represents the modification time.
//
// Implements:
//
//  int ceph_futimes(struct ceph_mount_info *cmount, int fd, struct timeval times[2]);
func (mount *MountInfo) Futimes(fd int, times []Timeval) error {
	if err := mount.validate(); err != nil {
		return err
	}

	if len(times) != 2 {
		return getError(-C.EINVAL)
	}

	cFd := C.int(fd)
	cTimes := []C.struct_timeval{}
	for _, val := range times {
		cTimes = append(cTimes, C.struct_timeval{
			tv_sec:  C.time_t(val.Sec),
			tv_usec: C.suseconds_t(val.USec),
		})
	}

	ret := C.ceph_futimes(mount.mount, cFd, &cTimes[0])
	return getError(ret)
}
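As a usage sketch (not part of the vendored file, assuming a connected *MountInfo, an open file descriptor, and the time import), the seconds-only Futime and the microsecond-resolution Futimes differ only in the time type they accept:

// stampFd is an illustrative helper: it sets both timestamps on fd,
// first with second resolution via Futime, then with microsecond
// resolution via Futimes.
func stampFd(mount *MountInfo, fd int, at time.Time) error {
	if err := mount.Futime(fd, &Utime{AcTime: at.Unix(), ModTime: at.Unix()}); err != nil {
		return err
	}
	tv := Timeval{Sec: at.Unix(), USec: int64(at.Nanosecond() / 1000)}
	return mount.Futimes(fd, []Timeval{tv, tv})
}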
205  pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/file_ops_test.go  Normal file
@@ -0,0 +1,205 @@
//go:build !nautilus
|
||||
// +build !nautilus
|
||||
|
||||
package cephfs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestMknod(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
file1 := "/file1"
|
||||
mode1 := uint16(syscall.S_IFIFO | syscall.S_IRUSR | syscall.S_IWUSR)
|
||||
err := mount.Mknod(file1, mode1, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
file2 := "/file2"
|
||||
mode2 := uint16(syscall.S_IFCHR)
|
||||
err = mount.Mknod(file2, mode2, 89)
|
||||
assert.NoError(t, err)
|
||||
|
||||
file3 := "/file3"
|
||||
mode3 := uint16(syscall.S_IFBLK)
|
||||
err = mount.Mknod(file3, mode3, 129)
|
||||
assert.NoError(t, err)
|
||||
|
||||
defer func() {
|
||||
assert.NoError(t, mount.Unlink(file1))
|
||||
assert.NoError(t, mount.Unlink(file2))
|
||||
assert.NoError(t, mount.Unlink(file3))
|
||||
}()
|
||||
|
||||
sx, err := mount.Statx(file1, StatxBasicStats, 0)
|
||||
assert.Equal(t, mode1, sx.Mode&mode1)
|
||||
|
||||
sx, err = mount.Statx(file2, StatxBasicStats, 0)
|
||||
assert.Equal(t, mode2, sx.Mode&mode2)
|
||||
assert.Equal(t, uint64(89), sx.Rdev)
|
||||
|
||||
sx, err = mount.Statx(file3, StatxBasicStats, 0)
|
||||
assert.Equal(t, mode3, sx.Mode&mode3)
|
||||
assert.Equal(t, uint64(129), sx.Rdev)
|
||||
|
||||
// Test invalid mount value
|
||||
mount1 := &MountInfo{}
|
||||
file4 := "/file4"
|
||||
err = mount1.Mknod(file4, uint16(syscall.S_IFCHR), 64)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestFutime(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
fname := "futime_file.txt"
|
||||
f1, err := mount.Open(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, f1)
|
||||
assert.NotEqual(t, -1, f1.Fd())
|
||||
defer func() {
|
||||
assert.NoError(t, f1.Close())
|
||||
assert.NoError(t, mount.Unlink(fname))
|
||||
}()
|
||||
|
||||
currentTime := Timespec{int64(time.Now().Second()), 0}
|
||||
newTime := &Utime{
|
||||
AcTime: currentTime.Sec,
|
||||
ModTime: currentTime.Sec,
|
||||
}
|
||||
err = mount.Futime(f1.Fd(), newTime)
|
||||
assert.NoError(t, err)
|
||||
|
||||
sx, err := mount.Statx(fname, StatxBasicStats, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, currentTime, sx.Atime)
|
||||
assert.Equal(t, currentTime, sx.Mtime)
|
||||
|
||||
// Test invalid mount value
|
||||
mount1 := &MountInfo{}
|
||||
currentTime = Timespec{int64(time.Now().Second()), 0}
|
||||
newTime = &Utime{
|
||||
AcTime: currentTime.Sec,
|
||||
ModTime: currentTime.Sec,
|
||||
}
|
||||
err = mount1.Futime(f1.Fd(), newTime)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestFutimens(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
fname := "futimens_file.txt"
|
||||
f1, err := mount.Open(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, f1)
|
||||
assert.NotEqual(t, -1, f1.Fd())
|
||||
defer func() {
|
||||
assert.NoError(t, f1.Close())
|
||||
assert.NoError(t, mount.Unlink(fname))
|
||||
}()
|
||||
|
||||
times := []Timespec{
|
||||
{int64(time.Now().Second()), 0},
|
||||
{int64(time.Now().Second()), 0},
|
||||
}
|
||||
err = mount.Futimens(f1.Fd(), times)
|
||||
assert.NoError(t, err)
|
||||
|
||||
sx, err := mount.Statx(fname, StatxBasicStats, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, times[0], sx.Atime)
|
||||
assert.Equal(t, times[1], sx.Mtime)
|
||||
|
||||
// Test invalid mount value
|
||||
mount1 := &MountInfo{}
|
||||
times = []Timespec{
|
||||
{int64(time.Now().Second()), 0},
|
||||
{int64(time.Now().Second()), 0},
|
||||
}
|
||||
err = mount1.Futimens(f1.Fd(), times)
|
||||
assert.Error(t, err)
|
||||
|
||||
// Test times array length more than 2
|
||||
times = []Timespec{
|
||||
{int64(time.Now().Second()), 0},
|
||||
{int64(time.Now().Second()), 0},
|
||||
{int64(time.Now().Second()), 0},
|
||||
}
|
||||
err = mount.Futimens(f1.Fd(), times)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestFutimes(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
fname := "futimes_file.txt"
|
||||
f1, err := mount.Open(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, f1)
|
||||
assert.NotEqual(t, -1, f1.Fd())
|
||||
defer func() {
|
||||
assert.NoError(t, f1.Close())
|
||||
assert.NoError(t, mount.Unlink(fname))
|
||||
}()
|
||||
|
||||
times := []Timespec{
|
||||
{int64(time.Now().Second()), 0},
|
||||
{int64(time.Now().Second()), 0},
|
||||
}
|
||||
newTimes := []Timeval{}
|
||||
for _, val := range times {
|
||||
newTimes = append(newTimes, Timeval{
|
||||
Sec: val.Sec,
|
||||
USec: int64(val.Nsec / 1000),
|
||||
})
|
||||
}
|
||||
err = mount.Futimes(f1.Fd(), newTimes)
|
||||
assert.NoError(t, err)
|
||||
|
||||
sx, err := mount.Statx(fname, StatxBasicStats, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, times[0], sx.Atime)
|
||||
assert.Equal(t, times[1], sx.Mtime)
|
||||
|
||||
// Test invalid mount value
|
||||
mount1 := &MountInfo{}
|
||||
times = []Timespec{
|
||||
{int64(time.Now().Second()), 0},
|
||||
{int64(time.Now().Second()), 0},
|
||||
}
|
||||
newTimes = []Timeval{}
|
||||
for _, val := range times {
|
||||
newTimes = append(newTimes, Timeval{
|
||||
Sec: val.Sec,
|
||||
USec: int64(val.Nsec / 1000),
|
||||
})
|
||||
}
|
||||
err = mount1.Futimes(f1.Fd(), newTimes)
|
||||
assert.Error(t, err)
|
||||
|
||||
// Test times array length more than 2
|
||||
times = []Timespec{
|
||||
{int64(time.Now().Second()), 0},
|
||||
{int64(time.Now().Second()), 0},
|
||||
{int64(time.Now().Second()), 0},
|
||||
}
|
||||
newTimes = []Timeval{}
|
||||
for _, val := range times {
|
||||
newTimes = append(newTimes, Timeval{
|
||||
Sec: val.Sec,
|
||||
USec: int64(val.Nsec / 1000),
|
||||
})
|
||||
}
|
||||
err = mount.Futimes(f1.Fd(), newTimes)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
1012  pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/file_test.go  Normal file (diff suppressed because it is too large)
163  pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/file_xattr.go  Normal file
@@ -0,0 +1,163 @@
package cephfs
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lcephfs
|
||||
#cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64
|
||||
#define _GNU_SOURCE
|
||||
#include <stdlib.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/xattr.h>
|
||||
#include <cephfs/libcephfs.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"github.com/ceph/go-ceph/internal/cutil"
|
||||
"github.com/ceph/go-ceph/internal/retry"
|
||||
)
|
||||
|
||||
// XattrFlags are used to control the behavior of set-xattr calls.
|
||||
type XattrFlags int
|
||||
|
||||
const (
|
||||
// XattrDefault specifies that set-xattr calls use the default behavior of
|
||||
// creating or updating an xattr.
|
||||
XattrDefault = XattrFlags(0)
|
||||
// XattrCreate specifies that set-xattr calls only set new xattrs.
|
||||
XattrCreate = XattrFlags(C.XATTR_CREATE)
|
||||
// XattrReplace specifies that set-xattr calls only replace existing xattr
|
||||
// values.
|
||||
XattrReplace = XattrFlags(C.XATTR_REPLACE)
|
||||
)
|
||||
|
||||
// SetXattr sets an extended attribute on the open file.
|
||||
//
|
||||
// NOTE: Attempting to set an xattr value with an empty value may cause the
|
||||
// xattr to be unset on some older versions of ceph.
|
||||
// Please refer to https://tracker.ceph.com/issues/46084
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_fsetxattr(struct ceph_mount_info *cmount, int fd, const char *name,
|
||||
// const void *value, size_t size, int flags);
|
||||
func (f *File) SetXattr(name string, value []byte, flags XattrFlags) error {
|
||||
if err := f.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if name == "" {
|
||||
return errInvalid
|
||||
}
|
||||
var vptr unsafe.Pointer
|
||||
if len(value) > 0 {
|
||||
vptr = unsafe.Pointer(&value[0])
|
||||
}
|
||||
cName := C.CString(name)
|
||||
defer C.free(unsafe.Pointer(cName))
|
||||
|
||||
ret := C.ceph_fsetxattr(
|
||||
f.mount.mount,
|
||||
f.fd,
|
||||
cName,
|
||||
vptr,
|
||||
C.size_t(len(value)),
|
||||
C.int(flags))
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// GetXattr gets an extended attribute from the open file.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_fgetxattr(struct ceph_mount_info *cmount, int fd, const char *name,
|
||||
// void *value, size_t size);
|
||||
func (f *File) GetXattr(name string) ([]byte, error) {
|
||||
if err := f.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if name == "" {
|
||||
return nil, errInvalid
|
||||
}
|
||||
cName := C.CString(name)
|
||||
defer C.free(unsafe.Pointer(cName))
|
||||
|
||||
var (
|
||||
ret C.int
|
||||
err error
|
||||
buf []byte
|
||||
)
|
||||
// range from 1k to 64KiB
|
||||
retry.WithSizes(1024, 1<<16, func(size int) retry.Hint {
|
||||
buf = make([]byte, size)
|
||||
ret = C.ceph_fgetxattr(
|
||||
f.mount.mount,
|
||||
f.fd,
|
||||
cName,
|
||||
unsafe.Pointer(&buf[0]),
|
||||
C.size_t(size))
|
||||
err = getErrorIfNegative(ret)
|
||||
return retry.DoubleSize.If(err == errRange)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf[:ret], nil
|
||||
}
|
||||
|
||||
// ListXattr returns a slice containing strings for the name of each xattr set
|
||||
// on the file.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_flistxattr(struct ceph_mount_info *cmount, int fd, char *list, size_t size);
|
||||
func (f *File) ListXattr() ([]string, error) {
|
||||
if err := f.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
ret C.int
|
||||
err error
|
||||
buf []byte
|
||||
)
|
||||
// range from 1k to 64KiB
|
||||
retry.WithSizes(1024, 1<<16, func(size int) retry.Hint {
|
||||
buf = make([]byte, size)
|
||||
ret = C.ceph_flistxattr(
|
||||
f.mount.mount,
|
||||
f.fd,
|
||||
(*C.char)(unsafe.Pointer(&buf[0])),
|
||||
C.size_t(size))
|
||||
err = getErrorIfNegative(ret)
|
||||
return retry.DoubleSize.If(err == errRange)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
names := cutil.SplitSparseBuffer(buf[:ret])
|
||||
return names, nil
|
||||
}
|
||||
|
||||
// RemoveXattr removes the named xattr from the open file.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_fremovexattr(struct ceph_mount_info *cmount, int fd, const char *name);
|
||||
func (f *File) RemoveXattr(name string) error {
|
||||
if err := f.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if name == "" {
|
||||
return errInvalid
|
||||
}
|
||||
cName := C.CString(name)
|
||||
defer C.free(unsafe.Pointer(cName))
|
||||
|
||||
ret := C.ceph_fremovexattr(
|
||||
f.mount.mount,
|
||||
f.fd,
|
||||
cName)
|
||||
return getError(ret)
|
||||
}
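A short roundtrip sketch tying the four xattr calls together (illustrative only, not part of the vendored file; it assumes an open *File and uses a hypothetical attribute name with the usual user. prefix):

// xattrRoundTrip sets, reads, lists and finally removes a single
// user xattr on an open file.
func xattrRoundTrip(f *File) error {
	const name = "user.example" // hypothetical attribute name
	if err := f.SetXattr(name, []byte("value"), XattrDefault); err != nil {
		return err
	}
	if _, err := f.GetXattr(name); err != nil {
		return err
	}
	if _, err := f.ListXattr(); err != nil {
		return err
	}
	return f.RemoveXattr(name)
}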
@@ -0,0 +1,160 @@
package cephfs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var xattrSamples = []struct {
|
||||
name string
|
||||
value []byte
|
||||
}{
|
||||
{
|
||||
name: "user.xPhrase",
|
||||
value: []byte("june and july"),
|
||||
},
|
||||
{
|
||||
name: "user.xHasNulls",
|
||||
value: []byte("\x00got\x00null?\x00"),
|
||||
},
|
||||
{
|
||||
name: "user.x2kZeros",
|
||||
value: make([]byte, 2048),
|
||||
},
|
||||
// Older versions of ceph had a bug where using an empty value caused the
|
||||
// xattr to be unset. This has been fixed for nautilus and octopus.
|
||||
{
|
||||
name: "user.xEmpty",
|
||||
value: []byte(""),
|
||||
},
|
||||
}
|
||||
|
||||
func TestGetSetXattr(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
fname := "TestGetSetXattr.txt"
|
||||
|
||||
f, err := mount.Open(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, f.Close())
|
||||
assert.NoError(t, mount.Unlink(fname))
|
||||
}()
|
||||
|
||||
for _, s := range xattrSamples {
|
||||
t.Run("roundTrip-"+s.name, func(t *testing.T) {
|
||||
err := f.SetXattr(s.name, s.value, XattrDefault)
|
||||
assert.NoError(t, err)
|
||||
b, err := f.GetXattr(s.name)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, s.value, b)
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("missingXattrOnGet", func(t *testing.T) {
|
||||
_, err := f.GetXattr("user.never-set")
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("emptyNameGet", func(t *testing.T) {
|
||||
_, err := f.GetXattr("")
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("emptyNameSet", func(t *testing.T) {
|
||||
err := f.SetXattr("", []byte("foo"), XattrDefault)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("invalidFile", func(t *testing.T) {
|
||||
f1 := &File{}
|
||||
err := f1.SetXattr(xattrSamples[0].name, xattrSamples[0].value, XattrDefault)
|
||||
assert.Error(t, err)
|
||||
_, err = f1.GetXattr(xattrSamples[0].name)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestListXattr(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
fname := "TestListXattr.txt"
|
||||
|
||||
f, err := mount.Open(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, f.Close())
|
||||
assert.NoError(t, mount.Unlink(fname))
|
||||
}()
|
||||
|
||||
t.Run("listXattrs1", func(t *testing.T) {
|
||||
for _, s := range xattrSamples[:1] {
|
||||
err := f.SetXattr(s.name, s.value, XattrDefault)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
xl, err := f.ListXattr()
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, xl, 1)
|
||||
assert.Contains(t, xl, xattrSamples[0].name)
|
||||
})
|
||||
|
||||
t.Run("listXattrs2", func(t *testing.T) {
|
||||
for _, s := range xattrSamples {
|
||||
err := f.SetXattr(s.name, s.value, XattrDefault)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
xl, err := f.ListXattr()
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, xl, 4)
|
||||
assert.Contains(t, xl, xattrSamples[0].name)
|
||||
assert.Contains(t, xl, xattrSamples[1].name)
|
||||
assert.Contains(t, xl, xattrSamples[2].name)
|
||||
assert.Contains(t, xl, xattrSamples[3].name)
|
||||
})
|
||||
|
||||
t.Run("invalidFile", func(t *testing.T) {
|
||||
f1 := &File{}
|
||||
_, err := f1.ListXattr()
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRemoveXattr(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
fname := "TestRemoveXattr.txt"
|
||||
|
||||
f, err := mount.Open(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, f.Close())
|
||||
assert.NoError(t, mount.Unlink(fname))
|
||||
}()
|
||||
|
||||
t.Run("removeXattr", func(t *testing.T) {
|
||||
s := xattrSamples[0]
|
||||
err := f.SetXattr(s.name, s.value, XattrDefault)
|
||||
err = f.RemoveXattr(s.name)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("removeMissingXattr", func(t *testing.T) {
|
||||
s := xattrSamples[1]
|
||||
err := f.RemoveXattr(s.name)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("emptyName", func(t *testing.T) {
|
||||
err := f.RemoveXattr("")
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("invalidFile", func(t *testing.T) {
|
||||
f1 := &File{}
|
||||
err := f1.RemoveXattr(xattrSamples[0].name)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
406  pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/fscompat.go  Normal file
@@ -0,0 +1,406 @@
package cephfs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ceph/go-ceph/internal/log"
|
||||
)
|
||||
|
||||
var (
|
||||
errIsDir = errors.New("is a directory")
|
||||
)
|
||||
|
||||
// MountWrapper provides a wrapper type that adapts a CephFS Mount into a
|
||||
// io.FS compatible type.
|
||||
type MountWrapper struct {
|
||||
mount *MountInfo
|
||||
enableTrace bool
|
||||
}
|
||||
|
||||
type fileWrapper struct {
|
||||
parent *MountWrapper
|
||||
file *File
|
||||
name string
|
||||
}
|
||||
|
||||
type dirWrapper struct {
|
||||
parent *MountWrapper
|
||||
directory *Directory
|
||||
name string
|
||||
}
|
||||
|
||||
type dentryWrapper struct {
|
||||
parent *MountWrapper
|
||||
de *DirEntryPlus
|
||||
}
|
||||
|
||||
type infoWrapper struct {
|
||||
parent *MountWrapper
|
||||
sx *CephStatx
|
||||
name string
|
||||
}
|
||||
|
||||
// Wrap a CephFS Mount object into a new type that is compatible with Go's io.FS
|
||||
// interface. CephFS Mounts are not compatible with io.FS directly because the
|
||||
// go-ceph library predates the addition of io.FS to Go as well as the fact that
|
||||
// go-ceph attempts to provide APIs that match the cephfs libraries first and
|
||||
// foremost.
|
||||
func Wrap(mount *MountInfo) *MountWrapper {
|
||||
wm := &MountWrapper{mount: mount}
|
||||
debugf(wm, "Wrap", "created")
|
||||
return wm
|
||||
}
|
||||
|
||||
/* MountWrapper:
|
||||
** Implements https://pkg.go.dev/io/fs#FS
|
||||
** Wraps cephfs.MountInfo
|
||||
*/
|
||||
|
||||
// SetTracing configures the MountWrapper and objects connected to it for debug
|
||||
// tracing. True enables tracing and false disables it. A debug logging
|
||||
// function must also be set using go-ceph's common.log.SetDebugf function.
|
||||
func (mw *MountWrapper) SetTracing(enable bool) {
|
||||
mw.enableTrace = enable
|
||||
}
|
||||
|
||||
// identify the MountWrapper object for logging purposes.
|
||||
func (mw *MountWrapper) identify() string {
|
||||
return fmt.Sprintf("MountWrapper<%p>", mw)
|
||||
}
|
||||
|
||||
// trace returns true if debug tracing is enabled.
|
||||
func (mw *MountWrapper) trace() bool {
|
||||
return mw.enableTrace
|
||||
}
|
||||
|
||||
// Open opens the named file. This may be either a regular file or a directory.
|
||||
// Directories opened with this function will return object compatible with the
|
||||
// io.ReadDirFile interface.
|
||||
func (mw *MountWrapper) Open(name string) (fs.File, error) {
|
||||
debugf(mw, "Open", "(%v)", name)
|
||||
// there are a bunch of patterns that fstest/testfs looks for that seem
|
||||
// under-documented. They mainly seem to try and enforce "clean" paths.
|
||||
// look for them and reject them here because ceph libs won't reject on
|
||||
// its own
|
||||
if strings.HasPrefix(name, "/") ||
|
||||
strings.HasSuffix(name, "/.") ||
|
||||
strings.Contains(name, "//") ||
|
||||
strings.Contains(name, "/./") ||
|
||||
strings.Contains(name, "/../") {
|
||||
return nil, &fs.PathError{Op: "open", Path: name, Err: errInvalid}
|
||||
}
|
||||
|
||||
d, err := mw.mount.OpenDir(name)
|
||||
if err == nil {
|
||||
debugf(mw, "Open", "(%v): dir ok", name)
|
||||
dw := &dirWrapper{parent: mw, directory: d, name: name}
|
||||
return dw, nil
|
||||
}
|
||||
if !errors.Is(err, errNotDir) {
|
||||
debugf(mw, "Open", "(%v): dir error: %v", name, err)
|
||||
return nil, &fs.PathError{Op: "open", Path: name, Err: err}
|
||||
}
|
||||
|
||||
f, err := mw.mount.Open(name, os.O_RDONLY, 0)
|
||||
if err == nil {
|
||||
debugf(mw, "Open", "(%v): file ok", name)
|
||||
fw := &fileWrapper{parent: mw, file: f, name: name}
|
||||
return fw, nil
|
||||
}
|
||||
debugf(mw, "Open", "(%v): file error: %v", name, err)
|
||||
return nil, &fs.PathError{Op: "open", Path: name, Err: err}
|
||||
}
|
||||
|
||||
/* fileWrapper:
|
||||
** Implements https://pkg.go.dev/io/fs#FS
|
||||
** Wraps cephfs.File
|
||||
*/
|
||||
|
||||
func (fw *fileWrapper) Stat() (fs.FileInfo, error) {
|
||||
debugf(fw, "Stat", "()")
|
||||
sx, err := fw.file.Fstatx(StatxBasicStats, AtSymlinkNofollow)
|
||||
if err != nil {
|
||||
debugf(fw, "Stat", "() -> err:%v", err)
|
||||
return nil, &fs.PathError{Op: "stat", Path: fw.name, Err: err}
|
||||
}
|
||||
debugf(fw, "Stat", "() ok")
|
||||
return &infoWrapper{fw.parent, sx, path.Base(fw.name)}, nil
|
||||
}
|
||||
|
||||
func (fw *fileWrapper) Read(b []byte) (int, error) {
|
||||
debugf(fw, "Read", "(...)")
|
||||
return fw.file.Read(b)
|
||||
}
|
||||
|
||||
func (fw *fileWrapper) Close() error {
|
||||
debugf(fw, "Close", "()")
|
||||
return fw.file.Close()
|
||||
}
|
||||
|
||||
func (fw *fileWrapper) identify() string {
|
||||
return fmt.Sprintf("fileWrapper<%p>[%v]", fw, fw.name)
|
||||
}
|
||||
|
||||
func (fw *fileWrapper) trace() bool {
|
||||
return fw.parent.trace()
|
||||
}
|
||||
|
||||
/* dirWrapper:
|
||||
** Implements https://pkg.go.dev/io/fs#ReadDirFile
|
||||
** Wraps cephfs.Directory
|
||||
*/
|
||||
|
||||
func (dw *dirWrapper) Stat() (fs.FileInfo, error) {
|
||||
debugf(dw, "Stat", "()")
|
||||
sx, err := dw.parent.mount.Statx(dw.name, StatxBasicStats, AtSymlinkNofollow)
|
||||
if err != nil {
|
||||
debugf(dw, "Stat", "() -> err:%v", err)
|
||||
return nil, &fs.PathError{Op: "stat", Path: dw.name, Err: err}
|
||||
}
|
||||
debugf(dw, "Stat", "() ok")
|
||||
return &infoWrapper{dw.parent, sx, path.Base(dw.name)}, nil
|
||||
}
|
||||
|
||||
func (dw *dirWrapper) Read(_ []byte) (int, error) {
|
||||
debugf(dw, "Read", "(...)")
|
||||
return 0, &fs.PathError{Op: "read", Path: dw.name, Err: errIsDir}
|
||||
}
|
||||
|
||||
func (dw *dirWrapper) ReadDir(n int) ([]fs.DirEntry, error) {
|
||||
debugf(dw, "ReadDir", "(%v)", n)
|
||||
if n > 0 {
|
||||
return dw.readDirSome(n)
|
||||
}
|
||||
return dw.readDirAll()
|
||||
}
|
||||
|
||||
const defaultDirReadCount = 256 // how many entries to read per loop
|
||||
|
||||
func (dw *dirWrapper) readDirAll() ([]fs.DirEntry, error) {
|
||||
debugf(dw, "readDirAll", "()")
|
||||
var (
|
||||
err error
|
||||
egroup []fs.DirEntry
|
||||
entries = make([]fs.DirEntry, 0)
|
||||
size = defaultDirReadCount
|
||||
)
|
||||
for {
|
||||
egroup, err = dw.readDirSome(size)
|
||||
entries = append(entries, egroup...)
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
debugf(dw, "readDirAll", "() -> len:%v, err:%v", len(entries), err)
|
||||
return entries, err
|
||||
}
|
||||
|
||||
func (dw *dirWrapper) readDirSome(n int) ([]fs.DirEntry, error) {
|
||||
debugf(dw, "readDirSome", "(%v)", n)
|
||||
var (
|
||||
idx int
|
||||
err error
|
||||
entry *DirEntryPlus
|
||||
entries = make([]fs.DirEntry, n)
|
||||
)
|
||||
for {
|
||||
entry, err = dw.directory.ReadDirPlus(StatxBasicStats, AtSymlinkNofollow)
|
||||
debugf(dw, "readDirSome", "(%v): got entry:%v, err:%v", n, entry, err)
|
||||
if err != nil || entry == nil {
|
||||
break
|
||||
}
|
||||
switch entry.Name() {
|
||||
case ".", "..":
|
||||
continue
|
||||
}
|
||||
entries[idx] = &dentryWrapper{dw.parent, entry}
|
||||
idx++
|
||||
if idx >= n {
|
||||
break
|
||||
}
|
||||
}
|
||||
if idx == 0 {
|
||||
debugf(dw, "readDirSome", "(%v): EOF", n)
|
||||
return nil, io.EOF
|
||||
}
|
||||
debugf(dw, "readDirSome", "(%v): got entry:%v, err:%v", n, entries[:idx], err)
|
||||
return entries[:idx], err
|
||||
}
|
||||
|
||||
func (dw *dirWrapper) Close() error {
|
||||
debugf(dw, "Close", "()")
|
||||
return dw.directory.Close()
|
||||
}
|
||||
|
||||
func (dw *dirWrapper) identify() string {
|
||||
return fmt.Sprintf("dirWrapper<%p>[%v]", dw, dw.name)
|
||||
}
|
||||
|
||||
func (dw *dirWrapper) trace() bool {
|
||||
return dw.parent.trace()
|
||||
}
|
||||
|
||||
/* dentryWrapper:
|
||||
** Implements https://pkg.go.dev/io/fs#DirEntry
|
||||
** Wraps cephfs.DirEntryPlus
|
||||
*/
|
||||
|
||||
func (dew *dentryWrapper) Name() string {
|
||||
debugf(dew, "Name", "()")
|
||||
return dew.de.Name()
|
||||
}
|
||||
|
||||
func (dew *dentryWrapper) IsDir() bool {
|
||||
v := dew.de.DType() == DTypeDir
|
||||
debugf(dew, "IsDir", "() -> %v", v)
|
||||
return v
|
||||
}
|
||||
|
||||
func (dew *dentryWrapper) Type() fs.FileMode {
|
||||
m := dew.de.Statx().Mode
|
||||
v := cephModeToFileMode(m).Type()
|
||||
debugf(dew, "Type", "() -> %v", v)
|
||||
return v
|
||||
}
|
||||
|
||||
func (dew *dentryWrapper) Info() (fs.FileInfo, error) {
|
||||
debugf(dew, "Info", "()")
|
||||
sx := dew.de.Statx()
|
||||
name := dew.de.Name()
|
||||
return &infoWrapper{dew.parent, sx, name}, nil
|
||||
}
|
||||
|
||||
func (dew *dentryWrapper) identify() string {
|
||||
return fmt.Sprintf("dentryWrapper<%p>[%v]", dew, dew.de.Name())
|
||||
}
|
||||
|
||||
func (dew *dentryWrapper) trace() bool {
|
||||
return dew.parent.trace()
|
||||
}
|
||||
|
||||
/* infoWrapper:
|
||||
** Implements https://pkg.go.dev/io/fs#FileInfo
|
||||
** Wraps cephfs.CephStatx
|
||||
*/
|
||||
|
||||
func (iw *infoWrapper) Name() string {
|
||||
debugf(iw, "Name", "()")
|
||||
return iw.name
|
||||
}
|
||||
|
||||
func (iw *infoWrapper) Size() int64 {
|
||||
debugf(iw, "Size", "() -> %v", iw.sx.Size)
|
||||
return int64(iw.sx.Size)
|
||||
}
|
||||
|
||||
func (iw *infoWrapper) Sys() any {
|
||||
debugf(iw, "Sys", "()")
|
||||
return iw.sx
|
||||
}
|
||||
|
||||
func (iw *infoWrapper) Mode() fs.FileMode {
|
||||
v := cephModeToFileMode(iw.sx.Mode)
|
||||
debugf(iw, "Mode", "() -> %#o -> %#o/%v", iw.sx.Mode, uint32(v), v.Type())
|
||||
return v
|
||||
}
|
||||
|
||||
func (iw *infoWrapper) IsDir() bool {
|
||||
v := iw.sx.Mode&modeIFMT == modeIFDIR
|
||||
debugf(iw, "IsDir", "() -> %v", v)
|
||||
return v
|
||||
}
|
||||
|
||||
func (iw *infoWrapper) ModTime() time.Time {
|
||||
v := time.Unix(iw.sx.Mtime.Sec, iw.sx.Mtime.Nsec)
|
||||
debugf(iw, "ModTime", "() -> %v", v)
|
||||
return v
|
||||
}
|
||||
|
||||
func (iw *infoWrapper) identify() string {
|
||||
return fmt.Sprintf("infoWrapper<%p>[%v]", iw, iw.name)
|
||||
}
|
||||
|
||||
func (iw *infoWrapper) trace() bool {
|
||||
return iw.parent.trace()
|
||||
}
|
||||
|
||||
/* copy and paste values from the linux headers. We always need to use
|
||||
** the linux header values, regardless of the platform go-ceph is built
|
||||
** for. Rather than jumping through header hoops, copy and paste is
|
||||
** more consistent and reliable.
|
||||
*/
|
||||
const (
|
||||
/* file type mask */
|
||||
modeIFMT = uint16(0170000)
|
||||
/* file types */
|
||||
modeIFDIR = uint16(0040000)
|
||||
modeIFCHR = uint16(0020000)
|
||||
modeIFBLK = uint16(0060000)
|
||||
modeIFREG = uint16(0100000)
|
||||
modeIFIFO = uint16(0010000)
|
||||
modeIFLNK = uint16(0120000)
|
||||
modeIFSOCK = uint16(0140000)
|
||||
/* protection bits */
|
||||
modeISUID = uint16(0004000)
|
||||
modeISGID = uint16(0002000)
|
||||
modeISVTX = uint16(0001000)
|
||||
)
|
||||
|
||||
// cephModeToFileMode takes a linux compatible cephfs mode value
|
||||
// and returns a Go-compatible, OS-agnostic FileMode value.
|
||||
func cephModeToFileMode(m uint16) fs.FileMode {
|
||||
// start with permission bits
|
||||
mode := fs.FileMode(m & 0777)
|
||||
// file type - inspired by go's src/os/stat_linux.go
|
||||
switch m & modeIFMT {
|
||||
case modeIFBLK:
|
||||
mode |= fs.ModeDevice
|
||||
case modeIFCHR:
|
||||
mode |= fs.ModeDevice | fs.ModeCharDevice
|
||||
case modeIFDIR:
|
||||
mode |= fs.ModeDir
|
||||
case modeIFIFO:
|
||||
mode |= fs.ModeNamedPipe
|
||||
case modeIFLNK:
|
||||
mode |= fs.ModeSymlink
|
||||
case modeIFREG:
|
||||
// nothing to do
|
||||
case modeIFSOCK:
|
||||
mode |= fs.ModeSocket
|
||||
}
|
||||
// protection bits
|
||||
if m&modeISUID != 0 {
|
||||
mode |= fs.ModeSetuid
|
||||
}
|
||||
if m&modeISGID != 0 {
|
||||
mode |= fs.ModeSetgid
|
||||
}
|
||||
if m&modeISVTX != 0 {
|
||||
mode |= fs.ModeSticky
|
||||
}
|
||||
return mode
|
||||
}
|
||||
|
||||
// wrapperObject helps identify an object to be logged.
|
||||
type wrapperObject interface {
|
||||
identify() string
|
||||
trace() bool
|
||||
}
|
||||
|
||||
// debugf formats info about a function and logs it.
|
||||
func debugf(o wrapperObject, fname, format string, args ...any) {
|
||||
if o.trace() {
|
||||
log.Debugf(fmt.Sprintf("%v.%v: %s", o.identify(), fname, format), args...)
|
||||
}
|
||||
}
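A minimal sketch of the wrapper in use (not part of the vendored file, assuming a mounted *MountInfo and the io/fs import); it relies only on the Wrap entry point and the standard fs.WalkDir:

// listAll is an illustrative helper: it walks the wrapped CephFS mount
// from its root and returns every path visited, using only the io/fs
// interfaces implemented above.
func listAll(mount *MountInfo) ([]string, error) {
	var paths []string
	w := Wrap(mount) // adapt the mount to an fs.FS
	err := fs.WalkDir(w, ".", func(p string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		paths = append(paths, p)
		return nil
	})
	return paths, err
}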
@@ -0,0 +1,79 @@
package cephfs
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"os"
|
||||
"testing"
|
||||
"testing/fstest"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
// "github.com/ceph/go-ceph/common/log"
|
||||
)
|
||||
|
||||
func TestFSCompat(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
// set up a few dirs
|
||||
err := mount.MakeDir("fst_foo", 0755)
|
||||
require.NoError(t, err)
|
||||
defer func() { assert.NoError(t, mount.RemoveDir("fst_foo")) }()
|
||||
err = mount.MakeDir("fst_bar", 0755)
|
||||
require.NoError(t, err)
|
||||
defer func() { assert.NoError(t, mount.RemoveDir("fst_bar")) }()
|
||||
err = mount.MakeDir("fst_bar/fst_baz", 0755)
|
||||
require.NoError(t, err)
|
||||
defer func() { assert.NoError(t, mount.RemoveDir("fst_bar/fst_baz")) }()
|
||||
|
||||
// set up a few files
|
||||
writeFile(t, mount, "wibble.txt", []byte("nothing to see here"))
|
||||
defer func() { assert.NoError(t, mount.Unlink("wibble.txt")) }()
|
||||
writeFile(t, mount, "fst_bar/nuffin.txt", []byte(""))
|
||||
defer func() { assert.NoError(t, mount.Unlink("fst_bar/nuffin.txt")) }()
|
||||
writeFile(t, mount, "fst_bar/fst_baz/super.txt", []byte("this is my favorite file"))
|
||||
defer func() { assert.NoError(t, mount.Unlink("fst_bar/fst_baz/super.txt")) }()
|
||||
writeFile(t, mount, "boop.txt", []byte("abcdefg"))
|
||||
defer func() { assert.NoError(t, mount.Unlink("boop.txt")) }()
|
||||
|
||||
// uncomment for detailed debug level logging
|
||||
// log.SetDebugf(t.Logf)
|
||||
|
||||
t.Run("testFS", func(t *testing.T) {
|
||||
w := Wrap(mount)
|
||||
if err := fstest.TestFS(w, "wibble.txt", "fst_bar/nuffin.txt", "fst_bar/fst_baz/super.txt", "boop.txt"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("walkDir", func(t *testing.T) {
|
||||
w := Wrap(mount)
|
||||
dirs := []string{}
|
||||
files := []string{}
|
||||
fs.WalkDir(w, ".", func(path string, d fs.DirEntry, err error) error {
|
||||
assert.NoError(t, err)
|
||||
if d.IsDir() {
|
||||
dirs = append(dirs, path)
|
||||
} else {
|
||||
files = append(files, path)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
assert.Contains(t, dirs, ".")
|
||||
assert.Contains(t, dirs, "fst_foo")
|
||||
assert.Contains(t, dirs, "fst_bar")
|
||||
assert.Contains(t, dirs, "fst_bar/fst_baz")
|
||||
assert.Contains(t, files, "wibble.txt")
|
||||
assert.Contains(t, files, "boop.txt")
|
||||
assert.Contains(t, files, "fst_bar/nuffin.txt")
|
||||
assert.Contains(t, files, "fst_bar/fst_baz/super.txt")
|
||||
})
|
||||
}
|
||||
|
||||
func writeFile(t *testing.T, mount *MountInfo, name string, data []byte) {
|
||||
f, err := mount.Open(name, os.O_WRONLY|os.O_CREATE, 0600)
|
||||
require.NoError(t, err)
|
||||
defer func() { assert.NoError(t, f.Close()) }()
|
||||
_, err = f.Write(data)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
29  pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/makedirs.go  Normal file
@@ -0,0 +1,29 @@
package cephfs

/*
#cgo LDFLAGS: -lcephfs
#cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64
#include <stdlib.h>
#include <cephfs/libcephfs.h>
*/
import "C"

import (
	"unsafe"
)

// MakeDirs creates multiple directories at once.
//
// Implements:
//
//  int ceph_mkdirs(struct ceph_mount_info *cmount, const char *path, mode_t mode);
func (mount *MountInfo) MakeDirs(path string, mode uint32) error {
	if err := mount.validate(); err != nil {
		return err
	}
	cPath := C.CString(path)
	defer C.free(unsafe.Pointer(cPath))

	ret := C.ceph_mkdirs(mount.mount, cPath, C.mode_t(mode))
	return getError(ret)
}
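MakeDirs behaves like mkdir -p: intermediate components are created as needed, but removal is still the caller's job, directory by directory. A small sketch (illustrative only, assuming a connected *MountInfo; cleanup runs leaf-first, as the test below also does):

// makeAndRemoveTree creates a nested tree in one call, then removes it
// leaf-first, since ceph_rmdir only deletes empty directories.
func makeAndRemoveTree(mount *MountInfo) error {
	if err := mount.MakeDirs("/base/sub/way", 0o755); err != nil {
		return err
	}
	for _, d := range []string{"/base/sub/way", "/base/sub", "/base"} {
		if err := mount.RemoveDir(d); err != nil {
			return err
		}
	}
	return nil
}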
@@ -0,0 +1,27 @@
package cephfs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestMakeDirs(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
dir1 := "/base/sub/way"
|
||||
err := mount.MakeDirs(dir1, 0o755)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, mount.RemoveDir("/base/sub/way"))
|
||||
assert.NoError(t, mount.RemoveDir("/base/sub"))
|
||||
assert.NoError(t, mount.RemoveDir("/base"))
|
||||
}()
|
||||
|
||||
dir, err := mount.OpenDir(dir1)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, dir)
|
||||
err = dir.Close()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
@@ -0,0 +1,22 @@
//
// ceph_mount_perms_set available in mimic & later

package cephfs

/*
#cgo LDFLAGS: -lcephfs
#cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64
#include <cephfs/libcephfs.h>
*/
import "C"

// SetMountPerms applies the given UserPerm to the mount object, which it will
// then use to define the connection's ownership credentials.
// This function must be called after Init but before Mount.
//
// Implements:
//
//  int ceph_mount_perms_set(struct ceph_mount_info *cmount, UserPerm *perm);
func (mount *MountInfo) SetMountPerms(perm *UserPerm) error {
	return getError(C.ceph_mount_perms_set(mount.mount, perm.userPerm))
}
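The ordering requirement is the important part: the permissions can only be attached between Init and Mount. A hedged sketch of that sequence (the helper name and the nil supplementary-gid list are illustrative, not from the source; error-path cleanup via Release is omitted for brevity):

// mountAs connects with explicit ownership credentials; the UserPerm
// must be applied after Init and before Mount.
func mountAs(uid, gid int) (*MountInfo, error) {
	mount, err := CreateMount()
	if err != nil {
		return nil, err
	}
	if err := mount.ReadDefaultConfigFile(); err != nil {
		return nil, err
	}
	if err := mount.Init(); err != nil {
		return nil, err
	}
	if err := mount.SetMountPerms(NewUserPerm(uid, gid, nil)); err != nil {
		return nil, err
	}
	return mount, mount.Mount()
}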
@@ -0,0 +1,39 @@
package cephfs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSetMountPerms(t *testing.T) {
|
||||
mount, err := CreateMount()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, mount)
|
||||
defer func() { assert.NoError(t, mount.Release()) }()
|
||||
|
||||
err = mount.ReadDefaultConfigFile()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = mount.Init()
|
||||
assert.NoError(t, err)
|
||||
|
||||
uperm := NewUserPerm(0, 500, []int{0, 500, 501})
|
||||
err = mount.SetMountPerms(uperm)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = mount.Mount()
|
||||
assert.NoError(t, err)
|
||||
defer func() { assert.NoError(t, mount.Unmount()) }()
|
||||
|
||||
t.Run("checkStat", func(t *testing.T) {
|
||||
dirname := "/check-mount-perms"
|
||||
err := mount.MakeDir(dirname, 0755)
|
||||
assert.NoError(t, err)
|
||||
defer mount.RemoveDir(dirname)
|
||||
sx, err := mount.Statx(dirname, StatxBasicStats, 0)
|
||||
require.NoError(t, err)
|
||||
assert.EqualValues(t, sx.Gid, 500)
|
||||
})
|
||||
}
|
||||
201  pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/path.go  Normal file
@@ -0,0 +1,201 @@
package cephfs
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lcephfs
|
||||
#cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64
|
||||
#include <stdlib.h>
|
||||
#include <cephfs/libcephfs.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// CurrentDir gets the current working directory.
|
||||
func (mount *MountInfo) CurrentDir() string {
|
||||
if err := mount.validate(); err != nil {
|
||||
return ""
|
||||
}
|
||||
cDir := C.ceph_getcwd(mount.mount)
|
||||
return C.GoString(cDir)
|
||||
}
|
||||
|
||||
// ChangeDir changes the current working directory.
|
||||
func (mount *MountInfo) ChangeDir(path string) error {
|
||||
if err := mount.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
cPath := C.CString(path)
|
||||
defer C.free(unsafe.Pointer(cPath))
|
||||
|
||||
ret := C.ceph_chdir(mount.mount, cPath)
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// MakeDir creates a directory.
|
||||
func (mount *MountInfo) MakeDir(path string, mode uint32) error {
|
||||
if err := mount.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
cPath := C.CString(path)
|
||||
defer C.free(unsafe.Pointer(cPath))
|
||||
|
||||
ret := C.ceph_mkdir(mount.mount, cPath, C.mode_t(mode))
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// RemoveDir removes a directory.
|
||||
func (mount *MountInfo) RemoveDir(path string) error {
|
||||
if err := mount.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
cPath := C.CString(path)
|
||||
defer C.free(unsafe.Pointer(cPath))
|
||||
|
||||
ret := C.ceph_rmdir(mount.mount, cPath)
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// Unlink removes a file.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_unlink(struct ceph_mount_info *cmount, const char *path);
|
||||
func (mount *MountInfo) Unlink(path string) error {
|
||||
if err := mount.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
cPath := C.CString(path)
|
||||
defer C.free(unsafe.Pointer(cPath))
|
||||
|
||||
ret := C.ceph_unlink(mount.mount, cPath)
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// Link creates a new link to an existing file.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_link (struct ceph_mount_info *cmount, const char *existing, const char *newname);
|
||||
func (mount *MountInfo) Link(oldname, newname string) error {
|
||||
if err := mount.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
cOldname := C.CString(oldname)
|
||||
defer C.free(unsafe.Pointer(cOldname))
|
||||
|
||||
cNewname := C.CString(newname)
|
||||
defer C.free(unsafe.Pointer(cNewname))
|
||||
|
||||
ret := C.ceph_link(mount.mount, cOldname, cNewname)
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// Symlink creates a symbolic link to an existing path.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_symlink(struct ceph_mount_info *cmount, const char *existing, const char *newname);
|
||||
func (mount *MountInfo) Symlink(existing, newname string) error {
|
||||
if err := mount.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
cExisting := C.CString(existing)
|
||||
defer C.free(unsafe.Pointer(cExisting))
|
||||
|
||||
cNewname := C.CString(newname)
|
||||
defer C.free(unsafe.Pointer(cNewname))
|
||||
|
||||
ret := C.ceph_symlink(mount.mount, cExisting, cNewname)
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// Readlink returns the value of a symbolic link.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_readlink(struct ceph_mount_info *cmount, const char *path, char *buf, int64_t size);
|
||||
func (mount *MountInfo) Readlink(path string) (string, error) {
|
||||
if err := mount.validate(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
cPath := C.CString(path)
|
||||
defer C.free(unsafe.Pointer(cPath))
|
||||
|
||||
buf := make([]byte, 4096)
|
||||
ret := C.ceph_readlink(mount.mount,
|
||||
cPath,
|
||||
(*C.char)(unsafe.Pointer(&buf[0])),
|
||||
C.int64_t(len(buf)))
|
||||
if ret < 0 {
|
||||
return "", getError(ret)
|
||||
}
|
||||
|
||||
return string(buf[:ret]), nil
|
||||
}
|
||||
|
||||
// Statx returns information about a file/directory.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_statx(struct ceph_mount_info *cmount, const char *path, struct ceph_statx *stx,
|
||||
// unsigned int want, unsigned int flags);
|
||||
func (mount *MountInfo) Statx(path string, want StatxMask, flags AtFlags) (*CephStatx, error) {
|
||||
if err := mount.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cPath := C.CString(path)
|
||||
defer C.free(unsafe.Pointer(cPath))
|
||||
|
||||
var stx C.struct_ceph_statx
|
||||
ret := C.ceph_statx(
|
||||
mount.mount,
|
||||
cPath,
|
||||
&stx,
|
||||
C.uint(want),
|
||||
C.uint(flags),
|
||||
)
|
||||
if err := getError(ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cStructToCephStatx(stx), nil
|
||||
}
|
||||
|
||||
// Rename a file or directory.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_rename(struct ceph_mount_info *cmount, const char *from, const char *to);
|
||||
func (mount *MountInfo) Rename(from, to string) error {
|
||||
if err := mount.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
cFrom := C.CString(from)
|
||||
defer C.free(unsafe.Pointer(cFrom))
|
||||
cTo := C.CString(to)
|
||||
defer C.free(unsafe.Pointer(cTo))
|
||||
|
||||
ret := C.ceph_rename(mount.mount, cFrom, cTo)
|
||||
return getError(ret)
|
||||
}
|
||||
|
||||
// Truncate sets the size of the specified file.
|
||||
//
|
||||
// Implements:
|
||||
//
|
||||
// int ceph_truncate(struct ceph_mount_info *cmount, const char *path, int64_t size);
|
||||
func (mount *MountInfo) Truncate(path string, size int64) error {
|
||||
if err := mount.validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
cPath := C.CString(path)
|
||||
defer C.free(unsafe.Pointer(cPath))
|
||||
|
||||
ret := C.ceph_truncate(
|
||||
mount.mount,
|
||||
cPath,
|
||||
C.int64_t(size),
|
||||
)
|
||||
return getError(ret)
|
||||
}
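Taken together, these path helpers cover the usual symlink workflow: create the link, read its target back, and stat the link path itself. A compact sketch (illustrative only, assuming a connected *MountInfo; the file names are placeholders):

// symlinkRoundTrip creates a symlink, reads its target back, and stats
// the link path; Readlink fails on anything that is not a symlink.
func symlinkRoundTrip(mount *MountInfo) error {
	if err := mount.Symlink("target.txt", "alias"); err != nil {
		return err
	}
	defer mount.Unlink("alias")

	if _, err := mount.Readlink("alias"); err != nil {
		return err // not a symbolic link, or the mount is invalid
	}
	_, err := mount.Statx("alias", StatxBasicStats, AtSymlinkNofollow)
	return err
}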
449  pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/path_test.go  Normal file
@@ -0,0 +1,449 @@
package cephfs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestChangeDir(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
dir1 := mount.CurrentDir()
|
||||
assert.NotNil(t, dir1)
|
||||
|
||||
err := mount.MakeDir("/asdf", 0755)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = mount.ChangeDir("/asdf")
|
||||
assert.NoError(t, err)
|
||||
|
||||
dir2 := mount.CurrentDir()
|
||||
assert.NotNil(t, dir2)
|
||||
|
||||
assert.NotEqual(t, dir1, dir2)
|
||||
assert.Equal(t, dir1, "/")
|
||||
assert.Equal(t, dir2, "/asdf")
|
||||
|
||||
err = mount.ChangeDir("/")
|
||||
assert.NoError(t, err)
|
||||
err = mount.RemoveDir("/asdf")
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestRemoveDir(t *testing.T) {
|
||||
dirname := "one"
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
err := mount.MakeDir(dirname, 0755)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = mount.SyncFs()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Stat the location to verify dirname currently exists
|
||||
_, err = mount.Statx(dirname, StatxBasicStats, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = mount.RemoveDir(dirname)
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, err = mount.Statx(dirname, StatxBasicStats, 0)
|
||||
assert.Equal(t, err, ErrNotExist)
|
||||
}
|
||||
|
||||
func TestLink(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
t.Run("rootDirOperations", func(t *testing.T) {
|
||||
// Root dir, both as source and destination.
|
||||
err := mount.Link("/", "/")
|
||||
// Error directory operations are not allowed.
|
||||
assert.Error(t, err)
|
||||
|
||||
dir1 := "myDir1"
|
||||
assert.NoError(t, mount.MakeDir(dir1, 0755))
|
||||
defer func() {
|
||||
assert.NoError(t, mount.RemoveDir(dir1))
|
||||
}()
|
||||
|
||||
// Creating link for a directory.
|
||||
err = mount.Link(dir1, "/")
|
||||
// Error, directory operations not allowed.
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
// Non-root directory operations.
|
||||
fname := "testFile.txt"
|
||||
dir2 := "myDir2"
|
||||
assert.NoError(t, mount.MakeDir(dir2, 0755))
|
||||
defer func() {
|
||||
assert.NoError(t, mount.RemoveDir(dir2))
|
||||
}()
|
||||
|
||||
t.Run("dirAsSource", func(t *testing.T) {
|
||||
err := mount.Link(dir2, fname)
|
||||
// Error, directory operations not allowed.
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("dirAsDestination", func(t *testing.T) {
|
||||
f1, err := mount.Open(fname, os.O_WRONLY|os.O_CREATE, 0666)
|
||||
assert.NotNil(t, f1)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, f1.Close())
|
||||
assert.NoError(t, mount.Unlink(fname))
|
||||
}()
|
||||
err = mount.Link(fname, dir2)
|
||||
// Error, destination exists.
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
// File operations.
|
||||
t.Run("sourceDoesNotExist", func(t *testing.T) {
|
||||
fname := "notExist.txt"
|
||||
err := mount.Link(fname, "hardlnk")
|
||||
// Error, file does not exist.
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("sourceExistsSuccess", func(t *testing.T) {
|
||||
fname1 := "TestFile1.txt"
|
||||
f1, err := mount.Open(fname1, os.O_WRONLY|os.O_CREATE, 0666)
|
||||
assert.NotNil(t, f1)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, f1.Close())
|
||||
assert.NoError(t, mount.Unlink(fname1))
|
||||
}()
|
||||
err = mount.Link(fname1, "hardlnk")
|
||||
defer func() { assert.NoError(t, mount.Unlink("hardlnk")) }()
|
||||
// No error, normal link operation.
|
||||
assert.NoError(t, err)
|
||||
// Verify that link got created.
|
||||
_, err = mount.Statx("hardlnk", StatxBasicStats, 0)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("destExistsError", func(t *testing.T) {
|
||||
// Create hard link when destination exists.
|
||||
fname2 := "TestFile2.txt"
|
||||
fname3 := "TestFile3.txt"
|
||||
f2, err := mount.Open(fname2, os.O_WRONLY|os.O_CREATE, 0666)
|
||||
assert.NotNil(t, f2)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, f2.Close())
|
||||
assert.NoError(t, mount.Unlink(fname2))
|
||||
}()
|
||||
f3, err := mount.Open(fname3, os.O_WRONLY|os.O_CREATE, 0666)
|
||||
assert.NotNil(t, f3)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, f3.Close())
|
||||
assert.NoError(t, mount.Unlink(fname3))
|
||||
}()
|
||||
err = mount.Link(fname2, fname3)
|
||||
// Error, destination already exists.
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestUnlink(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
t.Run("fileUnlink", func(t *testing.T) {
|
||||
fname := "TestFile.txt"
|
||||
err := mount.Unlink(fname)
|
||||
// Error, file does not exist.
|
||||
assert.Error(t, err)
|
||||
|
||||
f, err := mount.Open(fname, os.O_WRONLY|os.O_CREATE, 0666)
|
||||
assert.NotNil(t, f)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, f.Close())
|
||||
assert.NoError(t, mount.Unlink(fname))
|
||||
}()
|
||||
assert.NoError(t, mount.Link(fname, "hardlnk"))
|
||||
|
||||
err = mount.Unlink("hardlnk")
|
||||
// No Error, link will be removed.
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("dirUnlink", func(t *testing.T) {
|
||||
dirname := "/a"
|
||||
err := mount.MakeDir(dirname, 0755)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, mount.RemoveDir(dirname))
|
||||
}()
|
||||
|
||||
err = mount.Unlink(dirname)
|
||||
// Error, not permitted on directory.
|
||||
assert.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSymlink(t *testing.T) {
|
||||
mount := fsConnect(t)
|
||||
defer fsDisconnect(t, mount)
|
||||
|
||||
// File operations.
|
||||
t.Run("sourceDoesNotExistSuccess", func(t *testing.T) {
|
||||
fname1 := "TestFile1.txt"
|
||||
err := mount.Symlink(fname1, "Symlnk1")
|
||||
// No Error, symlink works even if source file doesn't exist.
|
||||
assert.NoError(t, err)
|
||||
_, err = mount.Statx("Symlnk1", StatxBasicStats, 0)
|
||||
// Error, source is not there.
|
||||
assert.Error(t, err)
|
||||
|
||||
_, err = mount.Statx(fname1, StatxBasicStats, 0)
|
||||
// Error, source file is still not there.
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("symlinkExistsError", func(t *testing.T) {
|
||||
fname1 := "TestFile1.txt"
|
||||
f1, err := mount.Open(fname1, os.O_RDWR|os.O_CREATE, 0666)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, f1)
|
||||
defer func() {
|
||||
assert.NoError(t, f1.Close())
|
||||
assert.NoError(t, mount.Unlink(fname1))
|
||||
}()
|
||||
err = mount.Symlink(fname1, "Symlnk1")
|
||||
// Error, Symlink1 exists.
|
||||
assert.Error(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, mount.Unlink("Symlnk1"))
|
||||
}()
|
||||
})
|
||||
|
||||
t.Run("sourceExistsSuccess", func(t *testing.T) {
|
||||
fname2 := "TestFile2.txt"
|
||||
f2, err := mount.Open(fname2, os.O_RDWR|os.O_CREATE, 0666)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, f2)
|
||||
defer func() {
|
||||
assert.NoError(t, f2.Close())
|
||||
assert.NoError(t, mount.Unlink(fname2))
|
||||
}()
|
||||
err = mount.Symlink(fname2, "Symlnk2")
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, mount.Unlink("Symlnk2"))
|
||||
}()
|
||||
_, err = mount.Statx("Symlnk2", StatxBasicStats, 0)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
// Directory operations.
|
||||
t.Run("rootDirOps", func(t *testing.T) {
|
||||
err := mount.Symlink("/", "/")
|
||||
assert.Error(t, err)
|
||||
|
||||
err = mount.Symlink("/", "someDir")
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, mount.Unlink("someDir"))
|
||||
}()
|
||||
|
||||
err = mount.Symlink("someFile", "/")
|
||||
// Error, permission denied.
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("nonRootDir", func(t *testing.T) {
|
||||
// 1. Create a directory.
|
||||
// 2. Create a symlink to that directory.
|
||||
// 3. Create a file inside symlink.
|
||||
// 4. Ensure that it is not a directory.
|
||||
dirname := "mydir"
|
||||
err := mount.MakeDir(dirname, 0755)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, mount.RemoveDir(dirname))
|
||||
}()
|
||||
|
||||
err = mount.Symlink(dirname, "symlnk")
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, mount.Unlink("symlnk"))
|
||||
}()
|
||||
|
||||
fname := "symlnk/file"
|
||||
f1, err := mount.Open(fname, os.O_RDWR|os.O_CREATE, 0666)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, f1)
|
||||
defer func() {
|
||||
assert.NoError(t, f1.Close())
|
||||
assert.NoError(t, mount.Unlink(fname))
|
||||
}()
|
||||
sx, err := mount.Statx("symlnk/file", StatxBasicStats, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEqual(t, sx.Mode&syscall.S_IFMT, uint16(syscall.S_IFDIR))
|
||||
})
|
||||
}
|
||||
|
||||
func TestReadlink(t *testing.T) {
    mount := fsConnect(t)
    defer fsDisconnect(t, mount)

    t.Run("regularFile", func(t *testing.T) {
        fname := "file1.txt"
        f1, err := mount.Open(fname, os.O_RDWR|os.O_CREATE, 0666)
        assert.NoError(t, err)
        assert.NotNil(t, f1)
        defer func() {
            assert.NoError(t, f1.Close())
            assert.NoError(t, mount.Unlink(fname))
        }()

        buf, err := mount.Readlink(fname)
        // Error, the given path is not a symbolic link.
        assert.Error(t, err)
        assert.Equal(t, buf, "")
    })

    t.Run("symLink", func(t *testing.T) {
        path1 := "path1"
        path2 := "path2"
        assert.NoError(t, mount.Symlink(path1, path2))
        defer func() {
            assert.NoError(t, mount.Unlink(path2))
        }()
        buf, err := mount.Readlink(path2)
        assert.NoError(t, err)
        assert.Equal(t, buf, path1)
    })

    t.Run("hardLink", func(t *testing.T) {
        path3 := "path3"
        path4 := "path4"
        p, err := mount.Open(path3, os.O_RDWR|os.O_CREATE, 0666)
        assert.NoError(t, err)
        assert.NotNil(t, p)
        defer func() {
            assert.NoError(t, p.Close())
            assert.NoError(t, mount.Unlink(path3))
        }()

        assert.NoError(t, mount.Link(path3, path4))
        defer func() {
            assert.NoError(t, mount.Unlink(path4))
        }()
        buf, err := mount.Readlink(path4)
        // Error, path4 is not a symbolic link.
        assert.Error(t, err)
        assert.Equal(t, buf, "")
    })
}

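// Illustrative sketch (not part of the test file above): Readlink returns the
// stored link text for a symbolic link and errors out on regular files and
// hard links, as the subtests above show. The helper and names are
// hypothetical; a connected *MountInfo is assumed.
func exampleReadlink(mount *MountInfo) (string, error) {
    if err := mount.Symlink("target.txt", "alias.lnk"); err != nil {
        return "", err
    }
    defer mount.Unlink("alias.lnk")
    // For "alias.lnk" this returns "target.txt"; for a non-symlink path it
    // would return an error and an empty string.
    return mount.Readlink("alias.lnk")
}
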
func TestStatx(t *testing.T) {
    t.Run("statPath", func(t *testing.T) {
        mount := fsConnect(t)
        defer fsDisconnect(t, mount)

        dirname := "statme"
        assert.NoError(t, mount.MakeDir(dirname, 0755))

        st, err := mount.Statx(dirname, StatxBasicStats, 0)
        assert.NoError(t, err)
        assert.NotNil(t, st)
        assert.Equal(t, uint16(0755), st.Mode&0777)

        assert.NoError(t, mount.RemoveDir(dirname))

        st, err = mount.Statx(dirname, StatxBasicStats, 0)
        assert.Error(t, err)
        assert.Nil(t, st)
        assert.Equal(t, ErrNotExist, err)
    })

    t.Run("invalidMount", func(t *testing.T) {
        m := &MountInfo{}
        _, err := m.Statx("junk", StatxBasicStats, 0)
        assert.Error(t, err)
    })
}

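// Illustrative sketch (not part of the test file above): Statx with
// StatxBasicStats returns a struct whose Mode field carries the permission
// bits (recovered by masking with 0777), and ErrNotExist once the path is
// gone. Helper name and directory name are hypothetical.
func exampleStatxMode(mount *MountInfo) (uint16, error) {
    if err := mount.MakeDir("statme-example", 0750); err != nil {
        return 0, err
    }
    defer mount.RemoveDir("statme-example")

    st, err := mount.Statx("statme-example", StatxBasicStats, 0)
    if err != nil {
        return 0, err
    }
    // Expected to be 0750, the mode used for MakeDir above.
    return st.Mode & 0777, nil
}
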
func TestRename(t *testing.T) {
    t.Run("invalidMount", func(t *testing.T) {
        m := &MountInfo{}
        err := m.Rename("foo", "bar")
        assert.Error(t, err)
    })

    t.Run("renameDir", func(t *testing.T) {
        mount := fsConnect(t)
        defer fsDisconnect(t, mount)

        n1 := "new_amsterdam"
        n2 := "new_york"
        assert.NoError(t, mount.MakeDir(n1, 0755))

        err := mount.Rename(n1, n2)
        assert.NoError(t, err)

        assert.NoError(t, mount.RemoveDir(n2))
    })
}

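// Illustrative sketch (not part of the test file above): Rename moves a file
// or directory to a new name within the mounted file system; the old name no
// longer resolves afterwards. Helper and directory names are hypothetical.
func exampleRename(mount *MountInfo) error {
    if err := mount.MakeDir("old_name", 0755); err != nil {
        return err
    }
    if err := mount.Rename("old_name", "new_name"); err != nil {
        return err
    }
    // Only the new name exists now; clean it up.
    return mount.RemoveDir("new_name")
}
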
func TestTruncate(t *testing.T) {
    mount := fsConnect(t)
    defer fsDisconnect(t, mount)

    fname := "TestTruncate.txt"
    defer mount.Unlink(fname)

    // "touch" the file
    f, err := mount.Open(fname, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
    assert.NoError(t, err)
    assert.NoError(t, f.Close())

    t.Run("invalidMount", func(t *testing.T) {
        m := &MountInfo{}
        err := m.Truncate(fname, 0)
        assert.Error(t, err)
    })

    t.Run("invalidSize", func(t *testing.T) {
        err := mount.Truncate(fname, -1)
        assert.Error(t, err)
    })

    t.Run("invalidPath", func(t *testing.T) {
        err := mount.Truncate(".Non~Existant~", 0)
        assert.Error(t, err)
    })

    t.Run("valid", func(t *testing.T) {
        err := mount.Truncate(fname, 1024)
        assert.NoError(t, err)

        st, err := mount.Statx(fname, StatxBasicStats, 0)
        if assert.NoError(t, err) {
            assert.NotNil(t, st)
            assert.EqualValues(t, 1024, st.Size)
        }

        err = mount.Truncate(fname, 0)
        assert.NoError(t, err)

        st, err = mount.Statx(fname, StatxBasicStats, 0)
        if assert.NoError(t, err) {
            assert.NotNil(t, st)
            assert.EqualValues(t, 0, st.Size)
        }
    })
}
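// Illustrative sketch (not part of the test file above): Truncate sets a file
// to an exact size (growing or shrinking it; negative sizes are rejected),
// and the result can be confirmed with Statx as in the "valid" subtest above.
// Helper name is hypothetical; fname must be an existing regular file.
func exampleTruncate(mount *MountInfo, fname string) (uint64, error) {
    if err := mount.Truncate(fname, 4096); err != nil {
        return 0, err
    }
    st, err := mount.Statx(fname, StatxBasicStats, 0)
    if err != nil {
        return 0, err
    }
    // Expected to be 4096 after the call above.
    return st.Size, nil
}
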
291 pkg/mod/github.com/ceph/go-ceph@v0.35.0/cephfs/path_xattr.go Normal file
@@ -0,0 +1,291 @@
package cephfs

/*
#cgo LDFLAGS: -lcephfs
#cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64
#define _GNU_SOURCE
#include <stdlib.h>
#include <cephfs/libcephfs.h>
*/
import "C"

import (
    "unsafe"

    "github.com/ceph/go-ceph/internal/cutil"
    "github.com/ceph/go-ceph/internal/retry"
)

// SetXattr sets an extended attribute on the file at the supplied path.
//
// NOTE: Attempting to set an xattr value with an empty value may cause
// the xattr to be unset. Please refer to https://tracker.ceph.com/issues/46084
//
// Implements:
//
//	int ceph_setxattr(struct ceph_mount_info *cmount, const char *path, const char *name,
//	                  const void *value, size_t size, int flags);
func (mount *MountInfo) SetXattr(path, name string, value []byte, flags XattrFlags) error {
    if err := mount.validate(); err != nil {
        return err
    }
    if name == "" {
        return errInvalid
    }
    var vptr unsafe.Pointer
    if len(value) > 0 {
        vptr = unsafe.Pointer(&value[0])
    }
    cPath := C.CString(path)
    defer C.free(unsafe.Pointer(cPath))
    cName := C.CString(name)
    defer C.free(unsafe.Pointer(cName))

    ret := C.ceph_setxattr(
        mount.mount,
        cPath,
        cName,
        vptr,
        C.size_t(len(value)),
        C.int(flags))
    return getError(ret)
}

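// Illustrative sketch (not part of the library file above): a typical
// SetXattr/GetXattr round trip. Per the NOTE on SetXattr, storing an empty
// value may effectively unset the attribute, so a caller that needs an
// "empty" marker should store at least one byte. Helper and attribute names
// are hypothetical; a connected *MountInfo is assumed.
func exampleSetGetXattr(mount *MountInfo, path string) ([]byte, error) {
    if err := mount.SetXattr(path, "user.project", []byte("alpha"), XattrDefault); err != nil {
        return nil, err
    }
    // Returns exactly the bytes stored above.
    return mount.GetXattr(path, "user.project")
}
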
// GetXattr gets an extended attribute from the file at the supplied path.
//
// Implements:
//
//	int ceph_getxattr(struct ceph_mount_info *cmount, const char *path, const char *name,
//	                  void *value, size_t size);
func (mount *MountInfo) GetXattr(path, name string) ([]byte, error) {
    if err := mount.validate(); err != nil {
        return nil, err
    }
    if name == "" {
        return nil, errInvalid
    }
    cPath := C.CString(path)
    defer C.free(unsafe.Pointer(cPath))
    cName := C.CString(name)
    defer C.free(unsafe.Pointer(cName))

    var (
        ret C.int
        err error
        buf []byte
    )
    // range from 1k to 64KiB
    retry.WithSizes(1024, 1<<16, func(size int) retry.Hint {
        buf = make([]byte, size)
        ret = C.ceph_getxattr(
            mount.mount,
            cPath,
            cName,
            unsafe.Pointer(&buf[0]),
            C.size_t(size))
        err = getErrorIfNegative(ret)
        return retry.DoubleSize.If(err == errRange)
    })
    if err != nil {
        return nil, err
    }
    return buf[:ret], nil
}

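// Illustrative sketch (not part of the library file above): the buffer-sizing
// pattern used by GetXattr and the other list/get calls in this file. The
// buffer starts at 1 KiB and is doubled up to 64 KiB whenever the underlying
// call reports it was too small (errRange). The fetch callback is a
// hypothetical stand-in for a call such as ceph_getxattr.
func growAndRetry(fetch func(buf []byte) (int, error)) ([]byte, error) {
    var (
        n   int
        err error
        buf []byte
    )
    retry.WithSizes(1024, 1<<16, func(size int) retry.Hint {
        buf = make([]byte, size)
        n, err = fetch(buf)
        return retry.DoubleSize.If(err == errRange)
    })
    if err != nil {
        return nil, err
    }
    return buf[:n], nil
}
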
// ListXattr returns a slice containing strings for the name of each xattr set
// on the file at the supplied path.
//
// Implements:
//
//	int ceph_listxattr(struct ceph_mount_info *cmount, const char *path, char *list, size_t size);
func (mount *MountInfo) ListXattr(path string) ([]string, error) {
    if err := mount.validate(); err != nil {
        return nil, err
    }
    cPath := C.CString(path)
    defer C.free(unsafe.Pointer(cPath))

    var (
        ret C.int
        err error
        buf []byte
    )
    // range from 1k to 64KiB
    retry.WithSizes(1024, 1<<16, func(size int) retry.Hint {
        buf = make([]byte, size)
        ret = C.ceph_listxattr(
            mount.mount,
            cPath,
            (*C.char)(unsafe.Pointer(&buf[0])),
            C.size_t(size))
        err = getErrorIfNegative(ret)
        return retry.DoubleSize.If(err == errRange)
    })
    if err != nil {
        return nil, err
    }

    names := cutil.SplitSparseBuffer(buf[:ret])
    return names, nil
}

// RemoveXattr removes the named xattr from the file at the supplied path.
//
// Implements:
//
//	int ceph_removexattr(struct ceph_mount_info *cmount, const char *path, const char *name);
func (mount *MountInfo) RemoveXattr(path, name string) error {
    if err := mount.validate(); err != nil {
        return err
    }
    if name == "" {
        return errInvalid
    }
    cPath := C.CString(path)
    defer C.free(unsafe.Pointer(cPath))
    cName := C.CString(name)
    defer C.free(unsafe.Pointer(cName))

    ret := C.ceph_removexattr(
        mount.mount,
        cPath,
        cName)
    return getError(ret)
}

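// Illustrative sketch (not part of the library file above): listing the xattr
// names on a path and removing one of them. ListXattr returns plain attribute
// names; RemoveXattr deletes a single named attribute and fails if it is not
// set. Helper and attribute names are hypothetical.
func exampleListAndRemoveXattr(mount *MountInfo, path string) error {
    names, err := mount.ListXattr(path)
    if err != nil {
        return err
    }
    for _, name := range names {
        if name == "user.obsolete" { // hypothetical attribute to clean up
            return mount.RemoveXattr(path, name)
        }
    }
    return nil
}
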
// LsetXattr sets an extended attribute on the file at the supplied path,
// without following symbolic links: on a symlink the attribute is set on the
// link itself rather than on its target.
//
// NOTE: Attempting to set an xattr value with an empty value may cause
// the xattr to be unset. Please refer to https://tracker.ceph.com/issues/46084
//
// Implements:
//
//	int ceph_lsetxattr(struct ceph_mount_info *cmount, const char *path, const char *name,
//	                   const void *value, size_t size, int flags);
func (mount *MountInfo) LsetXattr(path, name string, value []byte, flags XattrFlags) error {
    if err := mount.validate(); err != nil {
        return err
    }
    if name == "" {
        return errInvalid
    }
    var vptr unsafe.Pointer
    if len(value) > 0 {
        vptr = unsafe.Pointer(&value[0])
    }
    cPath := C.CString(path)
    defer C.free(unsafe.Pointer(cPath))
    cName := C.CString(name)
    defer C.free(unsafe.Pointer(cName))

    ret := C.ceph_lsetxattr(
        mount.mount,
        cPath,
        cName,
        vptr,
        C.size_t(len(value)),
        C.int(flags))
    return getError(ret)
}

// LgetXattr gets an extended attribute from the file at the supplied path,
// without following symbolic links.
//
// Implements:
//
//	int ceph_lgetxattr(struct ceph_mount_info *cmount, const char *path, const char *name,
//	                   void *value, size_t size);
func (mount *MountInfo) LgetXattr(path, name string) ([]byte, error) {
    if err := mount.validate(); err != nil {
        return nil, err
    }
    if name == "" {
        return nil, errInvalid
    }
    cPath := C.CString(path)
    defer C.free(unsafe.Pointer(cPath))
    cName := C.CString(name)
    defer C.free(unsafe.Pointer(cName))

    var (
        ret C.int
        err error
        buf []byte
    )
    // range from 1k to 64KiB
    retry.WithSizes(1024, 1<<16, func(size int) retry.Hint {
        buf = make([]byte, size)
        ret = C.ceph_lgetxattr(
            mount.mount,
            cPath,
            cName,
            unsafe.Pointer(&buf[0]),
            C.size_t(size))
        err = getErrorIfNegative(ret)
        return retry.DoubleSize.If(err == errRange)
    })
    if err != nil {
        return nil, err
    }
    return buf[:ret], nil
}

// LlistXattr returns a slice containing strings for the name of each xattr set
// on the file at the supplied path, without following symbolic links.
//
// Implements:
//
//	int ceph_llistxattr(struct ceph_mount_info *cmount, const char *path, char *list, size_t size);
func (mount *MountInfo) LlistXattr(path string) ([]string, error) {
    if err := mount.validate(); err != nil {
        return nil, err
    }
    cPath := C.CString(path)
    defer C.free(unsafe.Pointer(cPath))

    var (
        ret C.int
        err error
        buf []byte
    )
    // range from 1k to 64KiB
    retry.WithSizes(1024, 1<<16, func(size int) retry.Hint {
        buf = make([]byte, size)
        ret = C.ceph_llistxattr(
            mount.mount,
            cPath,
            (*C.char)(unsafe.Pointer(&buf[0])),
            C.size_t(size))
        err = getErrorIfNegative(ret)
        return retry.DoubleSize.If(err == errRange)
    })
    if err != nil {
        return nil, err
    }

    names := cutil.SplitSparseBuffer(buf[:ret])
    return names, nil
}

// LremoveXattr removes the named xattr from the file at the supplied path,
// without following symbolic links.
//
// Implements:
//
//	int ceph_lremovexattr(struct ceph_mount_info *cmount, const char *path, const char *name);
func (mount *MountInfo) LremoveXattr(path, name string) error {
    if err := mount.validate(); err != nil {
        return err
    }
    if name == "" {
        return errInvalid
    }
    cPath := C.CString(path)
    defer C.free(unsafe.Pointer(cPath))
    cName := C.CString(name)
    defer C.free(unsafe.Pointer(cName))

    ret := C.ceph_lremovexattr(
        mount.mount,
        cPath,
        cName)
    return getError(ret)
}
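// Illustrative sketch (not part of the library file above): the L-prefixed
// variants act on a symbolic link itself instead of the file it points to,
// mirroring lsetxattr(2)/lgetxattr(2). An attribute set via LsetXattr on a
// link is therefore not visible on its target. Helper, file and attribute
// names are hypothetical; link is assumed to be a symlink to file.
func exampleLinkXattr(mount *MountInfo, file, link string) error {
    // Stored on the link object only.
    if err := mount.LsetXattr(link, "user.marker", []byte("1"), XattrDefault); err != nil {
        return err
    }
    // Visible on the link...
    if _, err := mount.LgetXattr(link, "user.marker"); err != nil {
        return err
    }
    // ...while the same lookup on the target file is expected to fail, since
    // the attribute was never set there; that error is returned as-is.
    _, err := mount.LgetXattr(file, "user.marker")
    return err
}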

@@ -0,0 +1,300 @@
package cephfs

import (
    "os"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestGetSetXattrPath(t *testing.T) {
    mount := fsConnect(t)
    defer fsDisconnect(t, mount)
    fname := "TestGetSetXattrPath.txt"

    f1, err := mount.Open(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
    require.NoError(t, err)
    assert.NoError(t, f1.Close())
    defer func() {
        assert.NoError(t, mount.Unlink(fname))
    }()

    for _, s := range xattrSamples {
        t.Run("roundTrip-"+s.name, func(t *testing.T) {
            err := mount.SetXattr(fname, s.name, s.value, XattrDefault)
            assert.NoError(t, err)
            b, err := mount.GetXattr(fname, s.name)
            assert.NoError(t, err)
            assert.EqualValues(t, s.value, b)
        })
    }

    t.Run("missingXattrOnGet", func(t *testing.T) {
        _, err := mount.GetXattr(fname, "user.never-set")
        assert.Error(t, err)
    })

    t.Run("emptyNameGet", func(t *testing.T) {
        _, err := mount.GetXattr(fname, "")
        assert.Error(t, err)
    })

    t.Run("emptyNameSet", func(t *testing.T) {
        err := mount.SetXattr(fname, "", []byte("foo"), XattrDefault)
        assert.Error(t, err)
    })

    t.Run("invalidMount", func(t *testing.T) {
        m := &MountInfo{}
        err := m.SetXattr(fname, xattrSamples[0].name, xattrSamples[0].value, XattrDefault)
        assert.Error(t, err)
        _, err = m.GetXattr(fname, xattrSamples[0].name)
        assert.Error(t, err)
    })
}

func TestListXattrPath(t *testing.T) {
    mount := fsConnect(t)
    defer fsDisconnect(t, mount)
    fname := "TestListXattrPath.txt"

    f1, err := mount.Open(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
    require.NoError(t, err)
    assert.NoError(t, f1.Close())
    defer func() {
        assert.NoError(t, mount.Unlink(fname))
    }()

    t.Run("listXattrs1", func(t *testing.T) {
        for _, s := range xattrSamples[:1] {
            err := mount.SetXattr(fname, s.name, s.value, XattrDefault)
            assert.NoError(t, err)
        }
        xl, err := mount.ListXattr(fname)
        assert.NoError(t, err)
        assert.Len(t, xl, 1)
        assert.Contains(t, xl, xattrSamples[0].name)
    })

    t.Run("listXattrs2", func(t *testing.T) {
        for _, s := range xattrSamples {
            err := mount.SetXattr(fname, s.name, s.value, XattrDefault)
            assert.NoError(t, err)
        }
        xl, err := mount.ListXattr(fname)
        assert.NoError(t, err)
        assert.Len(t, xl, 4)
        assert.Contains(t, xl, xattrSamples[0].name)
        assert.Contains(t, xl, xattrSamples[1].name)
        assert.Contains(t, xl, xattrSamples[2].name)
        assert.Contains(t, xl, xattrSamples[3].name)
    })

    t.Run("invalidMount", func(t *testing.T) {
        m := &MountInfo{}
        _, err := m.ListXattr(fname)
        assert.Error(t, err)
    })
}

func TestRemoveXattrPath(t *testing.T) {
    mount := fsConnect(t)
    defer fsDisconnect(t, mount)
    fname := "TestRemoveXattrPath.txt"

    f1, err := mount.Open(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
    require.NoError(t, err)
    assert.NoError(t, f1.Close())
    defer func() {
        assert.NoError(t, mount.Unlink(fname))
    }()

    t.Run("removeXattr", func(t *testing.T) {
        s := xattrSamples[0]
        err := mount.SetXattr(fname, s.name, s.value, XattrDefault)
        assert.NoError(t, err)
        err = mount.RemoveXattr(fname, s.name)
        assert.NoError(t, err)
    })

    t.Run("removeMissingXattr", func(t *testing.T) {
        s := xattrSamples[1]
        err := mount.RemoveXattr(fname, s.name)
        assert.Error(t, err)
    })

    t.Run("emptyName", func(t *testing.T) {
        err := mount.RemoveXattr(fname, "")
        assert.Error(t, err)
    })

    t.Run("invalidMount", func(t *testing.T) {
        m := &MountInfo{}
        err := m.RemoveXattr(fname, xattrSamples[0].name)
        assert.Error(t, err)
    })
}

func TestGetSetXattrLinkPath(t *testing.T) {
    mount := fsConnect(t)
    defer fsDisconnect(t, mount)
    fname := "TestGetSetXattrLinkPath.txt"
    lname := "TestGetSetXattrLinkPath.lnk"

    f1, err := mount.Open(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
    require.NoError(t, err)
    assert.NoError(t, f1.Close())
    err = mount.Symlink(fname, lname)
    require.NoError(t, err)
    defer func() {
        assert.NoError(t, mount.Unlink(fname))
        assert.NoError(t, mount.Unlink(lname))
    }()

    for _, s := range xattrSamples {
        t.Run("roundTrip-"+s.name, func(t *testing.T) {
            err := mount.LsetXattr(lname, s.name, s.value, XattrDefault)
            assert.NoError(t, err)
            b, err := mount.LgetXattr(lname, s.name)
            assert.NoError(t, err)
            assert.EqualValues(t, s.value, b)
        })
    }

    t.Run("linkVsFile", func(t *testing.T) {
        s := xattrSamples[0]
        err := mount.LsetXattr(lname, s.name, s.value, XattrDefault)
        assert.NoError(t, err)

        // not on the file
        err = mount.LremoveXattr(fname, s.name)
        assert.Error(t, err)
        // on the link
        err = mount.LremoveXattr(lname, s.name)
        assert.NoError(t, err)
    })

    t.Run("missingXattrOnGet", func(t *testing.T) {
        _, err := mount.LgetXattr(lname, "user.never-set")
        assert.Error(t, err)
    })

    t.Run("emptyNameGet", func(t *testing.T) {
        _, err := mount.LgetXattr(lname, "")
        assert.Error(t, err)
    })

    t.Run("emptyNameSet", func(t *testing.T) {
        err := mount.LsetXattr(lname, "", []byte("foo"), XattrDefault)
        assert.Error(t, err)
    })

    t.Run("invalidMount", func(t *testing.T) {
        m := &MountInfo{}
        err := m.LsetXattr(lname, xattrSamples[0].name, xattrSamples[0].value, XattrDefault)
        assert.Error(t, err)
        _, err = m.LgetXattr(lname, xattrSamples[0].name)
        assert.Error(t, err)
    })
}

func TestListXattrLinkPath(t *testing.T) {
    mount := fsConnect(t)
    defer fsDisconnect(t, mount)
    fname := "TestListXattrLinkPath.txt"
    lname := "TestListXattrLinkPath.lnk"

    f1, err := mount.Open(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
    require.NoError(t, err)
    assert.NoError(t, f1.Close())
    err = mount.Symlink(fname, lname)
    require.NoError(t, err)
    defer func() {
        assert.NoError(t, mount.Unlink(fname))
        assert.NoError(t, mount.Unlink(lname))
    }()

    t.Run("listXattrs1", func(t *testing.T) {
        for _, s := range xattrSamples[:1] {
            err := mount.LsetXattr(lname, s.name, s.value, XattrDefault)
            assert.NoError(t, err)
        }

        // not on the file
        xl, err := mount.LlistXattr(fname)
        assert.NoError(t, err)
        assert.Len(t, xl, 0)
        // on the link
        xl, err = mount.LlistXattr(lname)
        assert.NoError(t, err)
        assert.Len(t, xl, 1)
        assert.Contains(t, xl, xattrSamples[0].name)
    })

    t.Run("listXattrs2", func(t *testing.T) {
        for _, s := range xattrSamples {
            err := mount.LsetXattr(lname, s.name, s.value, XattrDefault)
            assert.NoError(t, err)
        }
        xl, err := mount.LlistXattr(lname)
        assert.NoError(t, err)
        assert.Len(t, xl, 4)
        assert.Contains(t, xl, xattrSamples[0].name)
        assert.Contains(t, xl, xattrSamples[1].name)
        assert.Contains(t, xl, xattrSamples[2].name)
        assert.Contains(t, xl, xattrSamples[3].name)
    })

    t.Run("invalidMount", func(t *testing.T) {
        m := &MountInfo{}
        _, err := m.LlistXattr(lname)
        assert.Error(t, err)
    })
}

func TestRemoveXattrLinkPath(t *testing.T) {
    mount := fsConnect(t)
    defer fsDisconnect(t, mount)
    fname := "TestRemoveXattrLinkPath.txt"
    lname := "TestRemoveXattrLinkPath.lnk"

    f1, err := mount.Open(fname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
    require.NoError(t, err)
    assert.NoError(t, f1.Close())
    err = mount.Symlink(fname, lname)
    require.NoError(t, err)
    defer func() {
        assert.NoError(t, mount.Unlink(fname))
        assert.NoError(t, mount.Unlink(lname))
    }()

    t.Run("removeXattr", func(t *testing.T) {
        s := xattrSamples[0]
        err := mount.LsetXattr(lname, s.name, s.value, XattrDefault)
        assert.NoError(t, err)

        // not on the file
        err = mount.LremoveXattr(fname, s.name)
        assert.Error(t, err)
        // on the link
        err = mount.LremoveXattr(lname, s.name)
        assert.NoError(t, err)
    })

    t.Run("removeMissingXattr", func(t *testing.T) {
        s := xattrSamples[1]
        err := mount.LremoveXattr(lname, s.name)
        assert.Error(t, err)
    })

    t.Run("emptyName", func(t *testing.T) {
        err := mount.LremoveXattr(lname, "")
        assert.Error(t, err)
    })

    t.Run("invalidMount", func(t *testing.T) {
        m := &MountInfo{}
        err := m.LremoveXattr(lname, xattrSamples[0].name)
        assert.Error(t, err)
    })
}

@@ -0,0 +1,48 @@
package cephfs

/*
#cgo LDFLAGS: -lcephfs
#cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64
#include <stdlib.h>
#include <cephfs/libcephfs.h>

int _go_ceph_chown(struct ceph_mount_info *cmount, const char *path, uid_t uid, gid_t gid) {
    return ceph_chown(cmount, path, uid, gid);
}

int _go_ceph_lchown(struct ceph_mount_info *cmount, const char *path, uid_t uid, gid_t gid) {
    return ceph_lchown(cmount, path, uid, gid);
}
*/
import "C"

import (
    "unsafe"
)

// Chmod changes the mode bits (permissions) of a file/directory.
func (mount *MountInfo) Chmod(path string, mode uint32) error {
    cPath := C.CString(path)
    defer C.free(unsafe.Pointer(cPath))

    ret := C.ceph_chmod(mount.mount, cPath, C.mode_t(mode))
    return getError(ret)
}

// Chown changes the ownership of a file/directory.
func (mount *MountInfo) Chown(path string, user uint32, group uint32) error {
    cPath := C.CString(path)
    defer C.free(unsafe.Pointer(cPath))

    ret := C._go_ceph_chown(mount.mount, cPath, C.uid_t(user), C.gid_t(group))
    return getError(ret)
}

// Lchown changes the ownership of a file/directory/etc without following symbolic links.
func (mount *MountInfo) Lchown(path string, user uint32, group uint32) error {
    cPath := C.CString(path)
    defer C.free(unsafe.Pointer(cPath))

    ret := C._go_ceph_lchown(mount.mount, cPath, C.uid_t(user), C.gid_t(group))
    return getError(ret)
}
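// Illustrative sketch (not part of the library file above): typical use of
// Chmod, Chown and Lchown. Chmod and Chown operate on the path's target,
// while Lchown changes ownership of a symbolic link object itself. The caller
// needs sufficient privileges on the CephFS mount to change ownership. Helper
// name and parameters are hypothetical.
func exampleModeAndOwnership(mount *MountInfo, path, link string, uid, gid uint32) error {
    if err := mount.Chmod(path, 0700); err != nil {
        return err
    }
    if err := mount.Chown(path, uid, gid); err != nil {
        return err
    }
    // For a symbolic link, Lchown affects the link, not what it points to.
    return mount.Lchown(link, uid, gid)
}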

@@ -0,0 +1,95 @@
package cephfs

import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestChmodDir(t *testing.T) {
    dirname := "two"
    var statsBefore uint32 = 0755
    var statsAfter uint32 = 0700
    mount := fsConnect(t)
    defer fsDisconnect(t, mount)

    err := mount.MakeDir(dirname, statsBefore)
    assert.NoError(t, err)
    defer mount.RemoveDir(dirname)

    err = mount.SyncFs()
    assert.NoError(t, err)

    sx, err := mount.Statx(dirname, StatxBasicStats, 0)
    require.NoError(t, err)

    assert.Equal(t, uint32(sx.Mode&0777), statsBefore)

    err = mount.Chmod(dirname, statsAfter)
    assert.NoError(t, err)

    sx, err = mount.Statx(dirname, StatxBasicStats, 0)
    require.NoError(t, err)
    assert.Equal(t, uint32(sx.Mode&0777), statsAfter)
}

func TestChown(t *testing.T) {
    dirname := "three"
    // dockerfile creates bob user account
    var bob uint32 = 1010
    var root uint32

    mount := fsConnect(t)
    defer fsDisconnect(t, mount)

    err := mount.MakeDir(dirname, 0755)
    assert.NoError(t, err)
    defer mount.RemoveDir(dirname)

    err = mount.SyncFs()
    assert.NoError(t, err)

    sx, err := mount.Statx(dirname, StatxBasicStats, 0)
    require.NoError(t, err)

    assert.Equal(t, sx.Uid, root)
    assert.Equal(t, sx.Gid, root)

    err = mount.Chown(dirname, bob, bob)
    assert.NoError(t, err)

    sx, err = mount.Statx(dirname, StatxBasicStats, 0)
    assert.NoError(t, err)
    assert.Equal(t, sx.Uid, bob)
    assert.Equal(t, sx.Gid, bob)
}

func TestLchown(t *testing.T) {
    dirname := "four"
    var bob uint32 = 1010
    var root uint32

    mount := fsConnect(t)
    defer fsDisconnect(t, mount)

    err := mount.MakeDir(dirname, 0755)
    assert.NoError(t, err)
    defer mount.RemoveDir(dirname)

    err = mount.SyncFs()
    assert.NoError(t, err)

    err = mount.Symlink(dirname, "symlnk")
    assert.NoError(t, err)
    defer mount.Unlink("symlnk")

    err = mount.Lchown("symlnk", bob, bob)
    assert.NoError(t, err)
    sx, err := mount.Statx("symlnk", StatxBasicStats, AtSymlinkNofollow)
    assert.NoError(t, err)
    assert.Equal(t, sx.Uid, bob)
    assert.Equal(t, sx.Gid, bob)
    sx, err = mount.Statx(dirname, StatxBasicStats, AtSymlinkNofollow)
    assert.NoError(t, err)
    assert.Equal(t, sx.Uid, root)
    assert.Equal(t, sx.Gid, root)
}
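// Illustrative sketch (not part of the test file above): verifying the effect
// of Lchown the same way TestLchown does, by calling Statx with
// AtSymlinkNofollow so the link object itself is examined rather than its
// target. Helper name is hypothetical; link is assumed to be an existing
// symlink on a connected *MountInfo.
func exampleVerifyLchown(mount *MountInfo, link string, uid, gid uint32) (bool, error) {
    if err := mount.Lchown(link, uid, gid); err != nil {
        return false, err
    }
    sx, err := mount.Statx(link, StatxBasicStats, AtSymlinkNofollow)
    if err != nil {
        return false, err
    }
    // True when the link object now carries the requested ownership.
    return sx.Uid == uid && sx.Gid == gid, nil
}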
Some files were not shown because too many files have changed in this diff.